Initial commit

This commit is contained in:
Donny
2019-04-22 20:46:32 +08:00
commit 49ab8aadd1
25441 changed files with 4055000 additions and 0 deletions

20
vendor/github.com/siddontang/go-mysql/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2014 siddontang
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -0,0 +1,42 @@
# Auto-managed Bazel build file for the vendored go-mysql/canal package.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

# Library target for the canal package (binlog-based MySQL replication client).
go_library(
    name = "go_default_library",
    srcs = [
        "canal.go",
        "config.go",
        "dump.go",
        "handler.go",
        "master.go",
        "rows.go",
        "sync.go",
    ],
    importmap = "go-common/vendor/github.com/siddontang/go-mysql/canal",
    importpath = "github.com/siddontang/go-mysql/canal",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/BurntSushi/toml:go_default_library",
        "//vendor/github.com/juju/errors:go_default_library",
        "//vendor/github.com/satori/go.uuid:go_default_library",
        "//vendor/github.com/siddontang/go-mysql/client:go_default_library",
        "//vendor/github.com/siddontang/go-mysql/dump:go_default_library",
        "//vendor/github.com/siddontang/go-mysql/mysql:go_default_library",
        "//vendor/github.com/siddontang/go-mysql/replication:go_default_library",
        "//vendor/github.com/siddontang/go-mysql/schema:go_default_library",
        "//vendor/github.com/sirupsen/logrus:go_default_library",
    ],
)

# Source-file groups consumed by the repo-wide "all-srcs" aggregation.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

443
vendor/github.com/siddontang/go-mysql/canal/canal.go generated vendored Normal file
View File

@@ -0,0 +1,443 @@
package canal
import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/juju/errors"
	"github.com/siddontang/go-mysql/client"
	"github.com/siddontang/go-mysql/dump"
	"github.com/siddontang/go-mysql/mysql"
	"github.com/siddontang/go-mysql/replication"
	"github.com/siddontang/go-mysql/schema"
	log "github.com/sirupsen/logrus"
)
// Canal can sync your MySQL data into everywhere, like Elasticsearch, Redis, etc...
// MySQL must open row format for binlog
type Canal struct {
	m sync.Mutex // serializes Close

	cfg *Config

	useGTID bool // replicate from a GTID set instead of a file position

	master     *masterInfo   // last synced binlog position / GTID set
	dumper     *dump.Dumper  // nil when mysqldump is not configured
	dumpDoneCh chan struct{} // closed once the initial dump phase finishes (success or not)
	syncer     *replication.BinlogSyncer

	eventHandler EventHandler // user callbacks; defaults to DummyEventHandler

	connLock sync.Mutex // guards conn
	conn     *client.Conn // lazily created by Execute

	tableLock sync.RWMutex // guards tables, errorTablesGetTime and tableMatchCache
	tables    map[string]*schema.Table // table meta cache, keyed by "db.table"
	// last time fetching a table's meta failed; only populated when
	// cfg.DiscardNoMetaRowEvent is set
	errorTablesGetTime map[string]time.Time

	tableMatchCache   map[string]bool // memoized include/exclude decisions, keyed by "db.table"
	includeTableRegex []*regexp.Regexp
	excludeTableRegex []*regexp.Regexp

	ctx    context.Context
	cancel context.CancelFunc
}

// canal will retry fetching unknown table's meta after UnknownTableRetryPeriod
var UnknownTableRetryPeriod = time.Second * time.Duration(10)

// ErrExcludedTable is returned by GetTable when the table is rejected by
// the IncludeTableRegex/ExcludeTableRegex filters.
var ErrExcludedTable = errors.New("excluded table meta")
// NewCanal creates a Canal from cfg. It prepares the optional mysqldump
// dumper, the binlog syncer and the table-name filters, and verifies the
// server uses ROW binlog format. The returned Canal is not started; call
// Run, RunFrom or StartFromGTID.
func NewCanal(cfg *Config) (*Canal, error) {
	c := new(Canal)
	c.cfg = cfg
	c.ctx, c.cancel = context.WithCancel(context.Background())

	c.dumpDoneCh = make(chan struct{})
	c.eventHandler = &DummyEventHandler{}

	c.tables = make(map[string]*schema.Table)
	if c.cfg.DiscardNoMetaRowEvent {
		c.errorTablesGetTime = make(map[string]time.Time)
	}
	c.master = &masterInfo{}

	var err error
	if err = c.prepareDumper(); err != nil {
		return nil, errors.Trace(err)
	}
	if err = c.prepareSyncer(); err != nil {
		return nil, errors.Trace(err)
	}
	if err = c.checkBinlogRowFormat(); err != nil {
		return nil, errors.Trace(err)
	}

	// init table filter; the two lists used identical compile loops, so the
	// compilation is factored into compileRegexps.
	if c.includeTableRegex, err = compileRegexps(c.cfg.IncludeTableRegex); err != nil {
		return nil, errors.Trace(err)
	}
	if c.excludeTableRegex, err = compileRegexps(c.cfg.ExcludeTableRegex); err != nil {
		return nil, errors.Trace(err)
	}
	// the match cache is only needed when at least one filter is configured
	if c.includeTableRegex != nil || c.excludeTableRegex != nil {
		c.tableMatchCache = make(map[string]bool)
	}

	return c, nil
}

// compileRegexps compiles every pattern in patterns; it returns (nil, nil)
// for an empty list so callers can distinguish "no filter configured".
func compileRegexps(patterns []string) ([]*regexp.Regexp, error) {
	if len(patterns) == 0 {
		return nil, nil
	}
	regs := make([]*regexp.Regexp, len(patterns))
	for i, p := range patterns {
		reg, err := regexp.Compile(p)
		if err != nil {
			return nil, errors.Trace(err)
		}
		regs[i] = reg
	}
	return regs, nil
}
// prepareDumper configures the mysqldump-based dumper from cfg.Dump.
// When no execution path is configured the dumper stays nil and the canal
// works from the binlog only.
func (c *Canal) prepareDumper() error {
	var err error
	dumpPath := c.cfg.Dump.ExecutionPath
	if len(dumpPath) == 0 {
		// ignore mysqldump, use binlog only
		return nil
	}
	if c.dumper, err = dump.NewDumper(dumpPath,
		c.cfg.Addr, c.cfg.User, c.cfg.Password); err != nil {
		return errors.Trace(err)
	}
	// defensive: NewDumper reported no error but produced no dumper
	if c.dumper == nil {
		//no mysqldump, use binlog only
		return nil
	}
	dbs := c.cfg.Dump.Databases
	tables := c.cfg.Dump.Tables
	tableDB := c.cfg.Dump.TableDB
	// explicit table list (all within TableDB) overrides the database list
	if len(tables) == 0 {
		c.dumper.AddDatabases(dbs...)
	} else {
		c.dumper.AddTables(tableDB, tables...)
	}
	charset := c.cfg.Charset
	c.dumper.SetCharset(charset)
	c.dumper.SkipMasterData(c.cfg.Dump.SkipMasterData)
	c.dumper.SetMaxAllowedPacket(c.cfg.Dump.MaxAllowedPacketMB)
	// ignore-table entries are "db,table" pairs; malformed entries are
	// silently skipped
	for _, ignoreTable := range c.cfg.Dump.IgnoreTables {
		if seps := strings.Split(ignoreTable, ","); len(seps) == 2 {
			c.dumper.AddIgnoreTables(seps[0], seps[1])
		}
	}
	if c.cfg.Dump.DiscardErr {
		c.dumper.SetErrOut(ioutil.Discard)
	} else {
		c.dumper.SetErrOut(os.Stderr)
	}
	return nil
}
// Run will first try to dump all data from MySQL master `mysqldump`,
// then sync from the binlog position in the dump data.
// It will run forever until meeting an error or Canal closed.
func (c *Canal) Run() error {
	return c.run()
}

// RunFrom will sync from the binlog position directly, ignore mysqldump.
func (c *Canal) RunFrom(pos mysql.Position) error {
	c.useGTID = false
	c.master.Update(pos)
	return c.Run()
}

// StartFromGTID behaves like RunFrom but resumes replication from the
// given GTID set instead of a file position.
func (c *Canal) StartFromGTID(set mysql.GTIDSet) error {
	c.useGTID = true
	c.master.UpdateGTID(set)
	return c.Run()
}

// run executes the dump phase followed by the binlog sync loop. The
// context is cancelled on exit so other goroutines can observe shutdown.
func (c *Canal) run() error {
	defer func() {
		c.cancel()
	}()
	// dumpDoneCh is closed whatever the dump outcome, so WaitDumpDone
	// can never block forever.
	err := c.tryDump()
	close(c.dumpDoneCh)
	if err != nil {
		log.Errorf("canal dump mysql err: %v", err)
		return errors.Trace(err)
	}
	if err = c.runSyncBinlog(); err != nil {
		log.Errorf("canal start sync binlog err: %v", err)
		return errors.Trace(err)
	}
	return nil
}
// Close cancels the canal context, tears down the server connection and
// the binlog syncer, and reports the final synced position to the event
// handler. It is safe to call even if no SQL connection was ever created.
func (c *Canal) Close() {
	log.Infof("closing canal")

	c.m.Lock()
	defer c.m.Unlock()

	c.cancel()

	c.connLock.Lock()
	// conn is created lazily by Execute, so it may still be nil here;
	// the previous code dereferenced it unconditionally and panicked
	// when Close was called before any query had run.
	if c.conn != nil {
		c.conn.Close()
		c.conn = nil
	}
	c.connLock.Unlock()

	c.syncer.Close()

	c.eventHandler.OnPosSynced(c.master.Position(), true)
}
// WaitDumpDone returns a channel that is closed once the initial dump
// phase has finished, whether it succeeded or failed.
func (c *Canal) WaitDumpDone() <-chan struct{} {
	return c.dumpDoneCh
}

// Ctx returns the canal's context; it is cancelled when the canal stops.
func (c *Canal) Ctx() context.Context {
	return c.ctx
}
// checkTableMatch reports whether the table identified by key ("db.table")
// passes the include/exclude regex filters. A table matches when it hits
// at least one include pattern (or no include list exists... the cache is
// only allocated when a filter exists, so a nil cache means "match all")
// and hits no exclude pattern. Decisions are memoized in tableMatchCache.
func (c *Canal) checkTableMatch(key string) bool {
	if c.tableMatchCache == nil {
		// no filter configured: everything matches
		return true
	}

	c.tableLock.RLock()
	cached, hit := c.tableMatchCache[key]
	c.tableLock.RUnlock()
	if hit {
		return cached
	}

	matched := false
	if c.includeTableRegex != nil {
		for _, re := range c.includeTableRegex {
			if re.MatchString(key) {
				matched = true
				break
			}
		}
	}
	if matched && c.excludeTableRegex != nil {
		for _, re := range c.excludeTableRegex {
			if re.MatchString(key) {
				matched = false
				break
			}
		}
	}

	c.tableLock.Lock()
	c.tableMatchCache[key] = matched
	c.tableLock.Unlock()

	return matched
}
// GetTable returns the cached meta for db.table, fetching it from the
// server on a cache miss. It returns ErrExcludedTable for filtered
// tables, schema.ErrTableNotExist when the table is gone, and — when
// DiscardNoMetaRowEvent is set — schema.ErrMissingTableMeta for tables
// whose meta cannot be fetched (retried at most once per
// UnknownTableRetryPeriod).
func (c *Canal) GetTable(db string, table string) (*schema.Table, error) {
	key := fmt.Sprintf("%s.%s", db, table)
	// if table is excluded, return error and skip parsing event or dump
	if !c.checkTableMatch(key) {
		return nil, ErrExcludedTable
	}
	c.tableLock.RLock()
	t, ok := c.tables[key]
	c.tableLock.RUnlock()
	if ok {
		return t, nil
	}
	if c.cfg.DiscardNoMetaRowEvent {
		c.tableLock.RLock()
		lastTime, ok := c.errorTablesGetTime[key]
		c.tableLock.RUnlock()
		// the previous fetch failed recently: don't hammer the server
		if ok && time.Now().Sub(lastTime) < UnknownTableRetryPeriod {
			return nil, schema.ErrMissingTableMeta
		}
	}
	t, err := schema.NewTable(c, db, table)
	if err != nil {
		// check table not exists
		if ok, err1 := schema.IsTableExist(c, db, table); err1 == nil && !ok {
			return nil, schema.ErrTableNotExist
		}
		// work around : RDS HAHeartBeat
		// ref : https://github.com/alibaba/canal/blob/master/parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java#L385
		// issue : https://github.com/alibaba/canal/issues/222
		// This is a common error in RDS that canal can't get HAHealthCheckSchema's meta, so we mock a table meta.
		// If canal just skip and log error, as RDS HA heartbeat interval is very short, so too many HAHeartBeat errors will be logged.
		if key == schema.HAHealthCheckSchema {
			// mock ha_health_check meta
			ta := &schema.Table{
				Schema:  db,
				Name:    table,
				Columns: make([]schema.TableColumn, 0, 2),
				Indexes: make([]*schema.Index, 0),
			}
			ta.AddColumn("id", "bigint(20)", "", "")
			ta.AddColumn("type", "char(1)", "", "")
			c.tableLock.Lock()
			c.tables[key] = ta
			c.tableLock.Unlock()
			return ta, nil
		}
		// if DiscardNoMetaRowEvent is true, we just log this error
		if c.cfg.DiscardNoMetaRowEvent {
			c.tableLock.Lock()
			c.errorTablesGetTime[key] = time.Now()
			c.tableLock.Unlock()
			// log error and return ErrMissingTableMeta
			log.Errorf("canal get table meta err: %v", errors.Trace(err))
			return nil, schema.ErrMissingTableMeta
		}
		return nil, err
	}
	c.tableLock.Lock()
	c.tables[key] = t
	if c.cfg.DiscardNoMetaRowEvent {
		// if get table info success, delete this key from errorTablesGetTime
		delete(c.errorTablesGetTime, key)
	}
	c.tableLock.Unlock()
	return t, nil
}
// ClearTableCache drops the cached meta (and any fetch-failure timestamp)
// for db.table, so the next GetTable refetches the possibly changed meta.
// Called by the sync loop when a DDL statement touches the table.
func (c *Canal) ClearTableCache(db []byte, table []byte) {
	key := fmt.Sprintf("%s.%s", db, table)
	c.tableLock.Lock()
	delete(c.tables, key)
	if c.cfg.DiscardNoMetaRowEvent {
		delete(c.errorTablesGetTime, key)
	}
	c.tableLock.Unlock()
}
// CheckBinlogRowImage verifies the server's binlog_row_image variable
// equals image (FULL, MINIMAL or NOBLOB, compared case-insensitively).
// Only the MySQL flavor is checked; MySQL servers before 5.6 report an
// empty value and always pass.
func (c *Canal) CheckBinlogRowImage(image string) error {
	// need to check MySQL binlog row image? full, minimal or noblob?
	// now only log
	if c.cfg.Flavor != mysql.MySQLFlavor {
		return nil
	}

	res, err := c.Execute(`SHOW GLOBAL VARIABLES LIKE "binlog_row_image"`)
	if err != nil {
		return errors.Trace(err)
	}

	// MySQL has binlog row image from 5.6, so older will return empty
	rowImage, _ := res.GetString(0, 1)
	if rowImage != "" && !strings.EqualFold(rowImage, image) {
		return errors.Errorf("MySQL uses %s binlog row image, but we want %s", rowImage, image)
	}
	return nil
}

// checkBinlogRowFormat fails unless the server's binlog_format is ROW,
// which row-based replication requires.
func (c *Canal) checkBinlogRowFormat() error {
	res, err := c.Execute(`SHOW GLOBAL VARIABLES LIKE "binlog_format";`)
	if err != nil {
		return errors.Trace(err)
	}

	if f, _ := res.GetString(0, 1); f != "ROW" {
		return errors.Errorf("binlog must ROW format, but %s now", f)
	}
	return nil
}
// prepareSyncer builds the replication.BinlogSyncer from the canal
// configuration. Addr must have host:port form; net.SplitHostPort is used
// so IPv6 literals such as "[::1]:3306" are accepted, which the previous
// strings.Split(addr, ":") mis-parsed.
func (c *Canal) prepareSyncer() error {
	host, portStr, err := net.SplitHostPort(c.cfg.Addr)
	if err != nil {
		return errors.Errorf("invalid mysql addr format %s, must host:port", c.cfg.Addr)
	}

	port, err := strconv.ParseUint(portStr, 10, 16)
	if err != nil {
		return errors.Trace(err)
	}

	cfg := replication.BinlogSyncerConfig{
		ServerID:        c.cfg.ServerID,
		Flavor:          c.cfg.Flavor,
		Host:            host,
		Port:            uint16(port),
		User:            c.cfg.User,
		Password:        c.cfg.Password,
		Charset:         c.cfg.Charset,
		HeartbeatPeriod: c.cfg.HeartbeatPeriod,
		ReadTimeout:     c.cfg.ReadTimeout,
		UseDecimal:      c.cfg.UseDecimal,
	}
	c.syncer = replication.NewBinlogSyncer(cfg)

	return nil
}
// Execute runs a SQL command on the canal's own lazily created server
// connection (serialized by connLock). When the connection has gone bad
// (mysql.ErrBadConn) it is discarded and the command retried, up to 3
// attempts in total.
func (c *Canal) Execute(cmd string, args ...interface{}) (rr *mysql.Result, err error) {
	c.connLock.Lock()
	defer c.connLock.Unlock()
	retryNum := 3
	for i := 0; i < retryNum; i++ {
		if c.conn == nil {
			c.conn, err = client.Connect(c.cfg.Addr, c.cfg.User, c.cfg.Password, "")
			if err != nil {
				return nil, errors.Trace(err)
			}
		}
		rr, err = c.conn.Execute(cmd, args...)
		if err != nil && !mysql.ErrorEqual(err, mysql.ErrBadConn) {
			// real query error: give up immediately
			return
		} else if mysql.ErrorEqual(err, mysql.ErrBadConn) {
			// stale connection: drop it and retry with a fresh one
			c.conn.Close()
			c.conn = nil
			continue
		} else {
			// success
			return
		}
	}
	// retries exhausted; rr/err hold the last attempt's result
	return
}

// SyncedPosition returns the last binlog position the canal has synced.
func (c *Canal) SyncedPosition() mysql.Position {
	return c.master.Position()
}

103
vendor/github.com/siddontang/go-mysql/canal/config.go generated vendored Normal file
View File

@@ -0,0 +1,103 @@
package canal
import (
"io/ioutil"
"math/rand"
"time"
"github.com/BurntSushi/toml"
"github.com/juju/errors"
"github.com/siddontang/go-mysql/mysql"
)
// DumpConfig controls the optional initial mysqldump phase.
type DumpConfig struct {
	// mysqldump execution path, like mysqldump or /usr/bin/mysqldump, etc...
	// If not set, ignore using mysqldump.
	ExecutionPath string `toml:"mysqldump"`
	// Will override Databases, tables is in database table_db
	Tables  []string `toml:"tables"`
	TableDB string   `toml:"table_db"`
	Databases []string `toml:"dbs"`
	// Ignore table format is db.table
	IgnoreTables []string `toml:"ignore_tables"`
	// If true, discard error msg, else, output to stderr
	DiscardErr bool `toml:"discard_err"`
	// Set true to skip --master-data if we have no privilege to do
	// 'FLUSH TABLES WITH READ LOCK'
	SkipMasterData bool `toml:"skip_master_data"`
	// Set to change the default max_allowed_packet size
	MaxAllowedPacketMB int `toml:"max_allowed_packet_mb"`
}
// Config is the canal configuration, decodable from TOML.
type Config struct {
	Addr     string `toml:"addr"`
	User     string `toml:"user"`
	Password string `toml:"password"`
	Charset  string `toml:"charset"`

	ServerID uint32 `toml:"server_id"`
	Flavor   string `toml:"flavor"`

	HeartbeatPeriod time.Duration `toml:"heartbeat_period"`
	ReadTimeout     time.Duration `toml:"read_timeout"`

	// IncludeTableRegex or ExcludeTableRegex should contain database name
	// Only a table which matches IncludeTableRegex and dismatches ExcludeTableRegex will be processed
	// eg, IncludeTableRegex : [".*\\.canal"], ExcludeTableRegex : ["mysql\\..*"]
	// this will include all database's 'canal' table, except database 'mysql'
	// Default IncludeTableRegex and ExcludeTableRegex are empty, this will include all tables
	//
	// Fix: these two tags were previously written without quotes
	// (`toml:include_table_regex`), which is a malformed struct tag —
	// reflect.StructTag.Get returns "" for it, so the TOML decoder never
	// bound the include_table_regex/exclude_table_regex keys, and go vet's
	// structtag check flags it.
	IncludeTableRegex []string `toml:"include_table_regex"`
	ExcludeTableRegex []string `toml:"exclude_table_regex"`

	// discard row event without table meta
	DiscardNoMetaRowEvent bool `toml:"discard_no_meta_row_event"`

	Dump DumpConfig `toml:"dump"`

	UseDecimal bool `toml:"use_decimal"`
}
// NewConfigWithFile reads the file at name and decodes it as a TOML Config.
func NewConfigWithFile(name string) (*Config, error) {
	data, err := ioutil.ReadFile(name)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return NewConfig(string(data))
}

// NewConfig decodes a TOML document into a Config.
func NewConfig(data string) (*Config, error) {
	var c Config
	_, err := toml.Decode(data, &c)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &c, nil
}

// NewDefaultConfig returns a Config preset for a local server:
// root@127.0.0.1:3306, default charset, mysqldump taken from PATH, and a
// random server id in [1001, 2000] so several canals can attach to the
// same master without colliding.
func NewDefaultConfig() *Config {
	c := new(Config)
	c.Addr = "127.0.0.1:3306"
	c.User = "root"
	c.Password = ""
	c.Charset = mysql.DEFAULT_CHARSET
	// seed with wall time so concurrently started processes get distinct ids
	rand.Seed(time.Now().Unix())
	c.ServerID = uint32(rand.Intn(1000)) + 1001
	c.Flavor = "mysql"
	c.Dump.ExecutionPath = "mysqldump"
	c.Dump.DiscardErr = true
	c.Dump.SkipMasterData = false
	return c
}

139
vendor/github.com/siddontang/go-mysql/canal/dump.go generated vendored Normal file
View File

@@ -0,0 +1,139 @@
package canal
import (
"strconv"
"time"
"github.com/juju/errors"
"github.com/siddontang/go-mysql/dump"
"github.com/siddontang/go-mysql/mysql"
"github.com/siddontang/go-mysql/schema"
log "github.com/sirupsen/logrus"
)
// dumpParseHandler consumes mysqldump output: it records the binlog
// coordinates reported by the dump and forwards each dumped row to the
// canal's event handler as an insert.
type dumpParseHandler struct {
	c    *Canal
	name string // binlog file name reported by the dump
	pos  uint64 // binlog offset reported by the dump
}

// BinLog stores the binlog coordinates found in the dump header.
func (h *dumpParseHandler) BinLog(name string, pos uint64) error {
	h.name = name
	h.pos = pos
	return nil
}
// Data converts one dumped row (raw mysqldump value strings) into typed
// values and delivers it to the event handler as an InsertAction
// RowsEvent. Rows from excluded / missing-meta tables are silently
// skipped; rows whose values cannot be parsed are skipped via
// dump.ErrSkip.
func (h *dumpParseHandler) Data(db string, table string, values []string) error {
	if err := h.c.ctx.Err(); err != nil {
		// canal is shutting down
		return err
	}
	tableInfo, err := h.c.GetTable(db, table)
	if err != nil {
		e := errors.Cause(err)
		if e == ErrExcludedTable ||
			e == schema.ErrTableNotExist ||
			e == schema.ErrMissingTableMeta {
			return nil
		}
		log.Errorf("get %s.%s information err: %v", db, table, err)
		return errors.Trace(err)
	}
	vs := make([]interface{}, len(values))
	for i, v := range values {
		if v == "NULL" {
			vs[i] = nil
		} else if v[0] != '\'' {
			// unquoted value: must be numeric according to the table meta
			// (assumes mysqldump never emits an empty unquoted field —
			// NOTE(review): v[0] would panic on "" — confirm upstream)
			if tableInfo.Columns[i].Type == schema.TYPE_NUMBER {
				n, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					log.Errorf("parse row %v at %d error %v, skip", values, i, err)
					return dump.ErrSkip
				}
				vs[i] = n
			} else if tableInfo.Columns[i].Type == schema.TYPE_FLOAT {
				f, err := strconv.ParseFloat(v, 64)
				if err != nil {
					log.Errorf("parse row %v at %d error %v, skip", values, i, err)
					return dump.ErrSkip
				}
				vs[i] = f
			} else {
				log.Errorf("parse row %v error, invalid type at %d, skip", values, i)
				return dump.ErrSkip
			}
		} else {
			// quoted string: strip the surrounding single quotes
			vs[i] = v[1 : len(v)-1]
		}
	}
	events := newRowsEvent(tableInfo, InsertAction, [][]interface{}{vs})
	return h.c.eventHandler.OnRow(events)
}
// AddDumpDatabases adds databases to the dump scope; no-op when no
// mysqldump dumper is configured.
func (c *Canal) AddDumpDatabases(dbs ...string) {
	if c.dumper == nil {
		return
	}
	c.dumper.AddDatabases(dbs...)
}

// AddDumpTables adds db's tables to the dump scope; no-op when no
// mysqldump dumper is configured.
func (c *Canal) AddDumpTables(db string, tables ...string) {
	if c.dumper == nil {
		return
	}
	c.dumper.AddTables(db, tables...)
}

// AddDumpIgnoreTables excludes db's tables from the dump; no-op when no
// mysqldump dumper is configured.
func (c *Canal) AddDumpIgnoreTables(db string, tables ...string) {
	if c.dumper == nil {
		return
	}
	c.dumper.AddIgnoreTables(db, tables...)
}
// tryDump runs the initial mysqldump phase unless it can be skipped: it is
// skipped when a replication position/GTID is already known (resuming) or
// when no dumper is configured. On success the master position advances to
// the binlog coordinates reported by the dump.
func (c *Canal) tryDump() error {
	pos := c.master.Position()
	gtid := c.master.GTID()
	if (len(pos.Name) > 0 && pos.Pos > 0) || gtid != nil {
		// we will sync with binlog name and position
		log.Infof("skip dump, use last binlog replication pos %s or GTID %s", pos, gtid)
		return nil
	}

	if c.dumper == nil {
		log.Info("skip dump, no mysqldump")
		return nil
	}

	h := &dumpParseHandler{c: c}
	if c.cfg.Dump.SkipMasterData {
		// --master-data is disabled, so the dump output carries no binlog
		// coordinates; read the current master position ourselves instead.
		pos, err := c.GetMasterPos()
		if err != nil {
			return errors.Trace(err)
		}
		log.Infof("skip master data, get current binlog position %v", pos)
		h.name = pos.Name
		h.pos = uint64(pos.Pos)
	}

	start := time.Now()
	log.Info("try dump MySQL and parse")
	if err := c.dumper.DumpAndParse(h); err != nil {
		return errors.Trace(err)
	}

	// time.Since replaces time.Now().Sub(start); keyed composite literal
	// keeps go vet's composites check quiet.
	log.Infof("dump MySQL and parse OK, use %0.2f seconds, start binlog replication at (%s, %d)",
		time.Since(start).Seconds(), h.name, h.pos)

	pos = mysql.Position{Name: h.name, Pos: uint32(h.pos)}
	c.master.Update(pos)
	c.eventHandler.OnPosSynced(pos, true)
	return nil
}

36
vendor/github.com/siddontang/go-mysql/canal/handler.go generated vendored Normal file
View File

@@ -0,0 +1,36 @@
package canal
import (
"github.com/siddontang/go-mysql/mysql"
"github.com/siddontang/go-mysql/replication"
)
// EventHandler receives parsed replication events from Canal. Implementers
// usually embed DummyEventHandler and override only the callbacks they
// care about; register the handler with Canal.SetEventHandler before
// starting the canal.
type EventHandler interface {
	// OnRotate is called when the master rotates to a new binlog file.
	// (fixes the "roateEvent" parameter-name typo in the original)
	OnRotate(rotateEvent *replication.RotateEvent) error
	// OnDDL is called for queries that change table structure.
	OnDDL(nextPos mysql.Position, queryEvent *replication.QueryEvent) error
	// OnRow is called for every row change (insert/update/delete).
	OnRow(e *RowsEvent) error
	// OnXID is called at transaction commit with the next binlog position.
	OnXID(nextPos mysql.Position) error
	// OnGTID is called when a new GTID is observed.
	OnGTID(gtid mysql.GTIDSet) error
	// OnPosSynced Use your own way to sync position. When force is true, sync position immediately.
	OnPosSynced(pos mysql.Position, force bool) error
	// String names the handler, used in logs.
	String() string
}
// DummyEventHandler is a no-op EventHandler; embed it to implement only
// the callbacks you care about.
type DummyEventHandler struct {
}

func (h *DummyEventHandler) OnRotate(*replication.RotateEvent) error { return nil }
func (h *DummyEventHandler) OnDDL(mysql.Position, *replication.QueryEvent) error {
	return nil
}
func (h *DummyEventHandler) OnRow(*RowsEvent) error                 { return nil }
func (h *DummyEventHandler) OnXID(mysql.Position) error             { return nil }
func (h *DummyEventHandler) OnGTID(mysql.GTIDSet) error             { return nil }
func (h *DummyEventHandler) OnPosSynced(mysql.Position, bool) error { return nil }
func (h *DummyEventHandler) String() string                         { return "DummyEventHandler" }

// `SetEventHandler` registers the sync handler, you must register your
// own handler before starting Canal.
func (c *Canal) SetEventHandler(h EventHandler) {
	c.eventHandler = h
}

46
vendor/github.com/siddontang/go-mysql/canal/master.go generated vendored Normal file
View File

@@ -0,0 +1,46 @@
package canal
import (
"sync"
"github.com/siddontang/go-mysql/mysql"
log "github.com/sirupsen/logrus"
)
// masterInfo tracks replication progress — a file-based binlog position
// and/or a GTID set — guarded by the embedded RWMutex.
type masterInfo struct {
	sync.RWMutex
	pos mysql.Position // last synced file position
	gtid mysql.GTIDSet // last synced GTID set; nil when unused
}
// Update records pos as the latest synced file-based binlog position.
func (m *masterInfo) Update(pos mysql.Position) {
	log.Debugf("update master position %s", pos)
	m.Lock()
	defer m.Unlock()
	m.pos = pos
}

// UpdateGTID records gtid as the latest synced GTID set.
func (m *masterInfo) UpdateGTID(gtid mysql.GTIDSet) {
	log.Debugf("update master gtid %s", gtid.String())
	m.Lock()
	defer m.Unlock()
	m.gtid = gtid
}

// Position returns the last synced file-based binlog position.
func (m *masterInfo) Position() mysql.Position {
	m.RLock()
	defer m.RUnlock()
	return m.pos
}

// GTID returns the last synced GTID set, or nil if none was recorded.
func (m *masterInfo) GTID() mysql.GTIDSet {
	m.RLock()
	defer m.RUnlock()
	return m.gtid
}

69
vendor/github.com/siddontang/go-mysql/canal/rows.go generated vendored Normal file
View File

@@ -0,0 +1,69 @@
package canal
import (
"fmt"
"github.com/juju/errors"
"github.com/siddontang/go-mysql/schema"
)
// Action kinds carried in RowsEvent.Action.
const (
	UpdateAction = "update"
	InsertAction = "insert"
	DeleteAction = "delete"
)

// RowsEvent is the parsed form of a binlog rows event, delivered to
// EventHandler.OnRow.
type RowsEvent struct {
	Table  *schema.Table
	Action string
	// changed row list
	// binlog has three update event version, v0, v1 and v2.
	// for v1 and v2, the rows number must be even.
	// Two rows for one event, format is [before update row, after update row]
	// for update v0, only one row for a event, and we don't support this version.
	Rows [][]interface{}
}
// newRowsEvent bundles a table's meta, the action kind and the changed
// rows into a RowsEvent.
func newRowsEvent(table *schema.Table, action string, rows [][]interface{}) *RowsEvent {
	return &RowsEvent{
		Table:  table,
		Action: action,
		Rows:   rows,
	}
}
// GetPKValues returns the primary-key values of row, in PK column order.
// A table may use several fields as its PK. It fails when the table has no
// PK or when the row length differs from the table's column count.
func GetPKValues(table *schema.Table, row []interface{}) ([]interface{}, error) {
	pkCols := table.PKColumns
	if len(pkCols) == 0 {
		return nil, errors.Errorf("table %s has no PK", table)
	}
	if len(table.Columns) != len(row) {
		return nil, errors.Errorf("table %s has %d columns, but row data %v len is %d", table,
			len(table.Columns), row, len(row))
	}

	values := make([]interface{}, 0, len(pkCols))
	for _, col := range pkCols {
		values = append(values, row[col])
	}
	return values, nil
}

// GetColumnValue returns the value of the named column within row.
func GetColumnValue(table *schema.Table, column string, row []interface{}) (interface{}, error) {
	idx := table.FindColumn(column)
	if idx == -1 {
		return nil, errors.Errorf("table %s has no column name %s", table, column)
	}
	return row[idx], nil
}

// String implements fmt.Stringer interface.
func (r *RowsEvent) String() string {
	return fmt.Sprintf("%s %s %v", r.Action, r.Table, r.Rows)
}

228
vendor/github.com/siddontang/go-mysql/canal/sync.go generated vendored Normal file
View File

@@ -0,0 +1,228 @@
package canal
import (
"fmt"
"regexp"
"time"
"github.com/juju/errors"
"github.com/satori/go.uuid"
"github.com/siddontang/go-mysql/mysql"
"github.com/siddontang/go-mysql/replication"
"github.com/siddontang/go-mysql/schema"
log "github.com/sirupsen/logrus"
)
var (
	// DDL matchers used to detect statements that change table structure so
	// the cached table meta can be invalidated. The last capture group is
	// the table name and the one before it the (optional) database name;
	// backticks around identifiers are optional. The patterns themselves
	// contain backtick characters, so they cannot use raw string literals.
	expCreateTable = regexp.MustCompile("(?i)^CREATE\\sTABLE(\\sIF\\sNOT\\sEXISTS)?\\s`{0,1}(.*?)`{0,1}\\.{0,1}`{0,1}([^`\\.]+?)`{0,1}\\s.*")
	expAlterTable  = regexp.MustCompile("(?i)^ALTER\\sTABLE\\s.*?`{0,1}(.*?)`{0,1}\\.{0,1}`{0,1}([^`\\.]+?)`{0,1}\\s.*")
	expRenameTable = regexp.MustCompile("(?i)^RENAME\\sTABLE\\s.*?`{0,1}(.*?)`{0,1}\\.{0,1}`{0,1}([^`\\.]+?)`{0,1}\\s{1,}TO\\s.*?")
	expDropTable   = regexp.MustCompile("(?i)^DROP\\sTABLE(\\sIF\\sEXISTS){0,1}\\s`{0,1}(.*?)`{0,1}\\.{0,1}`{0,1}([^`\\.]+?)`{0,1}($|\\s)")
)
// startSyncer opens the replication stream, from the saved GTID set when
// useGTID is set, otherwise from the saved file/offset position.
func (c *Canal) startSyncer() (*replication.BinlogStreamer, error) {
	if c.useGTID {
		gset := c.master.GTID()
		s, err := c.syncer.StartSyncGTID(gset)
		if err != nil {
			return nil, errors.Errorf("start sync replication at GTID %v error %v", gset, err)
		}
		log.Infof("start sync binlog at GTID %v", gset)
		return s, nil
	}

	pos := c.master.Position()
	s, err := c.syncer.StartSync(pos)
	if err != nil {
		return nil, errors.Errorf("start sync replication at binlog %v error %v", pos, err)
	}
	log.Infof("start sync binlog at binlog file %v", pos)
	return s, nil
}
// runSyncBinlog is the main replication loop: it pulls binlog events from
// the streamer, dispatches them to the event handler, and persists
// progress via masterInfo. Positions are only saved at safe points
// (rotate, XID/commit, recognized DDL) — never mid-transaction.
func (c *Canal) runSyncBinlog() error {
	s, err := c.startSyncer()
	if err != nil {
		return err
	}
	savePos := false
	force := false
	for {
		ev, err := s.GetEvent(c.ctx)
		if err != nil {
			return errors.Trace(err)
		}
		savePos = false
		force = false
		pos := c.master.Position()
		curPos := pos.Pos
		//next binlog pos
		pos.Pos = ev.Header.LogPos
		// We only save position with RotateEvent and XIDEvent.
		// For RowsEvent, we can't save the position until meeting XIDEvent
		// which tells the whole transaction is over.
		// TODO: If we meet any DDL query, we must save too.
		switch e := ev.Event.(type) {
		case *replication.RotateEvent:
			pos.Name = string(e.NextLogName)
			pos.Pos = uint32(e.Position)
			log.Infof("rotate binlog to %s", pos)
			savePos = true
			force = true
			if err = c.eventHandler.OnRotate(e); err != nil {
				return errors.Trace(err)
			}
		case *replication.RowsEvent:
			// we only focus row based event
			err = c.handleRowsEvent(ev)
			if err != nil {
				e := errors.Cause(err)
				// if error is not ErrExcludedTable or ErrTableNotExist or ErrMissingTableMeta, stop canal
				if e != ErrExcludedTable &&
					e != schema.ErrTableNotExist &&
					e != schema.ErrMissingTableMeta {
					log.Errorf("handle rows event at (%s, %d) error %v", pos.Name, curPos, err)
					return errors.Trace(err)
				}
			}
			continue
		case *replication.XIDEvent:
			savePos = true
			// try to save the position later
			if err := c.eventHandler.OnXID(pos); err != nil {
				return errors.Trace(err)
			}
		case *replication.MariadbGTIDEvent:
			// try to save the GTID later
			gtid := &e.GTID
			c.master.UpdateGTID(gtid)
			if err := c.eventHandler.OnGTID(gtid); err != nil {
				return errors.Trace(err)
			}
		case *replication.GTIDEvent:
			// MySQL-flavor GTID: rebuild the "uuid:gno" textual form
			u, _ := uuid.FromBytes(e.SID)
			gset, err := mysql.ParseMysqlGTIDSet(fmt.Sprintf("%s:%d", u.String(), e.GNO))
			if err != nil {
				return errors.Trace(err)
			}
			c.master.UpdateGTID(gset)
			if err := c.eventHandler.OnGTID(gset); err != nil {
				return errors.Trace(err)
			}
		case *replication.QueryEvent:
			var (
				mb     [][]byte
				schema []byte
				table  []byte
			)
			// test the query against the known DDL statement shapes
			regexps := []regexp.Regexp{*expCreateTable, *expAlterTable, *expRenameTable, *expDropTable}
			for _, reg := range regexps {
				mb = reg.FindSubmatch(e.Query)
				if len(mb) != 0 {
					break
				}
			}
			mbLen := len(mb)
			if mbLen == 0 {
				// not a table-structure DDL, nothing to invalidate
				continue
			}
			// the first last is table name, the second last is database name(if exists)
			if len(mb[mbLen-2]) == 0 {
				// no explicit database in the statement: use the session default
				schema = e.Schema
			} else {
				schema = mb[mbLen-2]
			}
			table = mb[mbLen-1]
			savePos = true
			force = true
			c.ClearTableCache(schema, table)
			log.Infof("table structure changed, clear table cache: %s.%s\n", schema, table)
			if err = c.eventHandler.OnDDL(pos, e); err != nil {
				return errors.Trace(err)
			}
		default:
			continue
		}
		if savePos {
			c.master.Update(pos)
			c.eventHandler.OnPosSynced(pos, force)
		}
	}
	// unreachable: the loop above only exits via return
	return nil
}
// handleRowsEvent maps a raw binlog rows event onto a RowsEvent (insert,
// update or delete, v1/v2 event types only) and forwards it to the event
// handler. GetTable errors (excluded table, missing meta, ...) are
// returned to the caller, which decides whether they are fatal.
func (c *Canal) handleRowsEvent(e *replication.BinlogEvent) error {
	ev := e.Event.(*replication.RowsEvent)
	// Caveat: table may be altered at runtime.
	schema := string(ev.Table.Schema)
	table := string(ev.Table.Table)
	t, err := c.GetTable(schema, table)
	if err != nil {
		return err
	}
	var action string
	switch e.Header.EventType {
	case replication.WRITE_ROWS_EVENTv1, replication.WRITE_ROWS_EVENTv2:
		action = InsertAction
	case replication.DELETE_ROWS_EVENTv1, replication.DELETE_ROWS_EVENTv2:
		action = DeleteAction
	case replication.UPDATE_ROWS_EVENTv1, replication.UPDATE_ROWS_EVENTv2:
		action = UpdateAction
	default:
		// v0 events and anything else are unsupported
		return errors.Errorf("%s not supported now", e.Header.EventType)
	}
	events := newRowsEvent(t, action, ev.Rows)
	return c.eventHandler.OnRow(events)
}
// WaitUntilPos blocks until the canal's synced position reaches (or
// passes) pos, polling every 100ms, or fails once timeout elapses.
func (c *Canal) WaitUntilPos(pos mysql.Position, timeout time.Duration) error {
	timer := time.NewTimer(timeout)
	// the original never stopped the timer, leaving it pending after an
	// early successful return
	defer timer.Stop()
	for {
		select {
		case <-timer.C:
			return errors.Errorf("wait position %v too long > %s", pos, timeout)
		default:
			curPos := c.master.Position()
			if curPos.Compare(pos) >= 0 {
				return nil
			}
			log.Debugf("master pos is %v, wait catching %v", curPos, pos)
			time.Sleep(100 * time.Millisecond)
		}
	}
	// (the unreachable trailing `return nil` was removed)
}
// GetMasterPos queries SHOW MASTER STATUS and returns the master's current
// binlog file name and offset.
func (c *Canal) GetMasterPos() (mysql.Position, error) {
	rr, err := c.Execute("SHOW MASTER STATUS")
	if err != nil {
		// keyed/zero-value literals replace the unkeyed ones go vet flags
		return mysql.Position{}, errors.Trace(err)
	}

	// column 0 is File, column 1 is Position; lookup errors leave the zero
	// values, matching the original best-effort behavior
	name, _ := rr.GetString(0, 0)
	pos, _ := rr.GetInt(0, 1)

	return mysql.Position{Name: name, Pos: uint32(pos)}, nil
}

// CatchMasterPos waits (up to timeout) until the canal has synced past the
// master's current position.
func (c *Canal) CatchMasterPos(timeout time.Duration) error {
	pos, err := c.GetMasterPos()
	if err != nil {
		return errors.Trace(err)
	}
	return c.WaitUntilPos(pos, timeout)
}

View File

@@ -0,0 +1,35 @@
# Auto-managed Bazel build file for the vendored go-mysql/client package.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

# Library target for the MySQL client-protocol implementation.
go_library(
    name = "go_default_library",
    srcs = [
        "auth.go",
        "conn.go",
        "req.go",
        "resp.go",
        "stmt.go",
    ],
    importmap = "go-common/vendor/github.com/siddontang/go-mysql/client",
    importpath = "github.com/siddontang/go-mysql/client",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/juju/errors:go_default_library",
        "//vendor/github.com/siddontang/go-mysql/mysql:go_default_library",
        "//vendor/github.com/siddontang/go-mysql/packet:go_default_library",
        "//vendor/github.com/siddontang/go/hack:go_default_library",
    ],
)

# Source-file groups consumed by the repo-wide "all-srcs" aggregation.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

174
vendor/github.com/siddontang/go-mysql/client/auth.go generated vendored Normal file
View File

@@ -0,0 +1,174 @@
package client
import (
"bytes"
"crypto/tls"
"encoding/binary"
"github.com/juju/errors"
. "github.com/siddontang/go-mysql/mysql"
"github.com/siddontang/go-mysql/packet"
)
// readInitialHandshake parses the server's initial handshake packet
// (Protocol::HandshakeV10): protocol version, server version (skipped),
// connection id, the 8+12 byte auth-plugin salt and the capability flags.
func (c *Conn) readInitialHandshake() error {
	data, err := c.ReadPacket()
	if err != nil {
		return errors.Trace(err)
	}
	if data[0] == ERR_HEADER {
		return errors.New("read initial handshake error")
	}
	if data[0] < MinProtocolVersion {
		return errors.Errorf("invalid protocol version %d, must >= 10", data[0])
	}
	//skip mysql version
	//mysql version end with 0x00
	pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1
	//connection id length is 4
	c.connectionID = uint32(binary.LittleEndian.Uint32(data[pos : pos+4]))
	pos += 4
	// first 8 bytes of the salt (auth-plugin-data-part-1)
	c.salt = []byte{}
	c.salt = append(c.salt, data[pos:pos+8]...)
	//skip filter
	pos += 8 + 1
	//capability lower 2 bytes
	c.capability = uint32(binary.LittleEndian.Uint16(data[pos : pos+2]))
	pos += 2
	if len(data) > pos {
		//skip server charset
		//c.charset = data[pos]
		pos += 1
		c.status = binary.LittleEndian.Uint16(data[pos : pos+2])
		pos += 2
		// merge in the upper 2 bytes of the capability flags
		c.capability = uint32(binary.LittleEndian.Uint16(data[pos:pos+2]))<<16 | c.capability
		pos += 2
		//skip auth data len or [00]
		//skip reserved (all [00])
		pos += 10 + 1
		// The documentation is ambiguous about the length.
		// The official Python library uses the fixed length 12
		// mysql-proxy also use 12
		// which is not documented but seems to work.
		c.salt = append(c.salt, data[pos:pos+12]...)
	}
	return nil
}
// writeAuthHandshake sends the client HandshakeResponse41 packet:
// capability flags, max packet size, charset, user name, the
// mysql_native_password-scrambled password and (optionally) the database
// name. When TLSConfig is set it first sends an SSLRequest packet and
// upgrades the connection to TLS before transmitting credentials.
func (c *Conn) writeAuthHandshake() error {
	// Adjust client capability flags based on server support
	capability := CLIENT_PROTOCOL_41 | CLIENT_SECURE_CONNECTION |
		CLIENT_LONG_PASSWORD | CLIENT_TRANSACTIONS | CLIENT_LONG_FLAG
	// To enable TLS / SSL
	if c.TLSConfig != nil {
		capability |= CLIENT_PLUGIN_AUTH
		capability |= CLIENT_SSL
	}
	// only advertise what the server also supports
	capability &= c.capability
	//packet length
	//capbility 4
	//max-packet size 4
	//charset 1
	//reserved all[0] 23
	length := 4 + 4 + 1 + 23
	//username
	length += len(c.user) + 1
	//we only support secure connection
	auth := CalcPassword(c.salt, []byte(c.password))
	length += 1 + len(auth)
	if len(c.db) > 0 {
		capability |= CLIENT_CONNECT_WITH_DB
		length += len(c.db) + 1
	}
	// mysql_native_password + null-terminated
	length += 21 + 1
	c.capability = capability
	// 4 extra bytes reserved for the packet header
	data := make([]byte, length+4)
	//capability [32 bit]
	data[4] = byte(capability)
	data[5] = byte(capability >> 8)
	data[6] = byte(capability >> 16)
	data[7] = byte(capability >> 24)
	//MaxPacketSize [32 bit] (none)
	//data[8] = 0x00
	//data[9] = 0x00
	//data[10] = 0x00
	//data[11] = 0x00
	//Charset [1 byte]
	//use default collation id 33 here, is utf-8
	data[12] = byte(DEFAULT_COLLATION_ID)
	// SSL Connection Request Packet
	// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
	if c.TLSConfig != nil {
		// Send TLS / SSL request packet
		if err := c.WritePacket(data[:(4+4+1+23)+4]); err != nil {
			return err
		}
		// Switch to TLS
		tlsConn := tls.Client(c.Conn.Conn, c.TLSConfig)
		if err := tlsConn.Handshake(); err != nil {
			return err
		}
		// keep the packet sequence number across the TLS upgrade
		currentSequence := c.Sequence
		c.Conn = packet.NewConn(tlsConn)
		c.Sequence = currentSequence
	}
	//Filler [23 bytes] (all 0x00)
	pos := 13 + 23
	//User [null terminated string]
	if len(c.user) > 0 {
		pos += copy(data[pos:], c.user)
	}
	data[pos] = 0x00
	pos++
	// auth [length encoded integer]
	data[pos] = byte(len(auth))
	pos += 1 + copy(data[pos+1:], auth)
	// db [null terminated string]
	if len(c.db) > 0 {
		pos += copy(data[pos:], c.db)
		data[pos] = 0x00
		pos++
	}
	// Assume native client during response
	pos += copy(data[pos:], "mysql_native_password")
	data[pos] = 0x00
	return c.WritePacket(data)
}

254
vendor/github.com/siddontang/go-mysql/client/conn.go generated vendored Normal file
View File

@@ -0,0 +1,254 @@
package client
import (
"crypto/tls"
"fmt"
"net"
"strings"
"time"
"github.com/juju/errors"
. "github.com/siddontang/go-mysql/mysql"
"github.com/siddontang/go-mysql/packet"
)
// Conn is a client-side MySQL connection. It embeds *packet.Conn for
// low-level packet framing; the other fields hold the credentials used
// to authenticate and the session state negotiated during the handshake.
type Conn struct {
	*packet.Conn

	// Credentials and default database supplied to Connect.
	user     string
	password string
	db       string

	// TLSConfig, when non-nil, makes writeAuthHandshake upgrade the
	// connection to TLS before authenticating.
	TLSConfig *tls.Config

	// capability holds the negotiated client/server capability flags;
	// status caches the server status flags from the last OK/EOF packet.
	capability uint32

	status uint16

	// charset is the session character set (changed via SetCharset).
	charset string

	// salt is the auth seed from the server's initial handshake,
	// consumed by CalcPassword in writeAuthHandshake.
	salt []byte

	// connectionID is the server connection/thread id — presumably set
	// in readInitialHandshake (not fully visible here); verify there.
	connectionID uint32
}
// getNetProto reports the network to dial for addr: "unix" when the
// address looks like a filesystem path (contains a slash), otherwise
// plain "tcp".
func getNetProto(addr string) string {
	if strings.Contains(addr, "/") {
		return "unix"
	}
	return "tcp"
}
// Connect to a MySQL server, addr can be ip:port, or a unix socket domain like /var/sock.
// Accepts a series of configuration functions as a variadic argument.
// The dial uses a fixed 10-second timeout. Option functions run after
// the defaults are set but before the handshake, so they may adjust
// e.g. TLSConfig.
func Connect(addr string, user string, password string, dbName string, options ...func(*Conn)) (*Conn, error) {
	proto := getNetProto(addr)

	c := new(Conn)
	var err error

	conn, err := net.DialTimeout(proto, addr, 10*time.Second)
	if err != nil {
		return nil, errors.Trace(err)
	}

	c.Conn = packet.NewConn(conn)
	c.user = user
	c.password = password
	c.db = dbName

	//use default charset here, utf-8
	c.charset = DEFAULT_CHARSET

	// Apply configuration functions.
	for i := range options {
		options[i](c)
	}

	if err = c.handshake(); err != nil {
		return nil, errors.Trace(err)
	}

	return c, nil
}
// handshake performs the client half of the MySQL connection phase:
// read the server's initial handshake, send the auth response, then
// expect an OK packet. The connection is closed on any failure.
func (c *Conn) handshake() error {
	var err error
	if err = c.readInitialHandshake(); err != nil {
		c.Close()
		return errors.Trace(err)
	}

	if err := c.writeAuthHandshake(); err != nil {
		c.Close()
		return errors.Trace(err)
	}

	if _, err := c.readOK(); err != nil {
		c.Close()
		return errors.Trace(err)
	}

	return nil
}
// Close closes the underlying packet connection.
func (c *Conn) Close() error {
	return c.Conn.Close()
}

// Ping sends COM_PING and waits for the server's OK reply.
func (c *Conn) Ping() error {
	if err := c.writeCommand(COM_PING); err != nil {
		return errors.Trace(err)
	}

	if _, err := c.readOK(); err != nil {
		return errors.Trace(err)
	}

	return nil
}

// UseDB switches the default database via COM_INIT_DB. It is a no-op
// when dbName is already the current database.
func (c *Conn) UseDB(dbName string) error {
	if c.db == dbName {
		return nil
	}

	if err := c.writeCommandStr(COM_INIT_DB, dbName); err != nil {
		return errors.Trace(err)
	}

	if _, err := c.readOK(); err != nil {
		return errors.Trace(err)
	}

	// Only record the switch once the server acknowledged it.
	c.db = dbName
	return nil
}

// GetDB returns the current default database name.
func (c *Conn) GetDB() string {
	return c.db
}
// Execute runs a query. With no args it goes out as a plain COM_QUERY;
// with args the query is prepared, executed as a statement, and the
// statement is closed again before returning.
func (c *Conn) Execute(command string, args ...interface{}) (*Result, error) {
	if len(args) == 0 {
		return c.exec(command)
	}

	s, err := c.Prepare(command)
	if err != nil {
		return nil, errors.Trace(err)
	}

	r, err := s.Execute(args...)
	s.Close()
	return r, err
}
// Begin starts a transaction by issuing a BEGIN statement.
func (c *Conn) Begin() error {
	_, err := c.exec("BEGIN")
	return errors.Trace(err)
}

// Commit commits the current transaction.
func (c *Conn) Commit() error {
	_, err := c.exec("COMMIT")
	return errors.Trace(err)
}

// Rollback aborts the current transaction.
func (c *Conn) Rollback() error {
	_, err := c.exec("ROLLBACK")
	return errors.Trace(err)
}
// SetCharset issues SET NAMES to change the session character set.
// The value is cached, so repeated calls with the same charset are
// no-ops.
func (c *Conn) SetCharset(charset string) error {
	if c.charset == charset {
		return nil
	}

	if _, err := c.exec(fmt.Sprintf("SET NAMES %s", charset)); err != nil {
		return errors.Trace(err)
	}

	c.charset = charset
	return nil
}
// FieldList sends COM_FIELD_LIST and returns the column definitions of
// table, optionally filtered by wildcard (may be empty).
//
// Cleanups vs. the previous revision: removed the unreachable trailing
// return, the else branch after a terminating if, and the unused
// pre-declared *Field variable; wire behavior is unchanged.
func (c *Conn) FieldList(table string, wildcard string) ([]*Field, error) {
	if err := c.writeCommandStrStr(COM_FIELD_LIST, table, wildcard); err != nil {
		return nil, errors.Trace(err)
	}

	data, err := c.ReadPacket()
	if err != nil {
		return nil, errors.Trace(err)
	}

	// The server replies with either an ERR packet or a stream of
	// column definition packets terminated by EOF.
	if data[0] == ERR_HEADER {
		return nil, c.handleErrorPacket(data)
	}

	fs := make([]*Field, 0, 4)
	for {
		if data, err = c.ReadPacket(); err != nil {
			return nil, errors.Trace(err)
		}

		// EOF Packet marks the end of the column definitions.
		if c.isEOFPacket(data) {
			return fs, nil
		}

		f, err := FieldData(data).Parse()
		if err != nil {
			return nil, errors.Trace(err)
		}
		fs = append(fs, f)
	}
}
// SetAutoCommit enables autocommit on the session if the cached server
// status flags say it is not already enabled.
func (c *Conn) SetAutoCommit() error {
	if !c.IsAutoCommit() {
		if _, err := c.exec("SET AUTOCOMMIT = 1"); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}

// IsAutoCommit reports whether the last seen server status flags
// include SERVER_STATUS_AUTOCOMMIT.
func (c *Conn) IsAutoCommit() bool {
	return c.status&SERVER_STATUS_AUTOCOMMIT > 0
}

// IsInTransaction reports whether the last seen server status flags
// indicate an open transaction.
func (c *Conn) IsInTransaction() bool {
	return c.status&SERVER_STATUS_IN_TRANS > 0
}

// GetCharset returns the session character set.
func (c *Conn) GetCharset() string {
	return c.charset
}

// GetConnectionID returns the server-assigned connection id.
func (c *Conn) GetConnectionID() uint32 {
	return c.connectionID
}

// HandleOKPacket is an exported wrapper around handleOKPacket that
// discards the error result.
func (c *Conn) HandleOKPacket(data []byte) *Result {
	r, _ := c.handleOKPacket(data)
	return r
}

// HandleErrorPacket is an exported wrapper converting an ERR packet
// into an error value.
func (c *Conn) HandleErrorPacket(data []byte) error {
	return c.handleErrorPacket(data)
}

// ReadOKPacket reads the next packet and parses it as an OK packet.
func (c *Conn) ReadOKPacket() (*Result, error) {
	return c.readOK()
}

// exec sends a COM_QUERY and reads back a text-protocol result.
func (c *Conn) exec(query string) (*Result, error) {
	if err := c.writeCommandStr(COM_QUERY, query); err != nil {
		return nil, errors.Trace(err)
	}

	return c.readResult(false)
}

72
vendor/github.com/siddontang/go-mysql/client/req.go generated vendored Normal file
View File

@@ -0,0 +1,72 @@
package client
// writeCommand sends a bare command packet whose payload is the single
// command byte. The sequence number is reset because each command
// starts a new command phase.
func (c *Conn) writeCommand(command byte) error {
	c.ResetSequence()

	return c.WritePacket([]byte{
		0x01, //1 bytes long
		0x00,
		0x00,
		0x00, //sequence
		command,
	})
}

// writeCommandBuf sends a command packet with a raw []byte argument
// after the command byte. The 4-byte header is left zeroed —
// presumably WritePacket fills in length/sequence; confirm in
// packet.Conn.
func (c *Conn) writeCommandBuf(command byte, arg []byte) error {
	c.ResetSequence()

	length := len(arg) + 1
	data := make([]byte, length+4)
	data[4] = command

	copy(data[5:], arg)

	return c.WritePacket(data)
}
// writeCommandStr sends a command packet whose payload is the command
// byte followed by the string argument. It is the string variant of
// writeCommandBuf and delegates to it to avoid duplicating the
// packet-building logic (the previous body was an exact copy).
func (c *Conn) writeCommandStr(command byte, arg string) error {
	return c.writeCommandBuf(command, []byte(arg))
}
// writeCommandUint32 sends a command with a fixed 4-byte little-endian
// argument (e.g. COM_STMT_CLOSE with a statement id).
func (c *Conn) writeCommandUint32(command byte, arg uint32) error {
	c.ResetSequence()

	return c.WritePacket([]byte{
		0x05, //5 bytes long
		0x00,
		0x00,
		0x00, //sequence
		command,
		byte(arg),
		byte(arg >> 8),
		byte(arg >> 16),
		byte(arg >> 24),
	})
}

// writeCommandStrStr sends a command with two string arguments
// separated by a NUL byte (used by COM_FIELD_LIST: table, wildcard).
func (c *Conn) writeCommandStrStr(command byte, arg1 string, arg2 string) error {
	c.ResetSequence()

	// 4 header bytes + command + arg1 + NUL + arg2.
	data := make([]byte, 4, 6+len(arg1)+len(arg2))
	data = append(data, command)
	data = append(data, arg1...)
	data = append(data, 0)
	data = append(data, arg2...)

	return c.WritePacket(data)
}

218
vendor/github.com/siddontang/go-mysql/client/resp.go generated vendored Normal file
View File

@@ -0,0 +1,218 @@
package client
import (
"encoding/binary"
"github.com/juju/errors"
. "github.com/siddontang/go-mysql/mysql"
"github.com/siddontang/go/hack"
)
// readUntilEOF drains and discards packets until an EOF packet is
// seen. It is used to skip streams (e.g. column definitions in a
// prepare reply) whose content the caller does not need.
//
// The previous revision had an unreachable return after the infinite
// loop (flagged by go vet); this version removes it and drops the
// named result without changing behavior.
func (c *Conn) readUntilEOF() error {
	for {
		data, err := c.ReadPacket()
		if err != nil {
			return err
		}

		// EOF Packet terminates the stream.
		if c.isEOFPacket(data) {
			return nil
		}
	}
}
// isEOFPacket reports whether data is an EOF packet: a 0xfe header
// with a short payload (at most 5 bytes — header plus, in protocol
// 4.1, warnings and status flags).
func (c *Conn) isEOFPacket(data []byte) bool {
	return data[0] == EOF_HEADER && len(data) <= 5
}

// handleOKPacket parses an OK packet into a Result: affected rows,
// last insert id, and — when the protocol supports it — server status
// flags, which are also cached on the connection.
func (c *Conn) handleOKPacket(data []byte) (*Result, error) {
	var n int
	var pos int = 1

	r := new(Result)

	r.AffectedRows, _, n = LengthEncodedInt(data[pos:])
	pos += n
	r.InsertId, _, n = LengthEncodedInt(data[pos:])
	pos += n

	if c.capability&CLIENT_PROTOCOL_41 > 0 {
		r.Status = binary.LittleEndian.Uint16(data[pos:])
		c.status = r.Status
		pos += 2

		//todo:strict_mode, check warnings as error
		//Warnings := binary.LittleEndian.Uint16(data[pos:])
		//pos += 2
	} else if c.capability&CLIENT_TRANSACTIONS > 0 {
		r.Status = binary.LittleEndian.Uint16(data[pos:])
		c.status = r.Status
		pos += 2
	}

	//new ok package will check CLIENT_SESSION_TRACK too, but I don't support it now.

	//skip info
	return r, nil
}

// handleErrorPacket converts an ERR packet into a *MyError carrying
// the error code, the SQL state (protocol 4.1+ only) and the message.
func (c *Conn) handleErrorPacket(data []byte) error {
	e := new(MyError)

	var pos int = 1

	e.Code = binary.LittleEndian.Uint16(data[pos:])
	pos += 2

	if c.capability&CLIENT_PROTOCOL_41 > 0 {
		//skip '#'
		pos++
		e.State = hack.String(data[pos : pos+5])
		pos += 5
	}

	e.Message = hack.String(data[pos:])

	return e
}
// readOK reads one packet and expects it to be either OK or ERR.
func (c *Conn) readOK() (*Result, error) {
	data, err := c.ReadPacket()
	if err != nil {
		return nil, errors.Trace(err)
	}

	if data[0] == OK_HEADER {
		return c.handleOKPacket(data)
	} else if data[0] == ERR_HEADER {
		return nil, c.handleErrorPacket(data)
	} else {
		return nil, errors.New("invalid ok packet")
	}
}

// readResult reads a command response: OK, ERR, a LOCAL INFILE request
// (not supported, treated as malformed), or a result set. binary
// selects binary-protocol row decoding (prepared statements) instead
// of text rows.
func (c *Conn) readResult(binary bool) (*Result, error) {
	data, err := c.ReadPacket()
	if err != nil {
		return nil, errors.Trace(err)
	}

	if data[0] == OK_HEADER {
		return c.handleOKPacket(data)
	} else if data[0] == ERR_HEADER {
		return nil, c.handleErrorPacket(data)
	} else if data[0] == LocalInFile_HEADER {
		return nil, ErrMalformPacket
	}

	return c.readResultset(data, binary)
}
// readResultset parses a result set whose first packet (data) carries
// the column count, followed by column definitions and then rows.
func (c *Conn) readResultset(data []byte, binary bool) (*Result, error) {
	result := &Result{
		Status:       0,
		InsertId:     0,
		AffectedRows: 0,

		Resultset: &Resultset{},
	}

	// column count
	count, _, n := LengthEncodedInt(data)

	// The length-encoded count must occupy the whole packet.
	if n-len(data) != 0 {
		return nil, ErrMalformPacket
	}

	result.Fields = make([]*Field, count)
	result.FieldNames = make(map[string]int, count)

	if err := c.readResultColumns(result); err != nil {
		return nil, errors.Trace(err)
	}

	if err := c.readResultRows(result, binary); err != nil {
		return nil, errors.Trace(err)
	}

	return result, nil
}

// readResultColumns reads column definition packets until EOF,
// filling result.Fields and the name->index map.
func (c *Conn) readResultColumns(result *Result) (err error) {
	var i int = 0
	var data []byte

	for {
		data, err = c.ReadPacket()
		if err != nil {
			return
		}

		// EOF Packet
		if c.isEOFPacket(data) {
			if c.capability&CLIENT_PROTOCOL_41 > 0 {
				//result.Warnings = binary.LittleEndian.Uint16(data[1:])
				//todo add strict_mode, warning will be treat as error
				result.Status = binary.LittleEndian.Uint16(data[3:])
				c.status = result.Status
			}

			// Receiving fewer columns than announced is a protocol
			// error.
			if i != len(result.Fields) {
				err = ErrMalformPacket
			}

			return
		}

		result.Fields[i], err = FieldData(data).Parse()
		if err != nil {
			return
		}

		result.FieldNames[hack.String(result.Fields[i].Name)] = i

		i++
	}
}

// readResultRows reads raw row packets until EOF, then decodes them
// into result.Values using text or binary protocol depending on
// isBinary.
func (c *Conn) readResultRows(result *Result, isBinary bool) (err error) {
	var data []byte

	for {
		data, err = c.ReadPacket()

		if err != nil {
			return
		}

		// EOF Packet
		if c.isEOFPacket(data) {
			if c.capability&CLIENT_PROTOCOL_41 > 0 {
				//result.Warnings = binary.LittleEndian.Uint16(data[1:])
				//todo add strict_mode, warning will be treat as error
				result.Status = binary.LittleEndian.Uint16(data[3:])
				c.status = result.Status
			}

			break
		}

		result.RowDatas = append(result.RowDatas, data)
	}

	// Decode only after the whole set is buffered.
	result.Values = make([][]interface{}, len(result.RowDatas))

	for i := range result.Values {
		result.Values[i], err = result.RowDatas[i].Parse(result.Fields, isBinary)

		if err != nil {
			return errors.Trace(err)
		}
	}

	return nil
}

215
vendor/github.com/siddontang/go-mysql/client/stmt.go generated vendored Normal file
View File

@@ -0,0 +1,215 @@
package client
import (
"encoding/binary"
"fmt"
"math"
"github.com/juju/errors"
. "github.com/siddontang/go-mysql/mysql"
)
// Stmt represents a server-side prepared statement created by
// Conn.Prepare.
type Stmt struct {
	conn *Conn
	// id is the statement id assigned by the server in the prepare
	// reply.
	id uint32
	// query is the original query text. NOTE(review): Prepare in this
	// file does not populate it.
	query string

	// params and columns are the placeholder and result column counts
	// from the prepare reply.
	params  int
	columns int
}

// ParamNum returns the number of placeholder parameters.
func (s *Stmt) ParamNum() int {
	return s.params
}

// ColumnNum returns the number of result columns.
func (s *Stmt) ColumnNum() int {
	return s.columns
}

// Execute binds args, runs the statement, and reads the
// binary-protocol result.
func (s *Stmt) Execute(args ...interface{}) (*Result, error) {
	if err := s.write(args...); err != nil {
		return nil, errors.Trace(err)
	}

	return s.conn.readResult(true)
}

// Close deallocates the statement on the server with COM_STMT_CLOSE;
// no reply is read.
func (s *Stmt) Close() error {
	if err := s.conn.writeCommandUint32(COM_STMT_CLOSE, s.id); err != nil {
		return errors.Trace(err)
	}

	return nil
}
// write builds and sends the COM_STMT_EXECUTE packet for args:
// NULL bitmap, the new-params-bound flag, parameter types (two bytes
// each; the second byte 0x80 marks unsigned), and the binary-encoded
// values.
func (s *Stmt) write(args ...interface{}) error {
	paramsNum := s.params

	if len(args) != paramsNum {
		return fmt.Errorf("argument mismatch, need %d but got %d", s.params, len(args))
	}

	paramTypes := make([]byte, paramsNum<<1)
	paramValues := make([][]byte, paramsNum)

	//NULL-bitmap, length: (num-params+7)
	nullBitmap := make([]byte, (paramsNum+7)>>3)

	// Fixed part: command(1) + stmt id(4) + flags(1) +
	// iteration count(4) + null bitmap + bound flag(1) + types.
	var length int = int(1 + 4 + 1 + 4 + ((paramsNum + 7) >> 3) + 1 + (paramsNum << 1))

	var newParamBoundFlag byte = 0

	for i := range args {
		if args[i] == nil {
			// NULL: set the bitmap bit; no value bytes are sent.
			nullBitmap[i/8] |= (1 << (uint(i) % 8))
			paramTypes[i<<1] = MYSQL_TYPE_NULL
			continue
		}

		newParamBoundFlag = 1

		switch v := args[i].(type) {
		case int8:
			paramTypes[i<<1] = MYSQL_TYPE_TINY
			paramValues[i] = []byte{byte(v)}
		case int16:
			paramTypes[i<<1] = MYSQL_TYPE_SHORT
			paramValues[i] = Uint16ToBytes(uint16(v))
		case int32:
			paramTypes[i<<1] = MYSQL_TYPE_LONG
			paramValues[i] = Uint32ToBytes(uint32(v))
		case int:
			paramTypes[i<<1] = MYSQL_TYPE_LONGLONG
			paramValues[i] = Uint64ToBytes(uint64(v))
		case int64:
			paramTypes[i<<1] = MYSQL_TYPE_LONGLONG
			paramValues[i] = Uint64ToBytes(uint64(v))
		case uint8:
			paramTypes[i<<1] = MYSQL_TYPE_TINY
			paramTypes[(i<<1)+1] = 0x80
			paramValues[i] = []byte{v}
		case uint16:
			paramTypes[i<<1] = MYSQL_TYPE_SHORT
			paramTypes[(i<<1)+1] = 0x80
			paramValues[i] = Uint16ToBytes(uint16(v))
		case uint32:
			paramTypes[i<<1] = MYSQL_TYPE_LONG
			paramTypes[(i<<1)+1] = 0x80
			paramValues[i] = Uint32ToBytes(uint32(v))
		case uint:
			paramTypes[i<<1] = MYSQL_TYPE_LONGLONG
			paramTypes[(i<<1)+1] = 0x80
			paramValues[i] = Uint64ToBytes(uint64(v))
		case uint64:
			paramTypes[i<<1] = MYSQL_TYPE_LONGLONG
			paramTypes[(i<<1)+1] = 0x80
			paramValues[i] = Uint64ToBytes(uint64(v))
		case bool:
			// Booleans travel as TINY 0/1.
			paramTypes[i<<1] = MYSQL_TYPE_TINY
			if v {
				paramValues[i] = []byte{1}
			} else {
				paramValues[i] = []byte{0}
			}
		case float32:
			paramTypes[i<<1] = MYSQL_TYPE_FLOAT
			paramValues[i] = Uint32ToBytes(math.Float32bits(v))
		case float64:
			paramTypes[i<<1] = MYSQL_TYPE_DOUBLE
			paramValues[i] = Uint64ToBytes(math.Float64bits(v))
		case string:
			// Strings and blobs are length-encoded.
			paramTypes[i<<1] = MYSQL_TYPE_STRING
			paramValues[i] = append(PutLengthEncodedInt(uint64(len(v))), v...)
		case []byte:
			paramTypes[i<<1] = MYSQL_TYPE_STRING
			paramValues[i] = append(PutLengthEncodedInt(uint64(len(v))), v...)
		default:
			return fmt.Errorf("invalid argument type %T", args[i])
		}

		length += len(paramValues[i])
	}

	data := make([]byte, 4, 4+length)

	data = append(data, COM_STMT_EXECUTE)
	data = append(data, byte(s.id), byte(s.id>>8), byte(s.id>>16), byte(s.id>>24))

	//flag: CURSOR_TYPE_NO_CURSOR
	data = append(data, 0x00)

	//iteration-count, always 1
	data = append(data, 1, 0, 0, 0)

	if s.params > 0 {
		data = append(data, nullBitmap...)

		//new-params-bound-flag
		data = append(data, newParamBoundFlag)

		if newParamBoundFlag == 1 {
			//type of each parameter, length: num-params * 2
			data = append(data, paramTypes...)

			//value of each parameter
			for _, v := range paramValues {
				data = append(data, v...)
			}
		}
	}

	s.conn.ResetSequence()

	return s.conn.WritePacket(data)
}
// Prepare sends COM_STMT_PREPARE for query and parses the prepare
// reply (statement id, column and parameter counts), skipping over the
// parameter and column definition packets that follow.
func (c *Conn) Prepare(query string) (*Stmt, error) {
	if err := c.writeCommandStr(COM_STMT_PREPARE, query); err != nil {
		return nil, errors.Trace(err)
	}

	data, err := c.ReadPacket()
	if err != nil {
		return nil, errors.Trace(err)
	}

	if data[0] == ERR_HEADER {
		return nil, c.handleErrorPacket(data)
	} else if data[0] != OK_HEADER {
		return nil, ErrMalformPacket
	}

	s := new(Stmt)
	s.conn = c

	pos := 1

	//for statement id
	s.id = binary.LittleEndian.Uint32(data[pos:])
	pos += 4

	//number columns
	s.columns = int(binary.LittleEndian.Uint16(data[pos:]))
	pos += 2

	//number params
	s.params = int(binary.LittleEndian.Uint16(data[pos:]))
	pos += 2

	//warnings
	//warnings = binary.LittleEndian.Uint16(data[pos:])

	// The definitions themselves are not kept; just drain to each
	// terminating EOF packet.
	if s.params > 0 {
		if err := s.conn.readUntilEOF(); err != nil {
			return nil, errors.Trace(err)
		}
	}

	if s.columns > 0 {
		if err := s.conn.readUntilEOF(); err != nil {
			return nil, errors.Trace(err)
		}
	}

	return s, nil
}

30
vendor/github.com/siddontang/go-mysql/dump/BUILD.bazel generated vendored Normal file
View File

@@ -0,0 +1,30 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"dump.go",
"parser.go",
],
importmap = "go-common/vendor/github.com/siddontang/go-mysql/dump",
importpath = "github.com/siddontang/go-mysql/dump",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/juju/errors:go_default_library",
"//vendor/github.com/siddontang/go-mysql/mysql:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

194
vendor/github.com/siddontang/go-mysql/dump/dump.go generated vendored Normal file
View File

@@ -0,0 +1,194 @@
package dump
import (
"fmt"
"io"
"os"
"os/exec"
"strings"
"github.com/juju/errors"
. "github.com/siddontang/go-mysql/mysql"
)
// Unlike mysqldump, Dumper is designed for parsing and syncing data easily.
type Dumper struct {
	// mysqldump execution path, like mysqldump or /usr/bin/mysqldump, etc...
	ExecutionPath string

	// Addr is host or host:port, split into --host/--port by Dump.
	Addr     string
	User     string
	Password string

	// Will override Databases
	Tables  []string
	TableDB string

	Databases []string

	// Charset becomes --default-character-set when non-empty.
	Charset string

	// IgnoreTables maps db name -> tables passed as --ignore-table.
	IgnoreTables map[string][]string

	// ErrOut receives mysqldump's stderr.
	ErrOut io.Writer

	// masterDataSkipped omits --master-data when true.
	masterDataSkipped bool
	// maxAllowedPacket is in MB; 0 keeps mysqldump's default.
	maxAllowedPacket int
}

// NewDumper creates a Dumper using the mysqldump binary at
// executionPath (resolved through $PATH). An empty executionPath
// returns (nil, nil) — callers must handle a nil Dumper.
func NewDumper(executionPath string, addr string, user string, password string) (*Dumper, error) {
	if len(executionPath) == 0 {
		return nil, nil
	}

	path, err := exec.LookPath(executionPath)
	if err != nil {
		return nil, errors.Trace(err)
	}

	d := new(Dumper)
	d.ExecutionPath = path
	d.Addr = addr
	d.User = user
	d.Password = password
	d.Tables = make([]string, 0, 16)
	d.Databases = make([]string, 0, 16)
	d.Charset = DEFAULT_CHARSET
	d.IgnoreTables = make(map[string][]string)
	d.masterDataSkipped = false
	d.ErrOut = os.Stderr

	return d, nil
}

// SetCharset sets the character set passed to mysqldump.
func (d *Dumper) SetCharset(charset string) {
	d.Charset = charset
}

// SetErrOut redirects mysqldump's stderr.
func (d *Dumper) SetErrOut(o io.Writer) {
	d.ErrOut = o
}

// In some cloud MySQL, we have no privilege to use `--master-data`.
func (d *Dumper) SkipMasterData(v bool) {
	d.masterDataSkipped = v
}

// SetMaxAllowedPacket sets --max-allowed-packet (in MB).
func (d *Dumper) SetMaxAllowedPacket(i int) {
	d.maxAllowedPacket = i
}

// AddDatabases appends databases to dump in full.
func (d *Dumper) AddDatabases(dbs ...string) {
	d.Databases = append(d.Databases, dbs...)
}

// AddTables appends tables of db to dump. Switching to a different db
// resets previously added tables — only a single TableDB is supported.
func (d *Dumper) AddTables(db string, tables ...string) {
	if d.TableDB != db {
		d.TableDB = db
		d.Tables = d.Tables[0:0]
	}

	d.Tables = append(d.Tables, tables...)
}
// AddIgnoreTables registers tables of db that mysqldump should skip
// (emitted as --ignore-table=db.table arguments by Dump).
// Appending to a missing key works directly, so the previous
// two-step comma-ok lookup was unnecessary.
func (d *Dumper) AddIgnoreTables(db string, tables ...string) {
	d.IgnoreTables[db] = append(d.IgnoreTables[db], tables...)
}
// Reset clears all previously added databases, tables and ignore
// rules; the execution path and connection settings are kept.
func (d *Dumper) Reset() {
	d.Tables = d.Tables[0:0]
	d.TableDB = ""
	d.IgnoreTables = make(map[string][]string)
	d.Databases = d.Databases[0:0]
}

// Dump runs mysqldump with flags built from the Dumper's fields and
// streams its stdout into w.
// NOTE(review): the password is passed as --password=... on the
// command line, which is visible in the process list.
func (d *Dumper) Dump(w io.Writer) error {
	args := make([]string, 0, 16)

	// Common args
	seps := strings.Split(d.Addr, ":")
	args = append(args, fmt.Sprintf("--host=%s", seps[0]))
	if len(seps) > 1 {
		args = append(args, fmt.Sprintf("--port=%s", seps[1]))
	}

	args = append(args, fmt.Sprintf("--user=%s", d.User))
	args = append(args, fmt.Sprintf("--password=%s", d.Password))

	if !d.masterDataSkipped {
		args = append(args, "--master-data")
	}

	if d.maxAllowedPacket > 0 {
		// mysqldump param should be --max-allowed-packet=%dM not be --max_allowed_packet=%dM
		args = append(args, fmt.Sprintf("--max-allowed-packet=%dM", d.maxAllowedPacket))
	}

	args = append(args, "--single-transaction")
	args = append(args, "--skip-lock-tables")

	// Disable unnecessary data
	args = append(args, "--compact")
	args = append(args, "--skip-opt")
	args = append(args, "--quick")

	// We only care about data
	args = append(args, "--no-create-info")

	// Multi row is easy for us to parse the data
	args = append(args, "--skip-extended-insert")

	for db, tables := range d.IgnoreTables {
		for _, table := range tables {
			args = append(args, fmt.Sprintf("--ignore-table=%s.%s", db, table))
		}
	}

	if len(d.Tables) == 0 && len(d.Databases) == 0 {
		args = append(args, "--all-databases")
	} else if len(d.Tables) == 0 {
		args = append(args, "--databases")
		args = append(args, d.Databases...)
	} else {
		args = append(args, d.TableDB)
		args = append(args, d.Tables...)

		// If we only dump some tables, the dump data will not have database name
		// which makes us hard to parse, so here we add it manually.
		// NOTE(review): the Write error is silently ignored here.
		w.Write([]byte(fmt.Sprintf("USE `%s`;\n", d.TableDB)))
	}

	if len(d.Charset) != 0 {
		args = append(args, fmt.Sprintf("--default-character-set=%s", d.Charset))
	}

	cmd := exec.Command(d.ExecutionPath, args...)

	cmd.Stderr = d.ErrOut
	cmd.Stdout = w

	return cmd.Run()
}
// Dump MySQL and parse immediately.
// The dump output is piped into Parse on a goroutine; both ends of the
// pipe are closed with the first error observed, and that error is
// returned.
func (d *Dumper) DumpAndParse(h ParseHandler) error {
	r, w := io.Pipe()

	done := make(chan error, 1)
	go func() {
		err := Parse(r, h, !d.masterDataSkipped)
		// Closing the read side unblocks the writer if parsing
		// stopped early.
		r.CloseWithError(err)
		done <- err
	}()

	err := d.Dump(w)
	w.CloseWithError(err)

	err = <-done

	return errors.Trace(err)
}

190
vendor/github.com/siddontang/go-mysql/dump/parser.go generated vendored Normal file
View File

@@ -0,0 +1,190 @@
package dump
import (
"bufio"
"fmt"
"io"
"regexp"
"strconv"
"strings"
"github.com/juju/errors"
"github.com/siddontang/go-mysql/mysql"
)
var (
	// ErrSkip may be returned by ParseHandler callbacks to report a
	// handler-side problem that Parse should ignore and continue past.
	ErrSkip = errors.New("Handler error, but skipped")
)

// ParseHandler receives events from Parse.
type ParseHandler interface {
	// Parse CHANGE MASTER TO MASTER_LOG_FILE=name, MASTER_LOG_POS=pos;
	BinLog(name string, pos uint64) error

	// Data is called once per INSERT row with the raw column values.
	Data(schema string, table string, values []string) error
}
// The dump-parsing patterns are compiled once at package load; using
// MustCompile in the var block replaces the previous init() function
// (the patterns are constant, so compilation cannot fail at runtime).
var (
	// binlogExp matches the CHANGE MASTER TO line and captures the
	// binlog file name and position.
	binlogExp = regexp.MustCompile("^CHANGE MASTER TO MASTER_LOG_FILE='(.+)', MASTER_LOG_POS=(\\d+);")
	// useExp captures the database name from a USE statement.
	useExp = regexp.MustCompile("^USE `(.+)`;")
	// valuesExp captures the table name and raw value list of a
	// single-row INSERT.
	valuesExp = regexp.MustCompile("^INSERT INTO `(.+?)` VALUES \\((.+)\\);$")
)
// Parse the dump data that Dumper generates, feeding binlog position
// and row data to h.
// It can not parse all the data formats that mysqldump outputs.
func Parse(r io.Reader, h ParseHandler, parseBinlogPos bool) error {
	rb := bufio.NewReaderSize(r, 1024*16)

	var db string
	var binlogParsed bool

	for {
		line, err := rb.ReadString('\n')
		if err != nil && err != io.EOF {
			return errors.Trace(err)
		} else if mysql.ErrorEqual(err, io.EOF) {
			break
		}

		// Ignore '\n' on Linux or '\r\n' on Windows by keeping only
		// the text up to and including the first ';'.
		line = strings.SplitAfter(line, ";")[0]

		// Report the CHANGE MASTER position only once.
		if parseBinlogPos && !binlogParsed {
			if m := binlogExp.FindAllStringSubmatch(line, -1); len(m) == 1 {
				name := m[0][1]
				pos, err := strconv.ParseUint(m[0][2], 10, 64)
				if err != nil {
					return errors.Errorf("parse binlog %v err, invalid number", line)
				}

				if err = h.BinLog(name, pos); err != nil && err != ErrSkip {
					return errors.Trace(err)
				}

				binlogParsed = true
			}
		}

		// Track the current database from USE statements.
		if m := useExp.FindAllStringSubmatch(line, -1); len(m) == 1 {
			db = m[0][1]
		}

		if m := valuesExp.FindAllStringSubmatch(line, -1); len(m) == 1 {
			table := m[0][1]

			values, err := parseValues(m[0][2])
			if err != nil {
				return errors.Errorf("parse values %v err", line)
			}

			if err = h.Data(db, table, values); err != nil && err != ErrSkip {
				return errors.Trace(err)
			}
		}
	}

	return nil
}
// parseValues splits the value list of one INSERT statement into its
// column values.
// values are separated by comma, but we can not split using comma directly:
// string is enclosed by single quote.
// Quoted values are returned with their surrounding quotes intact,
// with backslash escapes resolved when any were present.
// a simple implementation, may be more robust later.
func parseValues(str string) ([]string, error) {
	values := make([]string, 0, 8)

	i := 0
	for i < len(str) {
		if str[i] != '\'' {
			// no string, read until comma
			j := i + 1
			for ; j < len(str) && str[j] != ','; j++ {
			}
			values = append(values, str[i:j])
			// skip ,
			i = j + 1
		} else {
			// read string until another single quote
			j := i + 1

			escaped := false
			for j < len(str) {
				if str[j] == '\\' {
					// skip escaped character
					j += 2
					escaped = true
					continue
				} else if str[j] == '\'' {
					break
				} else {
					j++
				}
			}

			// Unterminated quoted value.
			if j >= len(str) {
				return nil, fmt.Errorf("parse quote values error")
			}

			value := str[i : j+1]
			if escaped {
				value = unescapeString(value)
			}
			values = append(values, value)

			// skip ' and ,
			i = j + 2
		}

		// need skip blank???
	}

	return values, nil
}
// unescapeString resolves the backslash escapes that mysqldump emits
// when dumping string values.
// Refer http://dev.mysql.com/doc/refman/5.7/en/string-literals.html
// A lone trailing backslash is dropped.
func unescapeString(s string) string {
	out := make([]byte, 0, len(s))
	i := 0
	for i < len(s) {
		c := s[i]
		if c != '\\' {
			// Ordinary byte: copy through.
			out = append(out, c)
			i++
			continue
		}
		if i+1 == len(s) {
			// The last char is \, remove it.
			break
		}
		// Escape sequence: translate the byte after the backslash.
		out = append(out, unescapeChar(s[i+1]))
		i += 2
	}
	return string(out)
}
// unescapeChar maps the byte following a backslash to the character it
// stands for: \" \' \\ \n \0 \b \Z \r \t ==> escape to one char.
// Bytes with no special meaning (including quote characters and the
// backslash itself) are returned unchanged.
func unescapeChar(ch byte) byte {
	switch ch {
	case 'n':
		return '\n'
	case '0':
		return 0
	case 'b':
		return '\b'
	case 'Z':
		return 26
	case 'r':
		return '\r'
	case 't':
		return '\t'
	}
	return ch
}

View File

@@ -0,0 +1,44 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"const.go",
"errcode.go",
"errname.go",
"error.go",
"field.go",
"gtid.go",
"mariadb_gtid.go",
"mysql_gtid.go",
"parse_binary.go",
"position.go",
"result.go",
"resultset.go",
"resultset_helper.go",
"state.go",
"util.go",
],
importmap = "go-common/vendor/github.com/siddontang/go-mysql/mysql",
importpath = "github.com/siddontang/go-mysql/mysql",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/juju/errors:go_default_library",
"//vendor/github.com/satori/go.uuid:go_default_library",
"//vendor/github.com/siddontang/go/hack:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

164
vendor/github.com/siddontang/go-mysql/mysql/const.go generated vendored Normal file
View File

@@ -0,0 +1,164 @@
package mysql
// Core protocol limits and formats.
const (
	MinProtocolVersion byte = 10
	// MaxPayloadLen is the largest single-packet payload (2^24 - 1).
	MaxPayloadLen int = 1<<24 - 1
	// TimeFormat is the MySQL DATETIME layout in Go reference-time form.
	TimeFormat string = "2006-01-02 15:04:05"
)

var (
	// maybe you can change for your specified name
	ServerVersion string = "5.7.0"
)

// Packet header bytes identifying the response type.
const (
	OK_HEADER          byte = 0x00
	ERR_HEADER         byte = 0xff
	EOF_HEADER         byte = 0xfe
	LocalInFile_HEADER byte = 0xfb
)
const (
SERVER_STATUS_IN_TRANS uint16 = 0x0001
SERVER_STATUS_AUTOCOMMIT uint16 = 0x0002
SERVER_MORE_RESULTS_EXISTS uint16 = 0x0008
SERVER_STATUS_NO_GOOD_INDEX_USED uint16 = 0x0010
SERVER_STATUS_NO_INDEX_USED uint16 = 0x0020
SERVER_STATUS_CURSOR_EXISTS uint16 = 0x0040
SERVER_STATUS_LAST_ROW_SEND uint16 = 0x0080
SERVER_STATUS_DB_DROPPED uint16 = 0x0100
SERVER_STATUS_NO_BACKSLASH_ESCAPED uint16 = 0x0200
SERVER_STATUS_METADATA_CHANGED uint16 = 0x0400
SERVER_QUERY_WAS_SLOW uint16 = 0x0800
SERVER_PS_OUT_PARAMS uint16 = 0x1000
)
const (
COM_SLEEP byte = iota
COM_QUIT
COM_INIT_DB
COM_QUERY
COM_FIELD_LIST
COM_CREATE_DB
COM_DROP_DB
COM_REFRESH
COM_SHUTDOWN
COM_STATISTICS
COM_PROCESS_INFO
COM_CONNECT
COM_PROCESS_KILL
COM_DEBUG
COM_PING
COM_TIME
COM_DELAYED_INSERT
COM_CHANGE_USER
COM_BINLOG_DUMP
COM_TABLE_DUMP
COM_CONNECT_OUT
COM_REGISTER_SLAVE
COM_STMT_PREPARE
COM_STMT_EXECUTE
COM_STMT_SEND_LONG_DATA
COM_STMT_CLOSE
COM_STMT_RESET
COM_SET_OPTION
COM_STMT_FETCH
COM_DAEMON
COM_BINLOG_DUMP_GTID
COM_RESET_CONNECTION
)
const (
CLIENT_LONG_PASSWORD uint32 = 1 << iota
CLIENT_FOUND_ROWS
CLIENT_LONG_FLAG
CLIENT_CONNECT_WITH_DB
CLIENT_NO_SCHEMA
CLIENT_COMPRESS
CLIENT_ODBC
CLIENT_LOCAL_FILES
CLIENT_IGNORE_SPACE
CLIENT_PROTOCOL_41
CLIENT_INTERACTIVE
CLIENT_SSL
CLIENT_IGNORE_SIGPIPE
CLIENT_TRANSACTIONS
CLIENT_RESERVED
CLIENT_SECURE_CONNECTION
CLIENT_MULTI_STATEMENTS
CLIENT_MULTI_RESULTS
CLIENT_PS_MULTI_RESULTS
CLIENT_PLUGIN_AUTH
CLIENT_CONNECT_ATTRS
CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA
)
const (
MYSQL_TYPE_DECIMAL byte = iota
MYSQL_TYPE_TINY
MYSQL_TYPE_SHORT
MYSQL_TYPE_LONG
MYSQL_TYPE_FLOAT
MYSQL_TYPE_DOUBLE
MYSQL_TYPE_NULL
MYSQL_TYPE_TIMESTAMP
MYSQL_TYPE_LONGLONG
MYSQL_TYPE_INT24
MYSQL_TYPE_DATE
MYSQL_TYPE_TIME
MYSQL_TYPE_DATETIME
MYSQL_TYPE_YEAR
MYSQL_TYPE_NEWDATE
MYSQL_TYPE_VARCHAR
MYSQL_TYPE_BIT
//mysql 5.6
MYSQL_TYPE_TIMESTAMP2
MYSQL_TYPE_DATETIME2
MYSQL_TYPE_TIME2
)
const (
MYSQL_TYPE_JSON byte = iota + 0xf5
MYSQL_TYPE_NEWDECIMAL
MYSQL_TYPE_ENUM
MYSQL_TYPE_SET
MYSQL_TYPE_TINY_BLOB
MYSQL_TYPE_MEDIUM_BLOB
MYSQL_TYPE_LONG_BLOB
MYSQL_TYPE_BLOB
MYSQL_TYPE_VAR_STRING
MYSQL_TYPE_STRING
MYSQL_TYPE_GEOMETRY
)
const (
NOT_NULL_FLAG = 1
PRI_KEY_FLAG = 2
UNIQUE_KEY_FLAG = 4
BLOB_FLAG = 16
UNSIGNED_FLAG = 32
ZEROFILL_FLAG = 64
BINARY_FLAG = 128
ENUM_FLAG = 256
AUTO_INCREMENT_FLAG = 512
TIMESTAMP_FLAG = 1024
SET_FLAG = 2048
NUM_FLAG = 32768
PART_KEY_FLAG = 16384
GROUP_FLAG = 32768
UNIQUE_FLAG = 65536
)
const (
AUTH_NAME = "mysql_native_password"
DEFAULT_CHARSET = "utf8"
DEFAULT_COLLATION_ID uint8 = 33
DEFAULT_COLLATION_NAME string = "utf8_general_ci"
)
// Like vitess, use flavor for different MySQL versions,
const (
MySQLFlavor = "mysql"
MariaDBFlavor = "mariadb"
)

870
vendor/github.com/siddontang/go-mysql/mysql/errcode.go generated vendored Normal file
View File

@@ -0,0 +1,870 @@
package mysql
// MySQL server error codes (ER_*), mirroring include/mysqld_error.h from
// the MySQL source distribution. The numeric values are part of the wire
// protocol (returned in ERR packets) and must never be changed. Codes run
// contiguously from ER_ERROR_FIRST (1000) through ER_ERROR_LAST.
const (
// ER_ERROR_FIRST marks the lowest server error code; it aliases
// ER_HASHCHK (1000) and fixes the block's type to uint16.
ER_ERROR_FIRST uint16 = 1000
ER_HASHCHK = 1000
ER_NISAMCHK = 1001
ER_NO = 1002
ER_YES = 1003
ER_CANT_CREATE_FILE = 1004
ER_CANT_CREATE_TABLE = 1005
ER_CANT_CREATE_DB = 1006
ER_DB_CREATE_EXISTS = 1007
ER_DB_DROP_EXISTS = 1008
ER_DB_DROP_DELETE = 1009
ER_DB_DROP_RMDIR = 1010
ER_CANT_DELETE_FILE = 1011
ER_CANT_FIND_SYSTEM_REC = 1012
ER_CANT_GET_STAT = 1013
ER_CANT_GET_WD = 1014
ER_CANT_LOCK = 1015
ER_CANT_OPEN_FILE = 1016
ER_FILE_NOT_FOUND = 1017
ER_CANT_READ_DIR = 1018
ER_CANT_SET_WD = 1019
ER_CHECKREAD = 1020
ER_DISK_FULL = 1021
ER_DUP_KEY = 1022
ER_ERROR_ON_CLOSE = 1023
ER_ERROR_ON_READ = 1024
ER_ERROR_ON_RENAME = 1025
ER_ERROR_ON_WRITE = 1026
ER_FILE_USED = 1027
ER_FILSORT_ABORT = 1028
ER_FORM_NOT_FOUND = 1029
ER_GET_ERRNO = 1030
ER_ILLEGAL_HA = 1031
ER_KEY_NOT_FOUND = 1032
ER_NOT_FORM_FILE = 1033
ER_NOT_KEYFILE = 1034
ER_OLD_KEYFILE = 1035
ER_OPEN_AS_READONLY = 1036
ER_OUTOFMEMORY = 1037
ER_OUT_OF_SORTMEMORY = 1038
ER_UNEXPECTED_EOF = 1039
ER_CON_COUNT_ERROR = 1040
ER_OUT_OF_RESOURCES = 1041
ER_BAD_HOST_ERROR = 1042
ER_HANDSHAKE_ERROR = 1043
ER_DBACCESS_DENIED_ERROR = 1044
ER_ACCESS_DENIED_ERROR = 1045
ER_NO_DB_ERROR = 1046
ER_UNKNOWN_COM_ERROR = 1047
ER_BAD_NULL_ERROR = 1048
ER_BAD_DB_ERROR = 1049
ER_TABLE_EXISTS_ERROR = 1050
ER_BAD_TABLE_ERROR = 1051
ER_NON_UNIQ_ERROR = 1052
ER_SERVER_SHUTDOWN = 1053
ER_BAD_FIELD_ERROR = 1054
ER_WRONG_FIELD_WITH_GROUP = 1055
ER_WRONG_GROUP_FIELD = 1056
ER_WRONG_SUM_SELECT = 1057
ER_WRONG_VALUE_COUNT = 1058
ER_TOO_LONG_IDENT = 1059
ER_DUP_FIELDNAME = 1060
ER_DUP_KEYNAME = 1061
ER_DUP_ENTRY = 1062
ER_WRONG_FIELD_SPEC = 1063
ER_PARSE_ERROR = 1064
ER_EMPTY_QUERY = 1065
ER_NONUNIQ_TABLE = 1066
ER_INVALID_DEFAULT = 1067
ER_MULTIPLE_PRI_KEY = 1068
ER_TOO_MANY_KEYS = 1069
ER_TOO_MANY_KEY_PARTS = 1070
ER_TOO_LONG_KEY = 1071
ER_KEY_COLUMN_DOES_NOT_EXITS = 1072
ER_BLOB_USED_AS_KEY = 1073
ER_TOO_BIG_FIELDLENGTH = 1074
ER_WRONG_AUTO_KEY = 1075
ER_READY = 1076
ER_NORMAL_SHUTDOWN = 1077
ER_GOT_SIGNAL = 1078
ER_SHUTDOWN_COMPLETE = 1079
ER_FORCING_CLOSE = 1080
ER_IPSOCK_ERROR = 1081
ER_NO_SUCH_INDEX = 1082
ER_WRONG_FIELD_TERMINATORS = 1083
ER_BLOBS_AND_NO_TERMINATED = 1084
ER_TEXTFILE_NOT_READABLE = 1085
ER_FILE_EXISTS_ERROR = 1086
ER_LOAD_INFO = 1087
ER_ALTER_INFO = 1088
ER_WRONG_SUB_KEY = 1089
ER_CANT_REMOVE_ALL_FIELDS = 1090
ER_CANT_DROP_FIELD_OR_KEY = 1091
ER_INSERT_INFO = 1092
ER_UPDATE_TABLE_USED = 1093
ER_NO_SUCH_THREAD = 1094
ER_KILL_DENIED_ERROR = 1095
ER_NO_TABLES_USED = 1096
ER_TOO_BIG_SET = 1097
ER_NO_UNIQUE_LOGFILE = 1098
ER_TABLE_NOT_LOCKED_FOR_WRITE = 1099
ER_TABLE_NOT_LOCKED = 1100
ER_BLOB_CANT_HAVE_DEFAULT = 1101
ER_WRONG_DB_NAME = 1102
ER_WRONG_TABLE_NAME = 1103
ER_TOO_BIG_SELECT = 1104
ER_UNKNOWN_ERROR = 1105
ER_UNKNOWN_PROCEDURE = 1106
ER_WRONG_PARAMCOUNT_TO_PROCEDURE = 1107
ER_WRONG_PARAMETERS_TO_PROCEDURE = 1108
ER_UNKNOWN_TABLE = 1109
ER_FIELD_SPECIFIED_TWICE = 1110
ER_INVALID_GROUP_FUNC_USE = 1111
ER_UNSUPPORTED_EXTENSION = 1112
ER_TABLE_MUST_HAVE_COLUMNS = 1113
ER_RECORD_FILE_FULL = 1114
ER_UNKNOWN_CHARACTER_SET = 1115
ER_TOO_MANY_TABLES = 1116
ER_TOO_MANY_FIELDS = 1117
ER_TOO_BIG_ROWSIZE = 1118
ER_STACK_OVERRUN = 1119
ER_WRONG_OUTER_JOIN = 1120
ER_NULL_COLUMN_IN_INDEX = 1121
ER_CANT_FIND_UDF = 1122
ER_CANT_INITIALIZE_UDF = 1123
ER_UDF_NO_PATHS = 1124
ER_UDF_EXISTS = 1125
ER_CANT_OPEN_LIBRARY = 1126
ER_CANT_FIND_DL_ENTRY = 1127
ER_FUNCTION_NOT_DEFINED = 1128
ER_HOST_IS_BLOCKED = 1129
ER_HOST_NOT_PRIVILEGED = 1130
ER_PASSWORD_ANONYMOUS_USER = 1131
ER_PASSWORD_NOT_ALLOWED = 1132
ER_PASSWORD_NO_MATCH = 1133
ER_UPDATE_INFO = 1134
ER_CANT_CREATE_THREAD = 1135
ER_WRONG_VALUE_COUNT_ON_ROW = 1136
ER_CANT_REOPEN_TABLE = 1137
ER_INVALID_USE_OF_NULL = 1138
ER_REGEXP_ERROR = 1139
ER_MIX_OF_GROUP_FUNC_AND_FIELDS = 1140
ER_NONEXISTING_GRANT = 1141
ER_TABLEACCESS_DENIED_ERROR = 1142
ER_COLUMNACCESS_DENIED_ERROR = 1143
ER_ILLEGAL_GRANT_FOR_TABLE = 1144
ER_GRANT_WRONG_HOST_OR_USER = 1145
ER_NO_SUCH_TABLE = 1146
ER_NONEXISTING_TABLE_GRANT = 1147
ER_NOT_ALLOWED_COMMAND = 1148
ER_SYNTAX_ERROR = 1149
ER_DELAYED_CANT_CHANGE_LOCK = 1150
ER_TOO_MANY_DELAYED_THREADS = 1151
ER_ABORTING_CONNECTION = 1152
ER_NET_PACKET_TOO_LARGE = 1153
ER_NET_READ_ERROR_FROM_PIPE = 1154
ER_NET_FCNTL_ERROR = 1155
ER_NET_PACKETS_OUT_OF_ORDER = 1156
ER_NET_UNCOMPRESS_ERROR = 1157
ER_NET_READ_ERROR = 1158
ER_NET_READ_INTERRUPTED = 1159
ER_NET_ERROR_ON_WRITE = 1160
ER_NET_WRITE_INTERRUPTED = 1161
ER_TOO_LONG_STRING = 1162
ER_TABLE_CANT_HANDLE_BLOB = 1163
ER_TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164
ER_DELAYED_INSERT_TABLE_LOCKED = 1165
ER_WRONG_COLUMN_NAME = 1166
ER_WRONG_KEY_COLUMN = 1167
ER_WRONG_MRG_TABLE = 1168
ER_DUP_UNIQUE = 1169
ER_BLOB_KEY_WITHOUT_LENGTH = 1170
ER_PRIMARY_CANT_HAVE_NULL = 1171
ER_TOO_MANY_ROWS = 1172
ER_REQUIRES_PRIMARY_KEY = 1173
ER_NO_RAID_COMPILED = 1174
ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175
ER_KEY_DOES_NOT_EXITS = 1176
ER_CHECK_NO_SUCH_TABLE = 1177
ER_CHECK_NOT_IMPLEMENTED = 1178
ER_CANT_DO_THIS_DURING_AN_TRANSACTION = 1179
ER_ERROR_DURING_COMMIT = 1180
ER_ERROR_DURING_ROLLBACK = 1181
ER_ERROR_DURING_FLUSH_LOGS = 1182
ER_ERROR_DURING_CHECKPOINT = 1183
ER_NEW_ABORTING_CONNECTION = 1184
ER_DUMP_NOT_IMPLEMENTED = 1185
ER_FLUSH_MASTER_BINLOG_CLOSED = 1186
ER_INDEX_REBUILD = 1187
ER_MASTER = 1188
ER_MASTER_NET_READ = 1189
ER_MASTER_NET_WRITE = 1190
ER_FT_MATCHING_KEY_NOT_FOUND = 1191
ER_LOCK_OR_ACTIVE_TRANSACTION = 1192
ER_UNKNOWN_SYSTEM_VARIABLE = 1193
ER_CRASHED_ON_USAGE = 1194
ER_CRASHED_ON_REPAIR = 1195
ER_WARNING_NOT_COMPLETE_ROLLBACK = 1196
ER_TRANS_CACHE_FULL = 1197
ER_SLAVE_MUST_STOP = 1198
ER_SLAVE_NOT_RUNNING = 1199
ER_BAD_SLAVE = 1200
ER_MASTER_INFO = 1201
ER_SLAVE_THREAD = 1202
ER_TOO_MANY_USER_CONNECTIONS = 1203
ER_SET_CONSTANTS_ONLY = 1204
ER_LOCK_WAIT_TIMEOUT = 1205
ER_LOCK_TABLE_FULL = 1206
ER_READ_ONLY_TRANSACTION = 1207
ER_DROP_DB_WITH_READ_LOCK = 1208
ER_CREATE_DB_WITH_READ_LOCK = 1209
ER_WRONG_ARGUMENTS = 1210
ER_NO_PERMISSION_TO_CREATE_USER = 1211
ER_UNION_TABLES_IN_DIFFERENT_DIR = 1212
ER_LOCK_DEADLOCK = 1213
ER_TABLE_CANT_HANDLE_FT = 1214
ER_CANNOT_ADD_FOREIGN = 1215
ER_NO_REFERENCED_ROW = 1216
ER_ROW_IS_REFERENCED = 1217
ER_CONNECT_TO_MASTER = 1218
ER_QUERY_ON_MASTER = 1219
ER_ERROR_WHEN_EXECUTING_COMMAND = 1220
ER_WRONG_USAGE = 1221
ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222
ER_CANT_UPDATE_WITH_READLOCK = 1223
ER_MIXING_NOT_ALLOWED = 1224
ER_DUP_ARGUMENT = 1225
ER_USER_LIMIT_REACHED = 1226
ER_SPECIFIC_ACCESS_DENIED_ERROR = 1227
ER_LOCAL_VARIABLE = 1228
ER_GLOBAL_VARIABLE = 1229
ER_NO_DEFAULT = 1230
ER_WRONG_VALUE_FOR_VAR = 1231
ER_WRONG_TYPE_FOR_VAR = 1232
ER_VAR_CANT_BE_READ = 1233
ER_CANT_USE_OPTION_HERE = 1234
ER_NOT_SUPPORTED_YET = 1235
ER_MASTER_FATAL_ERROR_READING_BINLOG = 1236
ER_SLAVE_IGNORED_TABLE = 1237
ER_INCORRECT_GLOBAL_LOCAL_VAR = 1238
ER_WRONG_FK_DEF = 1239
ER_KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240
ER_OPERAND_COLUMNS = 1241
ER_SUBQUERY_NO_1_ROW = 1242
ER_UNKNOWN_STMT_HANDLER = 1243
ER_CORRUPT_HELP_DB = 1244
ER_CYCLIC_REFERENCE = 1245
ER_AUTO_CONVERT = 1246
ER_ILLEGAL_REFERENCE = 1247
ER_DERIVED_MUST_HAVE_ALIAS = 1248
ER_SELECT_REDUCED = 1249
ER_TABLENAME_NOT_ALLOWED_HERE = 1250
ER_NOT_SUPPORTED_AUTH_MODE = 1251
ER_SPATIAL_CANT_HAVE_NULL = 1252
ER_COLLATION_CHARSET_MISMATCH = 1253
ER_SLAVE_WAS_RUNNING = 1254
ER_SLAVE_WAS_NOT_RUNNING = 1255
ER_TOO_BIG_FOR_UNCOMPRESS = 1256
ER_ZLIB_Z_MEM_ERROR = 1257
ER_ZLIB_Z_BUF_ERROR = 1258
ER_ZLIB_Z_DATA_ERROR = 1259
ER_CUT_VALUE_GROUP_CONCAT = 1260
ER_WARN_TOO_FEW_RECORDS = 1261
ER_WARN_TOO_MANY_RECORDS = 1262
ER_WARN_NULL_TO_NOTNULL = 1263
ER_WARN_DATA_OUT_OF_RANGE = 1264
WARN_DATA_TRUNCATED = 1265
ER_WARN_USING_OTHER_HANDLER = 1266
ER_CANT_AGGREGATE_2COLLATIONS = 1267
ER_DROP_USER = 1268
ER_REVOKE_GRANTS = 1269
ER_CANT_AGGREGATE_3COLLATIONS = 1270
ER_CANT_AGGREGATE_NCOLLATIONS = 1271
ER_VARIABLE_IS_NOT_STRUCT = 1272
ER_UNKNOWN_COLLATION = 1273
ER_SLAVE_IGNORED_SSL_PARAMS = 1274
ER_SERVER_IS_IN_SECURE_AUTH_MODE = 1275
ER_WARN_FIELD_RESOLVED = 1276
ER_BAD_SLAVE_UNTIL_COND = 1277
ER_MISSING_SKIP_SLAVE = 1278
ER_UNTIL_COND_IGNORED = 1279
ER_WRONG_NAME_FOR_INDEX = 1280
ER_WRONG_NAME_FOR_CATALOG = 1281
ER_WARN_QC_RESIZE = 1282
ER_BAD_FT_COLUMN = 1283
ER_UNKNOWN_KEY_CACHE = 1284
ER_WARN_HOSTNAME_WONT_WORK = 1285
ER_UNKNOWN_STORAGE_ENGINE = 1286
ER_WARN_DEPRECATED_SYNTAX = 1287
ER_NON_UPDATABLE_TABLE = 1288
ER_FEATURE_DISABLED = 1289
ER_OPTION_PREVENTS_STATEMENT = 1290
ER_DUPLICATED_VALUE_IN_TYPE = 1291
ER_TRUNCATED_WRONG_VALUE = 1292
ER_TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293
ER_INVALID_ON_UPDATE = 1294
ER_UNSUPPORTED_PS = 1295
ER_GET_ERRMSG = 1296
ER_GET_TEMPORARY_ERRMSG = 1297
ER_UNKNOWN_TIME_ZONE = 1298
ER_WARN_INVALID_TIMESTAMP = 1299
ER_INVALID_CHARACTER_STRING = 1300
ER_WARN_ALLOWED_PACKET_OVERFLOWED = 1301
ER_CONFLICTING_DECLARATIONS = 1302
ER_SP_NO_RECURSIVE_CREATE = 1303
ER_SP_ALREADY_EXISTS = 1304
ER_SP_DOES_NOT_EXIST = 1305
ER_SP_DROP_FAILED = 1306
ER_SP_STORE_FAILED = 1307
ER_SP_LILABEL_MISMATCH = 1308
ER_SP_LABEL_REDEFINE = 1309
ER_SP_LABEL_MISMATCH = 1310
ER_SP_UNINIT_VAR = 1311
ER_SP_BADSELECT = 1312
ER_SP_BADRETURN = 1313
ER_SP_BADSTATEMENT = 1314
ER_UPDATE_LOG_DEPRECATED_IGNORED = 1315
ER_UPDATE_LOG_DEPRECATED_TRANSLATED = 1316
ER_QUERY_INTERRUPTED = 1317
ER_SP_WRONG_NO_OF_ARGS = 1318
ER_SP_COND_MISMATCH = 1319
ER_SP_NORETURN = 1320
ER_SP_NORETURNEND = 1321
ER_SP_BAD_CURSOR_QUERY = 1322
ER_SP_BAD_CURSOR_SELECT = 1323
ER_SP_CURSOR_MISMATCH = 1324
ER_SP_CURSOR_ALREADY_OPEN = 1325
ER_SP_CURSOR_NOT_OPEN = 1326
ER_SP_UNDECLARED_VAR = 1327
ER_SP_WRONG_NO_OF_FETCH_ARGS = 1328
ER_SP_FETCH_NO_DATA = 1329
ER_SP_DUP_PARAM = 1330
ER_SP_DUP_VAR = 1331
ER_SP_DUP_COND = 1332
ER_SP_DUP_CURS = 1333
ER_SP_CANT_ALTER = 1334
ER_SP_SUBSELECT_NYI = 1335
ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336
ER_SP_VARCOND_AFTER_CURSHNDLR = 1337
ER_SP_CURSOR_AFTER_HANDLER = 1338
ER_SP_CASE_NOT_FOUND = 1339
ER_FPARSER_TOO_BIG_FILE = 1340
ER_FPARSER_BAD_HEADER = 1341
ER_FPARSER_EOF_IN_COMMENT = 1342
ER_FPARSER_ERROR_IN_PARAMETER = 1343
ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344
ER_VIEW_NO_EXPLAIN = 1345
ER_FRM_UNKNOWN_TYPE = 1346
ER_WRONG_OBJECT = 1347
ER_NONUPDATEABLE_COLUMN = 1348
ER_VIEW_SELECT_DERIVED = 1349
ER_VIEW_SELECT_CLAUSE = 1350
ER_VIEW_SELECT_VARIABLE = 1351
ER_VIEW_SELECT_TMPTABLE = 1352
ER_VIEW_WRONG_LIST = 1353
ER_WARN_VIEW_MERGE = 1354
ER_WARN_VIEW_WITHOUT_KEY = 1355
ER_VIEW_INVALID = 1356
ER_SP_NO_DROP_SP = 1357
ER_SP_GOTO_IN_HNDLR = 1358
ER_TRG_ALREADY_EXISTS = 1359
ER_TRG_DOES_NOT_EXIST = 1360
ER_TRG_ON_VIEW_OR_TEMP_TABLE = 1361
ER_TRG_CANT_CHANGE_ROW = 1362
ER_TRG_NO_SUCH_ROW_IN_TRG = 1363
ER_NO_DEFAULT_FOR_FIELD = 1364
ER_DIVISION_BY_ZERO = 1365
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366
ER_ILLEGAL_VALUE_FOR_TYPE = 1367
ER_VIEW_NONUPD_CHECK = 1368
ER_VIEW_CHECK_FAILED = 1369
ER_PROCACCESS_DENIED_ERROR = 1370
ER_RELAY_LOG_FAIL = 1371
ER_PASSWD_LENGTH = 1372
ER_UNKNOWN_TARGET_BINLOG = 1373
ER_IO_ERR_LOG_INDEX_READ = 1374
ER_BINLOG_PURGE_PROHIBITED = 1375
ER_FSEEK_FAIL = 1376
ER_BINLOG_PURGE_FATAL_ERR = 1377
ER_LOG_IN_USE = 1378
ER_LOG_PURGE_UNKNOWN_ERR = 1379
ER_RELAY_LOG_INIT = 1380
ER_NO_BINARY_LOGGING = 1381
ER_RESERVED_SYNTAX = 1382
ER_WSAS_FAILED = 1383
ER_DIFF_GROUPS_PROC = 1384
ER_NO_GROUP_FOR_PROC = 1385
ER_ORDER_WITH_PROC = 1386
ER_LOGGING_PROHIBIT_CHANGING_OF = 1387
ER_NO_FILE_MAPPING = 1388
ER_WRONG_MAGIC = 1389
ER_PS_MANY_PARAM = 1390
ER_KEY_PART_0 = 1391
ER_VIEW_CHECKSUM = 1392
ER_VIEW_MULTIUPDATE = 1393
ER_VIEW_NO_INSERT_FIELD_LIST = 1394
ER_VIEW_DELETE_MERGE_VIEW = 1395
ER_CANNOT_USER = 1396
ER_XAER_NOTA = 1397
ER_XAER_INVAL = 1398
ER_XAER_RMFAIL = 1399
ER_XAER_OUTSIDE = 1400
ER_XAER_RMERR = 1401
ER_XA_RBROLLBACK = 1402
ER_NONEXISTING_PROC_GRANT = 1403
ER_PROC_AUTO_GRANT_FAIL = 1404
ER_PROC_AUTO_REVOKE_FAIL = 1405
ER_DATA_TOO_LONG = 1406
ER_SP_BAD_SQLSTATE = 1407
ER_STARTUP = 1408
ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409
ER_CANT_CREATE_USER_WITH_GRANT = 1410
ER_WRONG_VALUE_FOR_TYPE = 1411
ER_TABLE_DEF_CHANGED = 1412
ER_SP_DUP_HANDLER = 1413
ER_SP_NOT_VAR_ARG = 1414
ER_SP_NO_RETSET = 1415
ER_CANT_CREATE_GEOMETRY_OBJECT = 1416
ER_FAILED_ROUTINE_BREAK_BINLOG = 1417
ER_BINLOG_UNSAFE_ROUTINE = 1418
ER_BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419
ER_EXEC_STMT_WITH_OPEN_CURSOR = 1420
ER_STMT_HAS_NO_OPEN_CURSOR = 1421
ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422
ER_NO_DEFAULT_FOR_VIEW_FIELD = 1423
ER_SP_NO_RECURSION = 1424
ER_TOO_BIG_SCALE = 1425
ER_TOO_BIG_PRECISION = 1426
ER_M_BIGGER_THAN_D = 1427
ER_WRONG_LOCK_OF_SYSTEM_TABLE = 1428
ER_CONNECT_TO_FOREIGN_DATA_SOURCE = 1429
ER_QUERY_ON_FOREIGN_DATA_SOURCE = 1430
ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431
ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432
ER_FOREIGN_DATA_STRING_INVALID = 1433
ER_CANT_CREATE_FEDERATED_TABLE = 1434
ER_TRG_IN_WRONG_SCHEMA = 1435
ER_STACK_OVERRUN_NEED_MORE = 1436
ER_TOO_LONG_BODY = 1437
ER_WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438
ER_TOO_BIG_DISPLAYWIDTH = 1439
ER_XAER_DUPID = 1440
ER_DATETIME_FUNCTION_OVERFLOW = 1441
ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442
ER_VIEW_PREVENT_UPDATE = 1443
ER_PS_NO_RECURSION = 1444
ER_SP_CANT_SET_AUTOCOMMIT = 1445
ER_MALFORMED_DEFINER = 1446
ER_VIEW_FRM_NO_USER = 1447
ER_VIEW_OTHER_USER = 1448
ER_NO_SUCH_USER = 1449
ER_FORBID_SCHEMA_CHANGE = 1450
ER_ROW_IS_REFERENCED_2 = 1451
ER_NO_REFERENCED_ROW_2 = 1452
ER_SP_BAD_VAR_SHADOW = 1453
ER_TRG_NO_DEFINER = 1454
ER_OLD_FILE_FORMAT = 1455
ER_SP_RECURSION_LIMIT = 1456
ER_SP_PROC_TABLE_CORRUPT = 1457
ER_SP_WRONG_NAME = 1458
ER_TABLE_NEEDS_UPGRADE = 1459
ER_SP_NO_AGGREGATE = 1460
ER_MAX_PREPARED_STMT_COUNT_REACHED = 1461
ER_VIEW_RECURSIVE = 1462
ER_NON_GROUPING_FIELD_USED = 1463
ER_TABLE_CANT_HANDLE_SPKEYS = 1464
ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465
ER_REMOVED_SPACES = 1466
ER_AUTOINC_READ_FAILED = 1467
ER_USERNAME = 1468
ER_HOSTNAME = 1469
ER_WRONG_STRING_LENGTH = 1470
ER_NON_INSERTABLE_TABLE = 1471
ER_ADMIN_WRONG_MRG_TABLE = 1472
ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT = 1473
ER_NAME_BECOMES_EMPTY = 1474
ER_AMBIGUOUS_FIELD_TERM = 1475
ER_FOREIGN_SERVER_EXISTS = 1476
ER_FOREIGN_SERVER_DOESNT_EXIST = 1477
ER_ILLEGAL_HA_CREATE_OPTION = 1478
ER_PARTITION_REQUIRES_VALUES_ERROR = 1479
ER_PARTITION_WRONG_VALUES_ERROR = 1480
ER_PARTITION_MAXVALUE_ERROR = 1481
ER_PARTITION_SUBPARTITION_ERROR = 1482
ER_PARTITION_SUBPART_MIX_ERROR = 1483
ER_PARTITION_WRONG_NO_PART_ERROR = 1484
ER_PARTITION_WRONG_NO_SUBPART_ERROR = 1485
ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR = 1486
ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR = 1487
ER_FIELD_NOT_FOUND_PART_ERROR = 1488
ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR = 1489
ER_INCONSISTENT_PARTITION_INFO_ERROR = 1490
ER_PARTITION_FUNC_NOT_ALLOWED_ERROR = 1491
ER_PARTITIONS_MUST_BE_DEFINED_ERROR = 1492
ER_RANGE_NOT_INCREASING_ERROR = 1493
ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR = 1494
ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR = 1495
ER_PARTITION_ENTRY_ERROR = 1496
ER_MIX_HANDLER_ERROR = 1497
ER_PARTITION_NOT_DEFINED_ERROR = 1498
ER_TOO_MANY_PARTITIONS_ERROR = 1499
ER_SUBPARTITION_ERROR = 1500
ER_CANT_CREATE_HANDLER_FILE = 1501
ER_BLOB_FIELD_IN_PART_FUNC_ERROR = 1502
ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF = 1503
ER_NO_PARTS_ERROR = 1504
ER_PARTITION_MGMT_ON_NONPARTITIONED = 1505
ER_FOREIGN_KEY_ON_PARTITIONED = 1506
ER_DROP_PARTITION_NON_EXISTENT = 1507
ER_DROP_LAST_PARTITION = 1508
ER_COALESCE_ONLY_ON_HASH_PARTITION = 1509
ER_REORG_HASH_ONLY_ON_SAME_NO = 1510
ER_REORG_NO_PARAM_ERROR = 1511
ER_ONLY_ON_RANGE_LIST_PARTITION = 1512
ER_ADD_PARTITION_SUBPART_ERROR = 1513
ER_ADD_PARTITION_NO_NEW_PARTITION = 1514
ER_COALESCE_PARTITION_NO_PARTITION = 1515
ER_REORG_PARTITION_NOT_EXIST = 1516
ER_SAME_NAME_PARTITION = 1517
ER_NO_BINLOG_ERROR = 1518
ER_CONSECUTIVE_REORG_PARTITIONS = 1519
ER_REORG_OUTSIDE_RANGE = 1520
ER_PARTITION_FUNCTION_FAILURE = 1521
ER_PART_STATE_ERROR = 1522
ER_LIMITED_PART_RANGE = 1523
ER_PLUGIN_IS_NOT_LOADED = 1524
ER_WRONG_VALUE = 1525
ER_NO_PARTITION_FOR_GIVEN_VALUE = 1526
ER_FILEGROUP_OPTION_ONLY_ONCE = 1527
ER_CREATE_FILEGROUP_FAILED = 1528
ER_DROP_FILEGROUP_FAILED = 1529
ER_TABLESPACE_AUTO_EXTEND_ERROR = 1530
ER_WRONG_SIZE_NUMBER = 1531
ER_SIZE_OVERFLOW_ERROR = 1532
ER_ALTER_FILEGROUP_FAILED = 1533
ER_BINLOG_ROW_LOGGING_FAILED = 1534
ER_BINLOG_ROW_WRONG_TABLE_DEF = 1535
ER_BINLOG_ROW_RBR_TO_SBR = 1536
ER_EVENT_ALREADY_EXISTS = 1537
ER_EVENT_STORE_FAILED = 1538
ER_EVENT_DOES_NOT_EXIST = 1539
ER_EVENT_CANT_ALTER = 1540
ER_EVENT_DROP_FAILED = 1541
ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG = 1542
ER_EVENT_ENDS_BEFORE_STARTS = 1543
ER_EVENT_EXEC_TIME_IN_THE_PAST = 1544
ER_EVENT_OPEN_TABLE_FAILED = 1545
ER_EVENT_NEITHER_M_EXPR_NOR_M_AT = 1546
ER_OBSOLETE_COL_COUNT_DOESNT_MATCH_CORRUPTED = 1547
ER_OBSOLETE_CANNOT_LOAD_FROM_TABLE = 1548
ER_EVENT_CANNOT_DELETE = 1549
ER_EVENT_COMPILE_ERROR = 1550
ER_EVENT_SAME_NAME = 1551
ER_EVENT_DATA_TOO_LONG = 1552
ER_DROP_INDEX_FK = 1553
ER_WARN_DEPRECATED_SYNTAX_WITH_VER = 1554
ER_CANT_WRITE_LOCK_LOG_TABLE = 1555
ER_CANT_LOCK_LOG_TABLE = 1556
ER_FOREIGN_DUPLICATE_KEY_OLD_UNUSED = 1557
ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE = 1558
ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR = 1559
ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1560
ER_NDB_CANT_SWITCH_BINLOG_FORMAT = 1561
ER_PARTITION_NO_TEMPORARY = 1562
ER_PARTITION_CONST_DOMAIN_ERROR = 1563
ER_PARTITION_FUNCTION_IS_NOT_ALLOWED = 1564
ER_DDL_LOG_ERROR = 1565
ER_NULL_IN_VALUES_LESS_THAN = 1566
ER_WRONG_PARTITION_NAME = 1567
ER_CANT_CHANGE_TX_CHARACTERISTICS = 1568
ER_DUP_ENTRY_AUTOINCREMENT_CASE = 1569
ER_EVENT_MODIFY_QUEUE_ERROR = 1570
ER_EVENT_SET_VAR_ERROR = 1571
ER_PARTITION_MERGE_ERROR = 1572
ER_CANT_ACTIVATE_LOG = 1573
ER_RBR_NOT_AVAILABLE = 1574
ER_BASE64_DECODE_ERROR = 1575
ER_EVENT_RECURSION_FORBIDDEN = 1576
ER_EVENTS_DB_ERROR = 1577
ER_ONLY_INTEGERS_ALLOWED = 1578
ER_UNSUPORTED_LOG_ENGINE = 1579
ER_BAD_LOG_STATEMENT = 1580
ER_CANT_RENAME_LOG_TABLE = 1581
ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT = 1582
ER_WRONG_PARAMETERS_TO_NATIVE_FCT = 1583
ER_WRONG_PARAMETERS_TO_STORED_FCT = 1584
ER_NATIVE_FCT_NAME_COLLISION = 1585
ER_DUP_ENTRY_WITH_KEY_NAME = 1586
ER_BINLOG_PURGE_EMFILE = 1587
ER_EVENT_CANNOT_CREATE_IN_THE_PAST = 1588
ER_EVENT_CANNOT_ALTER_IN_THE_PAST = 1589
ER_SLAVE_INCIDENT = 1590
ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT = 1591
ER_BINLOG_UNSAFE_STATEMENT = 1592
ER_SLAVE_FATAL_ERROR = 1593
ER_SLAVE_RELAY_LOG_READ_FAILURE = 1594
ER_SLAVE_RELAY_LOG_WRITE_FAILURE = 1595
ER_SLAVE_CREATE_EVENT_FAILURE = 1596
ER_SLAVE_MASTER_COM_FAILURE = 1597
ER_BINLOG_LOGGING_IMPOSSIBLE = 1598
ER_VIEW_NO_CREATION_CTX = 1599
ER_VIEW_INVALID_CREATION_CTX = 1600
ER_SR_INVALID_CREATION_CTX = 1601
ER_TRG_CORRUPTED_FILE = 1602
ER_TRG_NO_CREATION_CTX = 1603
ER_TRG_INVALID_CREATION_CTX = 1604
ER_EVENT_INVALID_CREATION_CTX = 1605
ER_TRG_CANT_OPEN_TABLE = 1606
ER_CANT_CREATE_SROUTINE = 1607
ER_NEVER_USED = 1608
ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT = 1609
ER_SLAVE_CORRUPT_EVENT = 1610
ER_LOAD_DATA_INVALID_COLUMN = 1611
ER_LOG_PURGE_NO_FILE = 1612
ER_XA_RBTIMEOUT = 1613
ER_XA_RBDEADLOCK = 1614
ER_NEED_REPREPARE = 1615
ER_DELAYED_NOT_SUPPORTED = 1616
WARN_NO_MASTER_INFO = 1617
WARN_OPTION_IGNORED = 1618
WARN_PLUGIN_DELETE_BUILTIN = 1619
WARN_PLUGIN_BUSY = 1620
ER_VARIABLE_IS_READONLY = 1621
ER_WARN_ENGINE_TRANSACTION_ROLLBACK = 1622
ER_SLAVE_HEARTBEAT_FAILURE = 1623
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE = 1624
ER_NDB_REPLICATION_SCHEMA_ERROR = 1625
ER_CONFLICT_FN_PARSE_ERROR = 1626
ER_EXCEPTIONS_WRITE_ERROR = 1627
ER_TOO_LONG_TABLE_COMMENT = 1628
ER_TOO_LONG_FIELD_COMMENT = 1629
ER_FUNC_INEXISTENT_NAME_COLLISION = 1630
ER_DATABASE_NAME = 1631
ER_TABLE_NAME = 1632
ER_PARTITION_NAME = 1633
ER_SUBPARTITION_NAME = 1634
ER_TEMPORARY_NAME = 1635
ER_RENAMED_NAME = 1636
ER_TOO_MANY_CONCURRENT_TRXS = 1637
WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED = 1638
ER_DEBUG_SYNC_TIMEOUT = 1639
ER_DEBUG_SYNC_HIT_LIMIT = 1640
ER_DUP_SIGNAL_SET = 1641
ER_SIGNAL_WARN = 1642
ER_SIGNAL_NOT_FOUND = 1643
ER_SIGNAL_EXCEPTION = 1644
ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER = 1645
ER_SIGNAL_BAD_CONDITION_TYPE = 1646
WARN_COND_ITEM_TRUNCATED = 1647
ER_COND_ITEM_TOO_LONG = 1648
ER_UNKNOWN_LOCALE = 1649
ER_SLAVE_IGNORE_SERVER_IDS = 1650
ER_QUERY_CACHE_DISABLED = 1651
ER_SAME_NAME_PARTITION_FIELD = 1652
ER_PARTITION_COLUMN_LIST_ERROR = 1653
ER_WRONG_TYPE_COLUMN_VALUE_ERROR = 1654
ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR = 1655
ER_MAXVALUE_IN_VALUES_IN = 1656
ER_TOO_MANY_VALUES_ERROR = 1657
ER_ROW_SINGLE_PARTITION_FIELD_ERROR = 1658
ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD = 1659
ER_PARTITION_FIELDS_TOO_LONG = 1660
ER_BINLOG_ROW_ENGINE_AND_STMT_ENGINE = 1661
ER_BINLOG_ROW_MODE_AND_STMT_ENGINE = 1662
ER_BINLOG_UNSAFE_AND_STMT_ENGINE = 1663
ER_BINLOG_ROW_INJECTION_AND_STMT_ENGINE = 1664
ER_BINLOG_STMT_MODE_AND_ROW_ENGINE = 1665
ER_BINLOG_ROW_INJECTION_AND_STMT_MODE = 1666
ER_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE = 1667
ER_BINLOG_UNSAFE_LIMIT = 1668
ER_BINLOG_UNSAFE_INSERT_DELAYED = 1669
ER_BINLOG_UNSAFE_SYSTEM_TABLE = 1670
ER_BINLOG_UNSAFE_AUTOINC_COLUMNS = 1671
ER_BINLOG_UNSAFE_UDF = 1672
ER_BINLOG_UNSAFE_SYSTEM_VARIABLE = 1673
ER_BINLOG_UNSAFE_SYSTEM_FUNCTION = 1674
ER_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS = 1675
ER_MESSAGE_AND_STATEMENT = 1676
ER_SLAVE_CONVERSION_FAILED = 1677
ER_SLAVE_CANT_CREATE_CONVERSION = 1678
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1679
ER_PATH_LENGTH = 1680
ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT = 1681
ER_WRONG_NATIVE_TABLE_STRUCTURE = 1682
ER_WRONG_PERFSCHEMA_USAGE = 1683
ER_WARN_I_S_SKIPPED_TABLE = 1684
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1685
ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1686
ER_SPATIAL_MUST_HAVE_GEOM_COL = 1687
ER_TOO_LONG_INDEX_COMMENT = 1688
ER_LOCK_ABORTED = 1689
ER_DATA_OUT_OF_RANGE = 1690
ER_WRONG_SPVAR_TYPE_IN_LIMIT = 1691
ER_BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE = 1692
ER_BINLOG_UNSAFE_MIXED_STATEMENT = 1693
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1694
ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1695
ER_FAILED_READ_FROM_PAR_FILE = 1696
ER_VALUES_IS_NOT_INT_TYPE_ERROR = 1697
ER_ACCESS_DENIED_NO_PASSWORD_ERROR = 1698
ER_SET_PASSWORD_AUTH_PLUGIN = 1699
ER_GRANT_PLUGIN_USER_EXISTS = 1700
ER_TRUNCATE_ILLEGAL_FK = 1701
ER_PLUGIN_IS_PERMANENT = 1702
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN = 1703
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX = 1704
ER_STMT_CACHE_FULL = 1705
ER_MULTI_UPDATE_KEY_CONFLICT = 1706
ER_TABLE_NEEDS_REBUILD = 1707
WARN_OPTION_BELOW_LIMIT = 1708
ER_INDEX_COLUMN_TOO_LONG = 1709
ER_ERROR_IN_TRIGGER_BODY = 1710
ER_ERROR_IN_UNKNOWN_TRIGGER_BODY = 1711
ER_INDEX_CORRUPT = 1712
ER_UNDO_RECORD_TOO_BIG = 1713
ER_BINLOG_UNSAFE_INSERT_IGNORE_SELECT = 1714
ER_BINLOG_UNSAFE_INSERT_SELECT_UPDATE = 1715
ER_BINLOG_UNSAFE_REPLACE_SELECT = 1716
ER_BINLOG_UNSAFE_CREATE_IGNORE_SELECT = 1717
ER_BINLOG_UNSAFE_CREATE_REPLACE_SELECT = 1718
ER_BINLOG_UNSAFE_UPDATE_IGNORE = 1719
ER_PLUGIN_NO_UNINSTALL = 1720
ER_PLUGIN_NO_INSTALL = 1721
ER_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT = 1722
ER_BINLOG_UNSAFE_CREATE_SELECT_AUTOINC = 1723
ER_BINLOG_UNSAFE_INSERT_TWO_KEYS = 1724
ER_TABLE_IN_FK_CHECK = 1725
ER_UNSUPPORTED_ENGINE = 1726
ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST = 1727
ER_CANNOT_LOAD_FROM_TABLE_V2 = 1728
ER_MASTER_DELAY_VALUE_OUT_OF_RANGE = 1729
ER_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT = 1730
ER_PARTITION_EXCHANGE_DIFFERENT_OPTION = 1731
ER_PARTITION_EXCHANGE_PART_TABLE = 1732
ER_PARTITION_EXCHANGE_TEMP_TABLE = 1733
ER_PARTITION_INSTEAD_OF_SUBPARTITION = 1734
ER_UNKNOWN_PARTITION = 1735
ER_TABLES_DIFFERENT_METADATA = 1736
ER_ROW_DOES_NOT_MATCH_PARTITION = 1737
ER_BINLOG_CACHE_SIZE_GREATER_THAN_MAX = 1738
ER_WARN_INDEX_NOT_APPLICABLE = 1739
ER_PARTITION_EXCHANGE_FOREIGN_KEY = 1740
ER_NO_SUCH_KEY_VALUE = 1741
ER_RPL_INFO_DATA_TOO_LONG = 1742
ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE = 1743
ER_BINLOG_READ_EVENT_CHECKSUM_FAILURE = 1744
ER_BINLOG_STMT_CACHE_SIZE_GREATER_THAN_MAX = 1745
ER_CANT_UPDATE_TABLE_IN_CREATE_TABLE_SELECT = 1746
ER_PARTITION_CLAUSE_ON_NONPARTITIONED = 1747
ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET = 1748
ER_NO_SUCH_PARTITION__UNUSED = 1749
ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE = 1750
ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_CREATED_TEMP_TABLE = 1751
ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_DROPPED_TEMP_TABLE = 1752
ER_MTS_FEATURE_IS_NOT_SUPPORTED = 1753
ER_MTS_UPDATED_DBS_GREATER_MAX = 1754
ER_MTS_CANT_PARALLEL = 1755
ER_MTS_INCONSISTENT_DATA = 1756
ER_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING = 1757
ER_DA_INVALID_CONDITION_NUMBER = 1758
ER_INSECURE_PLAIN_TEXT = 1759
ER_INSECURE_CHANGE_MASTER = 1760
ER_FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO = 1761
ER_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO = 1762
ER_SQLTHREAD_WITH_SECURE_SLAVE = 1763
ER_TABLE_HAS_NO_FT = 1764
ER_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER = 1765
ER_VARIABLE_NOT_SETTABLE_IN_TRANSACTION = 1766
ER_GTID_NEXT_IS_NOT_IN_GTID_NEXT_LIST = 1767
ER_CANT_CHANGE_GTID_NEXT_IN_TRANSACTION_WHEN_GTID_NEXT_LIST_IS_NULL = 1768
ER_SET_STATEMENT_CANNOT_INVOKE_FUNCTION = 1769
ER_GTID_NEXT_CANT_BE_AUTOMATIC_IF_GTID_NEXT_LIST_IS_NON_NULL = 1770
ER_SKIPPING_LOGGED_TRANSACTION = 1771
ER_MALFORMED_GTID_SET_SPECIFICATION = 1772
ER_MALFORMED_GTID_SET_ENCODING = 1773
ER_MALFORMED_GTID_SPECIFICATION = 1774
ER_GNO_EXHAUSTED = 1775
ER_BAD_SLAVE_AUTO_POSITION = 1776
ER_AUTO_POSITION_REQUIRES_GTID_MODE_ON = 1777
ER_CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET = 1778
ER_GTID_MODE_2_OR_3_REQUIRES_ENFORCE_GTID_CONSISTENCY_ON = 1779
ER_GTID_MODE_REQUIRES_BINLOG = 1780
ER_CANT_SET_GTID_NEXT_TO_GTID_WHEN_GTID_MODE_IS_OFF = 1781
ER_CANT_SET_GTID_NEXT_TO_ANONYMOUS_WHEN_GTID_MODE_IS_ON = 1782
ER_CANT_SET_GTID_NEXT_LIST_TO_NON_NULL_WHEN_GTID_MODE_IS_OFF = 1783
ER_FOUND_GTID_EVENT_WHEN_GTID_MODE_IS_OFF = 1784
ER_GTID_UNSAFE_NON_TRANSACTIONAL_TABLE = 1785
ER_GTID_UNSAFE_CREATE_SELECT = 1786
ER_GTID_UNSAFE_CREATE_DROP_TEMPORARY_TABLE_IN_TRANSACTION = 1787
ER_GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME = 1788
ER_MASTER_HAS_PURGED_REQUIRED_GTIDS = 1789
ER_CANT_SET_GTID_NEXT_WHEN_OWNING_GTID = 1790
ER_UNKNOWN_EXPLAIN_FORMAT = 1791
ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION = 1792
ER_TOO_LONG_TABLE_PARTITION_COMMENT = 1793
ER_SLAVE_CONFIGURATION = 1794
ER_INNODB_FT_LIMIT = 1795
ER_INNODB_NO_FT_TEMP_TABLE = 1796
ER_INNODB_FT_WRONG_DOCID_COLUMN = 1797
ER_INNODB_FT_WRONG_DOCID_INDEX = 1798
ER_INNODB_ONLINE_LOG_TOO_BIG = 1799
ER_UNKNOWN_ALTER_ALGORITHM = 1800
ER_UNKNOWN_ALTER_LOCK = 1801
ER_MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS = 1802
ER_MTS_RECOVERY_FAILURE = 1803
ER_MTS_RESET_WORKERS = 1804
ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2 = 1805
ER_SLAVE_SILENT_RETRY_TRANSACTION = 1806
ER_DISCARD_FK_CHECKS_RUNNING = 1807
ER_TABLE_SCHEMA_MISMATCH = 1808
ER_TABLE_IN_SYSTEM_TABLESPACE = 1809
ER_IO_READ_ERROR = 1810
ER_IO_WRITE_ERROR = 1811
ER_TABLESPACE_MISSING = 1812
ER_TABLESPACE_EXISTS = 1813
ER_TABLESPACE_DISCARDED = 1814
ER_INTERNAL_ERROR = 1815
ER_INNODB_IMPORT_ERROR = 1816
ER_INNODB_INDEX_CORRUPT = 1817
ER_INVALID_YEAR_COLUMN_LENGTH = 1818
ER_NOT_VALID_PASSWORD = 1819
ER_MUST_CHANGE_PASSWORD = 1820
ER_FK_NO_INDEX_CHILD = 1821
ER_FK_NO_INDEX_PARENT = 1822
ER_FK_FAIL_ADD_SYSTEM = 1823
ER_FK_CANNOT_OPEN_PARENT = 1824
ER_FK_INCORRECT_OPTION = 1825
ER_FK_DUP_NAME = 1826
ER_PASSWORD_FORMAT = 1827
ER_FK_COLUMN_CANNOT_DROP = 1828
ER_FK_COLUMN_CANNOT_DROP_CHILD = 1829
ER_FK_COLUMN_NOT_NULL = 1830
ER_DUP_INDEX = 1831
ER_FK_COLUMN_CANNOT_CHANGE = 1832
ER_FK_COLUMN_CANNOT_CHANGE_CHILD = 1833
ER_FK_CANNOT_DELETE_PARENT = 1834
ER_MALFORMED_PACKET = 1835
ER_READ_ONLY_MODE = 1836
ER_GTID_NEXT_TYPE_UNDEFINED_GROUP = 1837
ER_VARIABLE_NOT_SETTABLE_IN_SP = 1838
ER_CANT_SET_GTID_PURGED_WHEN_GTID_MODE_IS_OFF = 1839
ER_CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY = 1840
ER_CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY = 1841
ER_GTID_PURGED_WAS_CHANGED = 1842
ER_GTID_EXECUTED_WAS_CHANGED = 1843
ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES = 1844
ER_ALTER_OPERATION_NOT_SUPPORTED = 1845
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON = 1846
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY = 1847
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION = 1848
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME = 1849
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE = 1850
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK = 1851
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_IGNORE = 1852
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK = 1853
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC = 1854
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS = 1855
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS = 1856
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS = 1857
ER_SQL_SLAVE_SKIP_COUNTER_NOT_SETTABLE_IN_GTID_MODE = 1858
ER_DUP_UNKNOWN_IN_INDEX = 1859
ER_IDENT_CAUSES_TOO_LONG_PATH = 1860
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL = 1861
ER_MUST_CHANGE_PASSWORD_LOGIN = 1862
ER_ROW_IN_WRONG_PARTITION = 1863
// ER_ERROR_LAST marks the highest error code known to this table; it
// intentionally aliases ER_ROW_IN_WRONG_PARTITION (1863).
ER_ERROR_LAST = 1863
)

868
vendor/github.com/siddontang/go-mysql/mysql/errname.go generated vendored Normal file
View File

@@ -0,0 +1,868 @@
package mysql
var MySQLErrName = map[uint16]string{
ER_HASHCHK: "hashchk",
ER_NISAMCHK: "isamchk",
ER_NO: "NO",
ER_YES: "YES",
ER_CANT_CREATE_FILE: "Can't create file '%-.200s' (errno: %d - %s)",
ER_CANT_CREATE_TABLE: "Can't create table '%-.200s' (errno: %d)",
ER_CANT_CREATE_DB: "Can't create database '%-.192s' (errno: %d)",
ER_DB_CREATE_EXISTS: "Can't create database '%-.192s'; database exists",
ER_DB_DROP_EXISTS: "Can't drop database '%-.192s'; database doesn't exist",
ER_DB_DROP_DELETE: "Error dropping database (can't delete '%-.192s', errno: %d)",
ER_DB_DROP_RMDIR: "Error dropping database (can't rmdir '%-.192s', errno: %d)",
ER_CANT_DELETE_FILE: "Error on delete of '%-.192s' (errno: %d - %s)",
ER_CANT_FIND_SYSTEM_REC: "Can't read record in system table",
ER_CANT_GET_STAT: "Can't get status of '%-.200s' (errno: %d - %s)",
ER_CANT_GET_WD: "Can't get working directory (errno: %d - %s)",
ER_CANT_LOCK: "Can't lock file (errno: %d - %s)",
ER_CANT_OPEN_FILE: "Can't open file: '%-.200s' (errno: %d - %s)",
ER_FILE_NOT_FOUND: "Can't find file: '%-.200s' (errno: %d - %s)",
ER_CANT_READ_DIR: "Can't read dir of '%-.192s' (errno: %d - %s)",
ER_CANT_SET_WD: "Can't change dir to '%-.192s' (errno: %d - %s)",
ER_CHECKREAD: "Record has changed since last read in table '%-.192s'",
ER_DISK_FULL: "Disk full (%s); waiting for someone to free some space... (errno: %d - %s)",
ER_DUP_KEY: "Can't write; duplicate key in table '%-.192s'",
ER_ERROR_ON_CLOSE: "Error on close of '%-.192s' (errno: %d - %s)",
ER_ERROR_ON_READ: "Error reading file '%-.200s' (errno: %d - %s)",
ER_ERROR_ON_RENAME: "Error on rename of '%-.210s' to '%-.210s' (errno: %d - %s)",
ER_ERROR_ON_WRITE: "Error writing file '%-.200s' (errno: %d - %s)",
ER_FILE_USED: "'%-.192s' is locked against change",
ER_FILSORT_ABORT: "Sort aborted",
ER_FORM_NOT_FOUND: "View '%-.192s' doesn't exist for '%-.192s'",
ER_GET_ERRNO: "Got error %d from storage engine",
ER_ILLEGAL_HA: "Table storage engine for '%-.192s' doesn't have this option",
ER_KEY_NOT_FOUND: "Can't find record in '%-.192s'",
ER_NOT_FORM_FILE: "Incorrect information in file: '%-.200s'",
ER_NOT_KEYFILE: "Incorrect key file for table '%-.200s'; try to repair it",
ER_OLD_KEYFILE: "Old key file for table '%-.192s'; repair it!",
ER_OPEN_AS_READONLY: "Table '%-.192s' is read only",
ER_OUTOFMEMORY: "Out of memory; restart server and try again (needed %d bytes)",
ER_OUT_OF_SORTMEMORY: "Out of sort memory, consider increasing server sort buffer size",
ER_UNEXPECTED_EOF: "Unexpected EOF found when reading file '%-.192s' (errno: %d - %s)",
ER_CON_COUNT_ERROR: "Too many connections",
ER_OUT_OF_RESOURCES: "Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space",
ER_BAD_HOST_ERROR: "Can't get hostname for your address",
ER_HANDSHAKE_ERROR: "Bad handshake",
ER_DBACCESS_DENIED_ERROR: "Access denied for user '%-.48s'@'%-.64s' to database '%-.192s'",
ER_ACCESS_DENIED_ERROR: "Access denied for user '%-.48s'@'%-.64s' (using password: %s)",
ER_NO_DB_ERROR: "No database selected",
ER_UNKNOWN_COM_ERROR: "Unknown command",
ER_BAD_NULL_ERROR: "Column '%-.192s' cannot be null",
ER_BAD_DB_ERROR: "Unknown database '%-.192s'",
ER_TABLE_EXISTS_ERROR: "Table '%-.192s' already exists",
ER_BAD_TABLE_ERROR: "Unknown table '%-.100s'",
ER_NON_UNIQ_ERROR: "Column '%-.192s' in %-.192s is ambiguous",
ER_SERVER_SHUTDOWN: "Server shutdown in progress",
ER_BAD_FIELD_ERROR: "Unknown column '%-.192s' in '%-.192s'",
ER_WRONG_FIELD_WITH_GROUP: "'%-.192s' isn't in GROUP BY",
ER_WRONG_GROUP_FIELD: "Can't group on '%-.192s'",
ER_WRONG_SUM_SELECT: "Statement has sum functions and columns in same statement",
ER_WRONG_VALUE_COUNT: "Column count doesn't match value count",
ER_TOO_LONG_IDENT: "Identifier name '%-.100s' is too long",
ER_DUP_FIELDNAME: "Duplicate column name '%-.192s'",
ER_DUP_KEYNAME: "Duplicate key name '%-.192s'",
ER_DUP_ENTRY: "Duplicate entry '%-.192s' for key %d",
ER_WRONG_FIELD_SPEC: "Incorrect column specifier for column '%-.192s'",
ER_PARSE_ERROR: "%s near '%-.80s' at line %d",
ER_EMPTY_QUERY: "Query was empty",
ER_NONUNIQ_TABLE: "Not unique table/alias: '%-.192s'",
ER_INVALID_DEFAULT: "Invalid default value for '%-.192s'",
ER_MULTIPLE_PRI_KEY: "Multiple primary key defined",
ER_TOO_MANY_KEYS: "Too many keys specified; max %d keys allowed",
ER_TOO_MANY_KEY_PARTS: "Too many key parts specified; max %d parts allowed",
ER_TOO_LONG_KEY: "Specified key was too long; max key length is %d bytes",
ER_KEY_COLUMN_DOES_NOT_EXITS: "Key column '%-.192s' doesn't exist in table",
ER_BLOB_USED_AS_KEY: "BLOB column '%-.192s' can't be used in key specification with the used table type",
ER_TOO_BIG_FIELDLENGTH: "Column length too big for column '%-.192s' (max = %lu); use BLOB or TEXT instead",
ER_WRONG_AUTO_KEY: "Incorrect table definition; there can be only one auto column and it must be defined as a key",
ER_READY: "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d",
ER_NORMAL_SHUTDOWN: "%s: Normal shutdown\n",
ER_GOT_SIGNAL: "%s: Got signal %d. Aborting!\n",
ER_SHUTDOWN_COMPLETE: "%s: Shutdown complete\n",
ER_FORCING_CLOSE: "%s: Forcing close of thread %ld user: '%-.48s'\n",
ER_IPSOCK_ERROR: "Can't create IP socket",
ER_NO_SUCH_INDEX: "Table '%-.192s' has no index like the one used in CREATE INDEX; recreate the table",
ER_WRONG_FIELD_TERMINATORS: "Field separator argument is not what is expected; check the manual",
ER_BLOBS_AND_NO_TERMINATED: "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'",
ER_TEXTFILE_NOT_READABLE: "The file '%-.128s' must be in the database directory or be readable by all",
ER_FILE_EXISTS_ERROR: "File '%-.200s' already exists",
ER_LOAD_INFO: "Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld",
ER_ALTER_INFO: "Records: %ld Duplicates: %ld",
ER_WRONG_SUB_KEY: "Incorrect prefix key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique prefix keys",
ER_CANT_REMOVE_ALL_FIELDS: "You can't delete all columns with ALTER TABLE; use DROP TABLE instead",
ER_CANT_DROP_FIELD_OR_KEY: "Can't DROP '%-.192s'; check that column/key exists",
ER_INSERT_INFO: "Records: %ld Duplicates: %ld Warnings: %ld",
ER_UPDATE_TABLE_USED: "You can't specify target table '%-.192s' for update in FROM clause",
ER_NO_SUCH_THREAD: "Unknown thread id: %lu",
ER_KILL_DENIED_ERROR: "You are not owner of thread %lu",
ER_NO_TABLES_USED: "No tables used",
ER_TOO_BIG_SET: "Too many strings for column %-.192s and SET",
ER_NO_UNIQUE_LOGFILE: "Can't generate a unique log-filename %-.200s.(1-999)\n",
ER_TABLE_NOT_LOCKED_FOR_WRITE: "Table '%-.192s' was locked with a READ lock and can't be updated",
ER_TABLE_NOT_LOCKED: "Table '%-.192s' was not locked with LOCK TABLES",
ER_BLOB_CANT_HAVE_DEFAULT: "BLOB/TEXT column '%-.192s' can't have a default value",
ER_WRONG_DB_NAME: "Incorrect database name '%-.100s'",
ER_WRONG_TABLE_NAME: "Incorrect table name '%-.100s'",
ER_TOO_BIG_SELECT: "The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay",
ER_UNKNOWN_ERROR: "Unknown error",
ER_UNKNOWN_PROCEDURE: "Unknown procedure '%-.192s'",
ER_WRONG_PARAMCOUNT_TO_PROCEDURE: "Incorrect parameter count to procedure '%-.192s'",
ER_WRONG_PARAMETERS_TO_PROCEDURE: "Incorrect parameters to procedure '%-.192s'",
ER_UNKNOWN_TABLE: "Unknown table '%-.192s' in %-.32s",
ER_FIELD_SPECIFIED_TWICE: "Column '%-.192s' specified twice",
ER_INVALID_GROUP_FUNC_USE: "Invalid use of group function",
ER_UNSUPPORTED_EXTENSION: "Table '%-.192s' uses an extension that doesn't exist in this MySQL version",
ER_TABLE_MUST_HAVE_COLUMNS: "A table must have at least 1 column",
ER_RECORD_FILE_FULL: "The table '%-.192s' is full",
ER_UNKNOWN_CHARACTER_SET: "Unknown character set: '%-.64s'",
ER_TOO_MANY_TABLES: "Too many tables; MySQL can only use %d tables in a join",
ER_TOO_MANY_FIELDS: "Too many columns",
ER_TOO_BIG_ROWSIZE: "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs",
ER_STACK_OVERRUN: "Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld --thread_stack=#' to specify a bigger stack if needed",
ER_WRONG_OUTER_JOIN: "Cross dependency found in OUTER JOIN; examine your ON conditions",
ER_NULL_COLUMN_IN_INDEX: "Table handler doesn't support NULL in given index. Please change column '%-.192s' to be NOT NULL or use another handler",
ER_CANT_FIND_UDF: "Can't load function '%-.192s'",
ER_CANT_INITIALIZE_UDF: "Can't initialize function '%-.192s'; %-.80s",
ER_UDF_NO_PATHS: "No paths allowed for shared library",
ER_UDF_EXISTS: "Function '%-.192s' already exists",
ER_CANT_OPEN_LIBRARY: "Can't open shared library '%-.192s' (errno: %d %-.128s)",
ER_CANT_FIND_DL_ENTRY: "Can't find symbol '%-.128s' in library",
ER_FUNCTION_NOT_DEFINED: "Function '%-.192s' is not defined",
ER_HOST_IS_BLOCKED: "Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'",
ER_HOST_NOT_PRIVILEGED: "Host '%-.64s' is not allowed to connect to this MySQL server",
ER_PASSWORD_ANONYMOUS_USER: "You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords",
ER_PASSWORD_NOT_ALLOWED: "You must have privileges to update tables in the mysql database to be able to change passwords for others",
ER_PASSWORD_NO_MATCH: "Can't find any matching row in the user table",
ER_UPDATE_INFO: "Rows matched: %ld Changed: %ld Warnings: %ld",
ER_CANT_CREATE_THREAD: "Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug",
ER_WRONG_VALUE_COUNT_ON_ROW: "Column count doesn't match value count at row %ld",
ER_CANT_REOPEN_TABLE: "Can't reopen table: '%-.192s'",
ER_INVALID_USE_OF_NULL: "Invalid use of NULL value",
ER_REGEXP_ERROR: "Got error '%-.64s' from regexp",
ER_MIX_OF_GROUP_FUNC_AND_FIELDS: "Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause",
ER_NONEXISTING_GRANT: "There is no such grant defined for user '%-.48s' on host '%-.64s'",
ER_TABLEACCESS_DENIED_ERROR: "%-.128s command denied to user '%-.48s'@'%-.64s' for table '%-.64s'",
ER_COLUMNACCESS_DENIED_ERROR: "%-.16s command denied to user '%-.48s'@'%-.64s' for column '%-.192s' in table '%-.192s'",
ER_ILLEGAL_GRANT_FOR_TABLE: "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used",
ER_GRANT_WRONG_HOST_OR_USER: "The host or user argument to GRANT is too long",
ER_NO_SUCH_TABLE: "Table '%-.192s.%-.192s' doesn't exist",
ER_NONEXISTING_TABLE_GRANT: "There is no such grant defined for user '%-.48s' on host '%-.64s' on table '%-.192s'",
ER_NOT_ALLOWED_COMMAND: "The used command is not allowed with this MySQL version",
ER_SYNTAX_ERROR: "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use",
ER_DELAYED_CANT_CHANGE_LOCK: "Delayed insert thread couldn't get requested lock for table %-.192s",
ER_TOO_MANY_DELAYED_THREADS: "Too many delayed threads in use",
ER_ABORTING_CONNECTION: "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)",
ER_NET_PACKET_TOO_LARGE: "Got a packet bigger than 'max_allowed_packet' bytes",
ER_NET_READ_ERROR_FROM_PIPE: "Got a read error from the connection pipe",
ER_NET_FCNTL_ERROR: "Got an error from fcntl()",
ER_NET_PACKETS_OUT_OF_ORDER: "Got packets out of order",
ER_NET_UNCOMPRESS_ERROR: "Couldn't uncompress communication packet",
ER_NET_READ_ERROR: "Got an error reading communication packets",
ER_NET_READ_INTERRUPTED: "Got timeout reading communication packets",
ER_NET_ERROR_ON_WRITE: "Got an error writing communication packets",
ER_NET_WRITE_INTERRUPTED: "Got timeout writing communication packets",
ER_TOO_LONG_STRING: "Result string is longer than 'max_allowed_packet' bytes",
ER_TABLE_CANT_HANDLE_BLOB: "The used table type doesn't support BLOB/TEXT columns",
ER_TABLE_CANT_HANDLE_AUTO_INCREMENT: "The used table type doesn't support AUTO_INCREMENT columns",
ER_DELAYED_INSERT_TABLE_LOCKED: "INSERT DELAYED can't be used with table '%-.192s' because it is locked with LOCK TABLES",
ER_WRONG_COLUMN_NAME: "Incorrect column name '%-.100s'",
ER_WRONG_KEY_COLUMN: "The used storage engine can't index column '%-.192s'",
ER_WRONG_MRG_TABLE: "Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist",
ER_DUP_UNIQUE: "Can't write, because of unique constraint, to table '%-.192s'",
ER_BLOB_KEY_WITHOUT_LENGTH: "BLOB/TEXT column '%-.192s' used in key specification without a key length",
ER_PRIMARY_CANT_HAVE_NULL: "All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead",
ER_TOO_MANY_ROWS: "Result consisted of more than one row",
ER_REQUIRES_PRIMARY_KEY: "This table type requires a primary key",
ER_NO_RAID_COMPILED: "This version of MySQL is not compiled with RAID support",
ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE: "You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column",
ER_KEY_DOES_NOT_EXITS: "Key '%-.192s' doesn't exist in table '%-.192s'",
ER_CHECK_NO_SUCH_TABLE: "Can't open table",
ER_CHECK_NOT_IMPLEMENTED: "The storage engine for the table doesn't support %s",
ER_CANT_DO_THIS_DURING_AN_TRANSACTION: "You are not allowed to execute this command in a transaction",
ER_ERROR_DURING_COMMIT: "Got error %d during COMMIT",
ER_ERROR_DURING_ROLLBACK: "Got error %d during ROLLBACK",
ER_ERROR_DURING_FLUSH_LOGS: "Got error %d during FLUSH_LOGS",
ER_ERROR_DURING_CHECKPOINT: "Got error %d during CHECKPOINT",
ER_NEW_ABORTING_CONNECTION: "Aborted connection %ld to db: '%-.192s' user: '%-.48s' host: '%-.64s' (%-.64s)",
ER_DUMP_NOT_IMPLEMENTED: "The storage engine for the table does not support binary table dump",
ER_FLUSH_MASTER_BINLOG_CLOSED: "Binlog closed, cannot RESET MASTER",
ER_INDEX_REBUILD: "Failed rebuilding the index of dumped table '%-.192s'",
ER_MASTER: "Error from master: '%-.64s'",
ER_MASTER_NET_READ: "Net error reading from master",
ER_MASTER_NET_WRITE: "Net error writing to master",
ER_FT_MATCHING_KEY_NOT_FOUND: "Can't find FULLTEXT index matching the column list",
ER_LOCK_OR_ACTIVE_TRANSACTION: "Can't execute the given command because you have active locked tables or an active transaction",
ER_UNKNOWN_SYSTEM_VARIABLE: "Unknown system variable '%-.64s'",
ER_CRASHED_ON_USAGE: "Table '%-.192s' is marked as crashed and should be repaired",
ER_CRASHED_ON_REPAIR: "Table '%-.192s' is marked as crashed and last (automatic?) repair failed",
ER_WARNING_NOT_COMPLETE_ROLLBACK: "Some non-transactional changed tables couldn't be rolled back",
ER_TRANS_CACHE_FULL: "Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again",
ER_SLAVE_MUST_STOP: "This operation cannot be performed with a running slave; run STOP SLAVE first",
ER_SLAVE_NOT_RUNNING: "This operation requires a running slave; configure slave and do START SLAVE",
ER_BAD_SLAVE: "The server is not configured as slave; fix in config file or with CHANGE MASTER TO",
ER_MASTER_INFO: "Could not initialize master info structure; more error messages can be found in the MySQL error log",
ER_SLAVE_THREAD: "Could not create slave thread; check system resources",
ER_TOO_MANY_USER_CONNECTIONS: "User %-.64s already has more than 'max_user_connections' active connections",
ER_SET_CONSTANTS_ONLY: "You may only use constant expressions with SET",
ER_LOCK_WAIT_TIMEOUT: "Lock wait timeout exceeded; try restarting transaction",
ER_LOCK_TABLE_FULL: "The total number of locks exceeds the lock table size",
ER_READ_ONLY_TRANSACTION: "Update locks cannot be acquired during a READ UNCOMMITTED transaction",
ER_DROP_DB_WITH_READ_LOCK: "DROP DATABASE not allowed while thread is holding global read lock",
ER_CREATE_DB_WITH_READ_LOCK: "CREATE DATABASE not allowed while thread is holding global read lock",
ER_WRONG_ARGUMENTS: "Incorrect arguments to %s",
ER_NO_PERMISSION_TO_CREATE_USER: "'%-.48s'@'%-.64s' is not allowed to create new users",
ER_UNION_TABLES_IN_DIFFERENT_DIR: "Incorrect table definition; all MERGE tables must be in the same database",
ER_LOCK_DEADLOCK: "Deadlock found when trying to get lock; try restarting transaction",
ER_TABLE_CANT_HANDLE_FT: "The used table type doesn't support FULLTEXT indexes",
ER_CANNOT_ADD_FOREIGN: "Cannot add foreign key constraint",
ER_NO_REFERENCED_ROW: "Cannot add or update a child row: a foreign key constraint fails",
ER_ROW_IS_REFERENCED: "Cannot delete or update a parent row: a foreign key constraint fails",
ER_CONNECT_TO_MASTER: "Error connecting to master: %-.128s",
ER_QUERY_ON_MASTER: "Error running query on master: %-.128s",
ER_ERROR_WHEN_EXECUTING_COMMAND: "Error when executing command %s: %-.128s",
ER_WRONG_USAGE: "Incorrect usage of %s and %s",
ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT: "The used SELECT statements have a different number of columns",
ER_CANT_UPDATE_WITH_READLOCK: "Can't execute the query because you have a conflicting read lock",
ER_MIXING_NOT_ALLOWED: "Mixing of transactional and non-transactional tables is disabled",
ER_DUP_ARGUMENT: "Option '%s' used twice in statement",
ER_USER_LIMIT_REACHED: "User '%-.64s' has exceeded the '%s' resource (current value: %ld)",
ER_SPECIFIC_ACCESS_DENIED_ERROR: "Access denied; you need (at least one of) the %-.128s privilege(s) for this operation",
ER_LOCAL_VARIABLE: "Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL",
ER_GLOBAL_VARIABLE: "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL",
ER_NO_DEFAULT: "Variable '%-.64s' doesn't have a default value",
ER_WRONG_VALUE_FOR_VAR: "Variable '%-.64s' can't be set to the value of '%-.200s'",
ER_WRONG_TYPE_FOR_VAR: "Incorrect argument type to variable '%-.64s'",
ER_VAR_CANT_BE_READ: "Variable '%-.64s' can only be set, not read",
ER_CANT_USE_OPTION_HERE: "Incorrect usage/placement of '%s'",
ER_NOT_SUPPORTED_YET: "This version of MySQL doesn't yet support '%s'",
ER_MASTER_FATAL_ERROR_READING_BINLOG: "Got fatal error %d from master when reading data from binary log: '%-.320s'",
ER_SLAVE_IGNORED_TABLE: "Slave SQL thread ignored the query because of replicate-*-table rules",
ER_INCORRECT_GLOBAL_LOCAL_VAR: "Variable '%-.192s' is a %s variable",
ER_WRONG_FK_DEF: "Incorrect foreign key definition for '%-.192s': %s",
ER_KEY_REF_DO_NOT_MATCH_TABLE_REF: "Key reference and table reference don't match",
ER_OPERAND_COLUMNS: "Operand should contain %d column(s)",
ER_SUBQUERY_NO_1_ROW: "Subquery returns more than 1 row",
ER_UNKNOWN_STMT_HANDLER: "Unknown prepared statement handler (%.*s) given to %s",
ER_CORRUPT_HELP_DB: "Help database is corrupt or does not exist",
ER_CYCLIC_REFERENCE: "Cyclic reference on subqueries",
ER_AUTO_CONVERT: "Converting column '%s' from %s to %s",
ER_ILLEGAL_REFERENCE: "Reference '%-.64s' not supported (%s)",
ER_DERIVED_MUST_HAVE_ALIAS: "Every derived table must have its own alias",
ER_SELECT_REDUCED: "Select %u was reduced during optimization",
ER_TABLENAME_NOT_ALLOWED_HERE: "Table '%-.192s' from one of the SELECTs cannot be used in %-.32s",
ER_NOT_SUPPORTED_AUTH_MODE: "Client does not support authentication protocol requested by server; consider upgrading MySQL client",
ER_SPATIAL_CANT_HAVE_NULL: "All parts of a SPATIAL index must be NOT NULL",
ER_COLLATION_CHARSET_MISMATCH: "COLLATION '%s' is not valid for CHARACTER SET '%s'",
ER_SLAVE_WAS_RUNNING: "Slave is already running",
ER_SLAVE_WAS_NOT_RUNNING: "Slave already has been stopped",
ER_TOO_BIG_FOR_UNCOMPRESS: "Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)",
ER_ZLIB_Z_MEM_ERROR: "ZLIB: Not enough memory",
ER_ZLIB_Z_BUF_ERROR: "ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)",
ER_ZLIB_Z_DATA_ERROR: "ZLIB: Input data corrupted",
ER_CUT_VALUE_GROUP_CONCAT: "Row %u was cut by GROUP_CONCAT()",
ER_WARN_TOO_FEW_RECORDS: "Row %ld doesn't contain data for all columns",
ER_WARN_TOO_MANY_RECORDS: "Row %ld was truncated; it contained more data than there were input columns",
ER_WARN_NULL_TO_NOTNULL: "Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld",
ER_WARN_DATA_OUT_OF_RANGE: "Out of range value for column '%s' at row %ld",
WARN_DATA_TRUNCATED: "Data truncated for column '%s' at row %ld",
ER_WARN_USING_OTHER_HANDLER: "Using storage engine %s for table '%s'",
ER_CANT_AGGREGATE_2COLLATIONS: "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'",
ER_DROP_USER: "Cannot drop one or more of the requested users",
ER_REVOKE_GRANTS: "Can't revoke all privileges for one or more of the requested users",
ER_CANT_AGGREGATE_3COLLATIONS: "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'",
ER_CANT_AGGREGATE_NCOLLATIONS: "Illegal mix of collations for operation '%s'",
ER_VARIABLE_IS_NOT_STRUCT: "Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)",
ER_UNKNOWN_COLLATION: "Unknown collation: '%-.64s'",
ER_SLAVE_IGNORED_SSL_PARAMS: "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started",
ER_SERVER_IS_IN_SECURE_AUTH_MODE: "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format",
ER_WARN_FIELD_RESOLVED: "Field or reference '%-.192s%s%-.192s%s%-.192s' of SELECT #%d was resolved in SELECT #%d",
ER_BAD_SLAVE_UNTIL_COND: "Incorrect parameter or combination of parameters for START SLAVE UNTIL",
ER_MISSING_SKIP_SLAVE: "It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart",
ER_UNTIL_COND_IGNORED: "SQL thread is not to be started so UNTIL options are ignored",
ER_WRONG_NAME_FOR_INDEX: "Incorrect index name '%-.100s'",
ER_WRONG_NAME_FOR_CATALOG: "Incorrect catalog name '%-.100s'",
ER_WARN_QC_RESIZE: "Query cache failed to set size %lu; new query cache size is %lu",
ER_BAD_FT_COLUMN: "Column '%-.192s' cannot be part of FULLTEXT index",
ER_UNKNOWN_KEY_CACHE: "Unknown key cache '%-.100s'",
ER_WARN_HOSTNAME_WONT_WORK: "MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work",
ER_UNKNOWN_STORAGE_ENGINE: "Unknown storage engine '%s'",
ER_WARN_DEPRECATED_SYNTAX: "'%s' is deprecated and will be removed in a future release. Please use %s instead",
ER_NON_UPDATABLE_TABLE: "The target table %-.100s of the %s is not updatable",
ER_FEATURE_DISABLED: "The '%s' feature is disabled; you need MySQL built with '%s' to have it working",
ER_OPTION_PREVENTS_STATEMENT: "The MySQL server is running with the %s option so it cannot execute this statement",
ER_DUPLICATED_VALUE_IN_TYPE: "Column '%-.100s' has duplicated value '%-.64s' in %s",
ER_TRUNCATED_WRONG_VALUE: "Truncated incorrect %-.32s value: '%-.128s'",
ER_TOO_MUCH_AUTO_TIMESTAMP_COLS: "Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause",
ER_INVALID_ON_UPDATE: "Invalid ON UPDATE clause for '%-.192s' column",
ER_UNSUPPORTED_PS: "This command is not supported in the prepared statement protocol yet",
ER_GET_ERRMSG: "Got error %d '%-.100s' from %s",
ER_GET_TEMPORARY_ERRMSG: "Got temporary error %d '%-.100s' from %s",
ER_UNKNOWN_TIME_ZONE: "Unknown or incorrect time zone: '%-.64s'",
ER_WARN_INVALID_TIMESTAMP: "Invalid TIMESTAMP value in column '%s' at row %ld",
ER_INVALID_CHARACTER_STRING: "Invalid %s character string: '%.64s'",
ER_WARN_ALLOWED_PACKET_OVERFLOWED: "Result of %s() was larger than max_allowed_packet (%ld) - truncated",
ER_CONFLICTING_DECLARATIONS: "Conflicting declarations: '%s%s' and '%s%s'",
ER_SP_NO_RECURSIVE_CREATE: "Can't create a %s from within another stored routine",
ER_SP_ALREADY_EXISTS: "%s %s already exists",
ER_SP_DOES_NOT_EXIST: "%s %s does not exist",
ER_SP_DROP_FAILED: "Failed to DROP %s %s",
ER_SP_STORE_FAILED: "Failed to CREATE %s %s",
ER_SP_LILABEL_MISMATCH: "%s with no matching label: %s",
ER_SP_LABEL_REDEFINE: "Redefining label %s",
ER_SP_LABEL_MISMATCH: "End-label %s without match",
ER_SP_UNINIT_VAR: "Referring to uninitialized variable %s",
ER_SP_BADSELECT: "PROCEDURE %s can't return a result set in the given context",
ER_SP_BADRETURN: "RETURN is only allowed in a FUNCTION",
ER_SP_BADSTATEMENT: "%s is not allowed in stored procedures",
ER_UPDATE_LOG_DEPRECATED_IGNORED: "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored.",
ER_UPDATE_LOG_DEPRECATED_TRANSLATED: "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN.",
ER_QUERY_INTERRUPTED: "Query execution was interrupted",
ER_SP_WRONG_NO_OF_ARGS: "Incorrect number of arguments for %s %s; expected %u, got %u",
ER_SP_COND_MISMATCH: "Undefined CONDITION: %s",
ER_SP_NORETURN: "No RETURN found in FUNCTION %s",
ER_SP_NORETURNEND: "FUNCTION %s ended without RETURN",
ER_SP_BAD_CURSOR_QUERY: "Cursor statement must be a SELECT",
ER_SP_BAD_CURSOR_SELECT: "Cursor SELECT must not have INTO",
ER_SP_CURSOR_MISMATCH: "Undefined CURSOR: %s",
ER_SP_CURSOR_ALREADY_OPEN: "Cursor is already open",
ER_SP_CURSOR_NOT_OPEN: "Cursor is not open",
ER_SP_UNDECLARED_VAR: "Undeclared variable: %s",
ER_SP_WRONG_NO_OF_FETCH_ARGS: "Incorrect number of FETCH variables",
ER_SP_FETCH_NO_DATA: "No data - zero rows fetched, selected, or processed",
ER_SP_DUP_PARAM: "Duplicate parameter: %s",
ER_SP_DUP_VAR: "Duplicate variable: %s",
ER_SP_DUP_COND: "Duplicate condition: %s",
ER_SP_DUP_CURS: "Duplicate cursor: %s",
ER_SP_CANT_ALTER: "Failed to ALTER %s %s",
ER_SP_SUBSELECT_NYI: "Subquery value not supported",
ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG: "%s is not allowed in stored function or trigger",
ER_SP_VARCOND_AFTER_CURSHNDLR: "Variable or condition declaration after cursor or handler declaration",
ER_SP_CURSOR_AFTER_HANDLER: "Cursor declaration after handler declaration",
ER_SP_CASE_NOT_FOUND: "Case not found for CASE statement",
ER_FPARSER_TOO_BIG_FILE: "Configuration file '%-.192s' is too big",
ER_FPARSER_BAD_HEADER: "Malformed file type header in file '%-.192s'",
ER_FPARSER_EOF_IN_COMMENT: "Unexpected end of file while parsing comment '%-.200s'",
ER_FPARSER_ERROR_IN_PARAMETER: "Error while parsing parameter '%-.192s' (line: '%-.192s')",
ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER: "Unexpected end of file while skipping unknown parameter '%-.192s'",
ER_VIEW_NO_EXPLAIN: "EXPLAIN/SHOW can not be issued; lacking privileges for underlying table",
ER_FRM_UNKNOWN_TYPE: "File '%-.192s' has unknown type '%-.64s' in its header",
ER_WRONG_OBJECT: "'%-.192s.%-.192s' is not %s",
ER_NONUPDATEABLE_COLUMN: "Column '%-.192s' is not updatable",
ER_VIEW_SELECT_DERIVED: "View's SELECT contains a subquery in the FROM clause",
ER_VIEW_SELECT_CLAUSE: "View's SELECT contains a '%s' clause",
ER_VIEW_SELECT_VARIABLE: "View's SELECT contains a variable or parameter",
ER_VIEW_SELECT_TMPTABLE: "View's SELECT refers to a temporary table '%-.192s'",
ER_VIEW_WRONG_LIST: "View's SELECT and view's field list have different column counts",
ER_WARN_VIEW_MERGE: "View merge algorithm can't be used here for now (assumed undefined algorithm)",
ER_WARN_VIEW_WITHOUT_KEY: "View being updated does not have complete key of underlying table in it",
ER_VIEW_INVALID: "View '%-.192s.%-.192s' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them",
ER_SP_NO_DROP_SP: "Can't drop or alter a %s from within another stored routine",
ER_SP_GOTO_IN_HNDLR: "GOTO is not allowed in a stored procedure handler",
ER_TRG_ALREADY_EXISTS: "Trigger already exists",
ER_TRG_DOES_NOT_EXIST: "Trigger does not exist",
ER_TRG_ON_VIEW_OR_TEMP_TABLE: "Trigger's '%-.192s' is view or temporary table",
ER_TRG_CANT_CHANGE_ROW: "Updating of %s row is not allowed in %strigger",
ER_TRG_NO_SUCH_ROW_IN_TRG: "There is no %s row in %s trigger",
ER_NO_DEFAULT_FOR_FIELD: "Field '%-.192s' doesn't have a default value",
ER_DIVISION_BY_ZERO: "Division by 0",
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD: "Incorrect %-.32s value: '%-.128s' for column '%.192s' at row %ld",
ER_ILLEGAL_VALUE_FOR_TYPE: "Illegal %s '%-.192s' value found during parsing",
ER_VIEW_NONUPD_CHECK: "CHECK OPTION on non-updatable view '%-.192s.%-.192s'",
ER_VIEW_CHECK_FAILED: "CHECK OPTION failed '%-.192s.%-.192s'",
ER_PROCACCESS_DENIED_ERROR: "%-.16s command denied to user '%-.48s'@'%-.64s' for routine '%-.192s'",
ER_RELAY_LOG_FAIL: "Failed purging old relay logs: %s",
ER_PASSWD_LENGTH: "Password hash should be a %d-digit hexadecimal number",
ER_UNKNOWN_TARGET_BINLOG: "Target log not found in binlog index",
ER_IO_ERR_LOG_INDEX_READ: "I/O error reading log index file",
ER_BINLOG_PURGE_PROHIBITED: "Server configuration does not permit binlog purge",
ER_FSEEK_FAIL: "Failed on fseek()",
ER_BINLOG_PURGE_FATAL_ERR: "Fatal error during log purge",
ER_LOG_IN_USE: "A purgeable log is in use, will not purge",
ER_LOG_PURGE_UNKNOWN_ERR: "Unknown error during log purge",
ER_RELAY_LOG_INIT: "Failed initializing relay log position: %s",
ER_NO_BINARY_LOGGING: "You are not using binary logging",
ER_RESERVED_SYNTAX: "The '%-.64s' syntax is reserved for purposes internal to the MySQL server",
ER_WSAS_FAILED: "WSAStartup Failed",
ER_DIFF_GROUPS_PROC: "Can't handle procedures with different groups yet",
ER_NO_GROUP_FOR_PROC: "Select must have a group with this procedure",
ER_ORDER_WITH_PROC: "Can't use ORDER clause with this procedure",
ER_LOGGING_PROHIBIT_CHANGING_OF: "Binary logging and replication forbid changing the global server %s",
ER_NO_FILE_MAPPING: "Can't map file: %-.200s, errno: %d",
ER_WRONG_MAGIC: "Wrong magic in %-.64s",
ER_PS_MANY_PARAM: "Prepared statement contains too many placeholders",
ER_KEY_PART_0: "Key part '%-.192s' length cannot be 0",
ER_VIEW_CHECKSUM: "View text checksum failed",
ER_VIEW_MULTIUPDATE: "Can not modify more than one base table through a join view '%-.192s.%-.192s'",
ER_VIEW_NO_INSERT_FIELD_LIST: "Can not insert into join view '%-.192s.%-.192s' without fields list",
ER_VIEW_DELETE_MERGE_VIEW: "Can not delete from join view '%-.192s.%-.192s'",
ER_CANNOT_USER: "Operation %s failed for %.256s",
ER_XAER_NOTA: "XAER_NOTA: Unknown XID",
ER_XAER_INVAL: "XAER_INVAL: Invalid arguments (or unsupported command)",
ER_XAER_RMFAIL: "XAER_RMFAIL: The command cannot be executed when global transaction is in the %.64s state",
ER_XAER_OUTSIDE: "XAER_OUTSIDE: Some work is done outside global transaction",
ER_XAER_RMERR: "XAER_RMERR: Fatal error occurred in the transaction branch - check your data for consistency",
ER_XA_RBROLLBACK: "XA_RBROLLBACK: Transaction branch was rolled back",
ER_NONEXISTING_PROC_GRANT: "There is no such grant defined for user '%-.48s' on host '%-.64s' on routine '%-.192s'",
ER_PROC_AUTO_GRANT_FAIL: "Failed to grant EXECUTE and ALTER ROUTINE privileges",
ER_PROC_AUTO_REVOKE_FAIL: "Failed to revoke all privileges to dropped routine",
ER_DATA_TOO_LONG: "Data too long for column '%s' at row %ld",
ER_SP_BAD_SQLSTATE: "Bad SQLSTATE: '%s'",
ER_STARTUP: "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d %s",
ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR: "Can't load value from file with fixed size rows to variable",
ER_CANT_CREATE_USER_WITH_GRANT: "You are not allowed to create a user with GRANT",
ER_WRONG_VALUE_FOR_TYPE: "Incorrect %-.32s value: '%-.128s' for function %-.32s",
ER_TABLE_DEF_CHANGED: "Table definition has changed, please retry transaction",
ER_SP_DUP_HANDLER: "Duplicate handler declared in the same block",
ER_SP_NOT_VAR_ARG: "OUT or INOUT argument %d for routine %s is not a variable or NEW pseudo-variable in BEFORE trigger",
ER_SP_NO_RETSET: "Not allowed to return a result set from a %s",
ER_CANT_CREATE_GEOMETRY_OBJECT: "Cannot get geometry object from data you send to the GEOMETRY field",
ER_FAILED_ROUTINE_BREAK_BINLOG: "A routine failed and has neither NO SQL nor READS SQL DATA in its declaration and binary logging is enabled; if non-transactional tables were updated, the binary log will miss their changes",
ER_BINLOG_UNSAFE_ROUTINE: "This function has none of DETERMINISTIC, NO SQL, or READS SQL DATA in its declaration and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)",
ER_BINLOG_CREATE_ROUTINE_NEED_SUPER: "You do not have the SUPER privilege and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)",
ER_EXEC_STMT_WITH_OPEN_CURSOR: "You can't execute a prepared statement which has an open cursor associated with it. Reset the statement to re-execute it.",
ER_STMT_HAS_NO_OPEN_CURSOR: "The statement (%lu) has no open cursor.",
ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG: "Explicit or implicit commit is not allowed in stored function or trigger.",
ER_NO_DEFAULT_FOR_VIEW_FIELD: "Field of view '%-.192s.%-.192s' underlying table doesn't have a default value",
ER_SP_NO_RECURSION: "Recursive stored functions and triggers are not allowed.",
ER_TOO_BIG_SCALE: "Too big scale %d specified for column '%-.192s'. Maximum is %lu.",
ER_TOO_BIG_PRECISION: "Too big precision %d specified for column '%-.192s'. Maximum is %lu.",
ER_M_BIGGER_THAN_D: "For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column '%-.192s').",
ER_WRONG_LOCK_OF_SYSTEM_TABLE: "You can't combine write-locking of system tables with other tables or lock types",
ER_CONNECT_TO_FOREIGN_DATA_SOURCE: "Unable to connect to foreign data source: %.64s",
ER_QUERY_ON_FOREIGN_DATA_SOURCE: "There was a problem processing the query on the foreign data source. Data source error: %-.64s",
ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST: "The foreign data source you are trying to reference does not exist. Data source error: %-.64s",
ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE: "Can't create federated table. The data source connection string '%-.64s' is not in the correct format",
ER_FOREIGN_DATA_STRING_INVALID: "The data source connection string '%-.64s' is not in the correct format",
ER_CANT_CREATE_FEDERATED_TABLE: "Can't create federated table. Foreign data src error: %-.64s",
ER_TRG_IN_WRONG_SCHEMA: "Trigger in wrong schema",
ER_STACK_OVERRUN_NEED_MORE: "Thread stack overrun: %ld bytes used of a %ld byte stack, and %ld bytes needed. Use 'mysqld --thread_stack=#' to specify a bigger stack.",
ER_TOO_LONG_BODY: "Routine body for '%-.100s' is too long",
ER_WARN_CANT_DROP_DEFAULT_KEYCACHE: "Cannot drop default keycache",
ER_TOO_BIG_DISPLAYWIDTH: "Display width out of range for column '%-.192s' (max = %lu)",
ER_XAER_DUPID: "XAER_DUPID: The XID already exists",
ER_DATETIME_FUNCTION_OVERFLOW: "Datetime function: %-.32s field overflow",
ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG: "Can't update table '%-.192s' in stored function/trigger because it is already used by statement which invoked this stored function/trigger.",
ER_VIEW_PREVENT_UPDATE: "The definition of table '%-.192s' prevents operation %.192s on table '%-.192s'.",
ER_PS_NO_RECURSION: "The prepared statement contains a stored routine call that refers to that same statement. It's not allowed to execute a prepared statement in such a recursive manner",
ER_SP_CANT_SET_AUTOCOMMIT: "Not allowed to set autocommit from a stored function or trigger",
ER_MALFORMED_DEFINER: "Definer is not fully qualified",
ER_VIEW_FRM_NO_USER: "View '%-.192s'.'%-.192s' has no definer information (old table format). Current user is used as definer. Please recreate the view!",
ER_VIEW_OTHER_USER: "You need the SUPER privilege for creation view with '%-.192s'@'%-.192s' definer",
ER_NO_SUCH_USER: "The user specified as a definer ('%-.64s'@'%-.64s') does not exist",
ER_FORBID_SCHEMA_CHANGE: "Changing schema from '%-.192s' to '%-.192s' is not allowed.",
ER_ROW_IS_REFERENCED_2: "Cannot delete or update a parent row: a foreign key constraint fails (%.192s)",
ER_NO_REFERENCED_ROW_2: "Cannot add or update a child row: a foreign key constraint fails (%.192s)",
ER_SP_BAD_VAR_SHADOW: "Variable '%-.64s' must be quoted with `...`, or renamed",
ER_TRG_NO_DEFINER: "No definer attribute for trigger '%-.192s'.'%-.192s'. The trigger will be activated under the authorization of the invoker, which may have insufficient privileges. Please recreate the trigger.",
ER_OLD_FILE_FORMAT: "'%-.192s' has an old format, you should re-create the '%s' object(s)",
ER_SP_RECURSION_LIMIT: "Recursive limit %d (as set by the max_sp_recursion_depth variable) was exceeded for routine %.192s",
ER_SP_PROC_TABLE_CORRUPT: "Failed to load routine %-.192s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)",
ER_SP_WRONG_NAME: "Incorrect routine name '%-.192s'",
ER_TABLE_NEEDS_UPGRADE: "Table upgrade required. Please do \"REPAIR TABLE `%-.32s`\" or dump/reload to fix it!",
ER_SP_NO_AGGREGATE: "AGGREGATE is not supported for stored functions",
ER_MAX_PREPARED_STMT_COUNT_REACHED: "Can't create more than max_prepared_stmt_count statements (current value: %lu)",
ER_VIEW_RECURSIVE: "`%-.192s`.`%-.192s` contains view recursion",
ER_NON_GROUPING_FIELD_USED: "Non-grouping field '%-.192s' is used in %-.64s clause",
ER_TABLE_CANT_HANDLE_SPKEYS: "The used table type doesn't support SPATIAL indexes",
ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA: "Triggers can not be created on system tables",
ER_REMOVED_SPACES: "Leading spaces are removed from name '%s'",
ER_AUTOINC_READ_FAILED: "Failed to read auto-increment value from storage engine",
ER_USERNAME: "user name",
ER_HOSTNAME: "host name",
ER_WRONG_STRING_LENGTH: "String '%-.70s' is too long for %s (should be no longer than %d)",
ER_NON_INSERTABLE_TABLE: "The target table %-.100s of the %s is not insertable-into",
ER_ADMIN_WRONG_MRG_TABLE: "Table '%-.64s' is differently defined or of non-MyISAM type or doesn't exist",
ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT: "Too high level of nesting for select",
ER_NAME_BECOMES_EMPTY: "Name '%-.64s' has become ''",
ER_AMBIGUOUS_FIELD_TERM: "First character of the FIELDS TERMINATED string is ambiguous; please use non-optional and non-empty FIELDS ENCLOSED BY",
ER_FOREIGN_SERVER_EXISTS: "The foreign server, %s, you are trying to create already exists.",
ER_FOREIGN_SERVER_DOESNT_EXIST: "The foreign server name you are trying to reference does not exist. Data source error: %-.64s",
ER_ILLEGAL_HA_CREATE_OPTION: "Table storage engine '%-.64s' does not support the create option '%.64s'",
ER_PARTITION_REQUIRES_VALUES_ERROR: "Syntax error: %-.64s PARTITIONING requires definition of VALUES %-.64s for each partition",
ER_PARTITION_WRONG_VALUES_ERROR: "Only %-.64s PARTITIONING can use VALUES %-.64s in partition definition",
ER_PARTITION_MAXVALUE_ERROR: "MAXVALUE can only be used in last partition definition",
ER_PARTITION_SUBPARTITION_ERROR: "Subpartitions can only be hash partitions and by key",
ER_PARTITION_SUBPART_MIX_ERROR: "Must define subpartitions on all partitions if on one partition",
ER_PARTITION_WRONG_NO_PART_ERROR: "Wrong number of partitions defined, mismatch with previous setting",
ER_PARTITION_WRONG_NO_SUBPART_ERROR: "Wrong number of subpartitions defined, mismatch with previous setting",
ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR: "Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed",
ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR: "Expression in RANGE/LIST VALUES must be constant",
ER_FIELD_NOT_FOUND_PART_ERROR: "Field in list of fields for partition function not found in table",
ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR: "List of fields is only allowed in KEY partitions",
ER_INCONSISTENT_PARTITION_INFO_ERROR: "The partition info in the frm file is not consistent with what can be written into the frm file",
ER_PARTITION_FUNC_NOT_ALLOWED_ERROR: "The %-.192s function returns the wrong type",
ER_PARTITIONS_MUST_BE_DEFINED_ERROR: "For %-.64s partitions each partition must be defined",
ER_RANGE_NOT_INCREASING_ERROR: "VALUES LESS THAN value must be strictly increasing for each partition",
ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR: "VALUES value must be of same type as partition function",
ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR: "Multiple definition of same constant in list partitioning",
ER_PARTITION_ENTRY_ERROR: "Partitioning can not be used stand-alone in query",
ER_MIX_HANDLER_ERROR: "The mix of handlers in the partitions is not allowed in this version of MySQL",
ER_PARTITION_NOT_DEFINED_ERROR: "For the partitioned engine it is necessary to define all %-.64s",
ER_TOO_MANY_PARTITIONS_ERROR: "Too many partitions (including subpartitions) were defined",
ER_SUBPARTITION_ERROR: "It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning",
ER_CANT_CREATE_HANDLER_FILE: "Failed to create specific handler file",
ER_BLOB_FIELD_IN_PART_FUNC_ERROR: "A BLOB field is not allowed in partition function",
ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF: "A %-.192s must include all columns in the table's partitioning function",
ER_NO_PARTS_ERROR: "Number of %-.64s = 0 is not an allowed value",
ER_PARTITION_MGMT_ON_NONPARTITIONED: "Partition management on a not partitioned table is not possible",
ER_FOREIGN_KEY_ON_PARTITIONED: "Foreign key clause is not yet supported in conjunction with partitioning",
ER_DROP_PARTITION_NON_EXISTENT: "Error in list of partitions to %-.64s",
ER_DROP_LAST_PARTITION: "Cannot remove all partitions, use DROP TABLE instead",
ER_COALESCE_ONLY_ON_HASH_PARTITION: "COALESCE PARTITION can only be used on HASH/KEY partitions",
ER_REORG_HASH_ONLY_ON_SAME_NO: "REORGANIZE PARTITION can only be used to reorganize partitions not to change their numbers",
ER_REORG_NO_PARAM_ERROR: "REORGANIZE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs",
ER_ONLY_ON_RANGE_LIST_PARTITION: "%-.64s PARTITION can only be used on RANGE/LIST partitions",
ER_ADD_PARTITION_SUBPART_ERROR: "Trying to Add partition(s) with wrong number of subpartitions",
ER_ADD_PARTITION_NO_NEW_PARTITION: "At least one partition must be added",
ER_COALESCE_PARTITION_NO_PARTITION: "At least one partition must be coalesced",
ER_REORG_PARTITION_NOT_EXIST: "More partitions to reorganize than there are partitions",
ER_SAME_NAME_PARTITION: "Duplicate partition name %-.192s",
ER_NO_BINLOG_ERROR: "It is not allowed to shut off binlog on this command",
ER_CONSECUTIVE_REORG_PARTITIONS: "When reorganizing a set of partitions they must be in consecutive order",
ER_REORG_OUTSIDE_RANGE: "Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range",
ER_PARTITION_FUNCTION_FAILURE: "Partition function not supported in this version for this handler",
ER_PART_STATE_ERROR: "Partition state cannot be defined from CREATE/ALTER TABLE",
ER_LIMITED_PART_RANGE: "The %-.64s handler only supports 32 bit integers in VALUES",
ER_PLUGIN_IS_NOT_LOADED: "Plugin '%-.192s' is not loaded",
ER_WRONG_VALUE: "Incorrect %-.32s value: '%-.128s'",
ER_NO_PARTITION_FOR_GIVEN_VALUE: "Table has no partition for value %-.64s",
ER_FILEGROUP_OPTION_ONLY_ONCE: "It is not allowed to specify %s more than once",
ER_CREATE_FILEGROUP_FAILED: "Failed to create %s",
ER_DROP_FILEGROUP_FAILED: "Failed to drop %s",
ER_TABLESPACE_AUTO_EXTEND_ERROR: "The handler doesn't support autoextend of tablespaces",
ER_WRONG_SIZE_NUMBER: "A size parameter was incorrectly specified, either number or on the form 10M",
ER_SIZE_OVERFLOW_ERROR: "The size number was correct but we don't allow the digit part to be more than 2 billion",
ER_ALTER_FILEGROUP_FAILED: "Failed to alter: %s",
ER_BINLOG_ROW_LOGGING_FAILED: "Writing one row to the row-based binary log failed",
ER_BINLOG_ROW_WRONG_TABLE_DEF: "Table definition on master and slave does not match: %s",
ER_BINLOG_ROW_RBR_TO_SBR: "Slave running with --log-slave-updates must use row-based binary logging to be able to replicate row-based binary log events",
ER_EVENT_ALREADY_EXISTS: "Event '%-.192s' already exists",
ER_EVENT_STORE_FAILED: "Failed to store event %s. Error code %d from storage engine.",
ER_EVENT_DOES_NOT_EXIST: "Unknown event '%-.192s'",
ER_EVENT_CANT_ALTER: "Failed to alter event '%-.192s'",
ER_EVENT_DROP_FAILED: "Failed to drop %s",
ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG: "INTERVAL is either not positive or too big",
ER_EVENT_ENDS_BEFORE_STARTS: "ENDS is either invalid or before STARTS",
ER_EVENT_EXEC_TIME_IN_THE_PAST: "Event execution time is in the past. Event has been disabled",
ER_EVENT_OPEN_TABLE_FAILED: "Failed to open mysql.event",
ER_EVENT_NEITHER_M_EXPR_NOR_M_AT: "No datetime expression provided",
ER_OBSOLETE_COL_COUNT_DOESNT_MATCH_CORRUPTED: "Column count of mysql.%s is wrong. Expected %d, found %d. The table is probably corrupted",
ER_OBSOLETE_CANNOT_LOAD_FROM_TABLE: "Cannot load from mysql.%s. The table is probably corrupted",
ER_EVENT_CANNOT_DELETE: "Failed to delete the event from mysql.event",
ER_EVENT_COMPILE_ERROR: "Error during compilation of event's body",
ER_EVENT_SAME_NAME: "Same old and new event name",
ER_EVENT_DATA_TOO_LONG: "Data for column '%s' too long",
ER_DROP_INDEX_FK: "Cannot drop index '%-.192s': needed in a foreign key constraint",
ER_WARN_DEPRECATED_SYNTAX_WITH_VER: "The syntax '%s' is deprecated and will be removed in MySQL %s. Please use %s instead",
ER_CANT_WRITE_LOCK_LOG_TABLE: "You can't write-lock a log table. Only read access is possible",
ER_CANT_LOCK_LOG_TABLE: "You can't use locks with log tables.",
ER_FOREIGN_DUPLICATE_KEY_OLD_UNUSED: "Upholding foreign key constraints for table '%.192s', entry '%-.192s', key %d would lead to a duplicate entry",
ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE: "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MySQL %d, now running %d. Please use mysql_upgrade to fix this error.",
ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR: "Cannot switch out of the row-based binary log format when the session has open temporary tables",
ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT: "Cannot change the binary logging format inside a stored function or trigger",
ER_NDB_CANT_SWITCH_BINLOG_FORMAT: "The NDB cluster engine does not support changing the binlog format on the fly yet",
ER_PARTITION_NO_TEMPORARY: "Cannot create temporary table with partitions",
ER_PARTITION_CONST_DOMAIN_ERROR: "Partition constant is out of partition function domain",
ER_PARTITION_FUNCTION_IS_NOT_ALLOWED: "This partition function is not allowed",
ER_DDL_LOG_ERROR: "Error in DDL log",
ER_NULL_IN_VALUES_LESS_THAN: "Not allowed to use NULL value in VALUES LESS THAN",
ER_WRONG_PARTITION_NAME: "Incorrect partition name",
ER_CANT_CHANGE_TX_CHARACTERISTICS: "Transaction characteristics can't be changed while a transaction is in progress",
ER_DUP_ENTRY_AUTOINCREMENT_CASE: "ALTER TABLE causes auto_increment resequencing, resulting in duplicate entry '%-.192s' for key '%-.192s'",
ER_EVENT_MODIFY_QUEUE_ERROR: "Internal scheduler error %d",
ER_EVENT_SET_VAR_ERROR: "Error during starting/stopping of the scheduler. Error code %u",
ER_PARTITION_MERGE_ERROR: "Engine cannot be used in partitioned tables",
ER_CANT_ACTIVATE_LOG: "Cannot activate '%-.64s' log",
ER_RBR_NOT_AVAILABLE: "The server was not built with row-based replication",
ER_BASE64_DECODE_ERROR: "Decoding of base64 string failed",
ER_EVENT_RECURSION_FORBIDDEN: "Recursion of EVENT DDL statements is forbidden when body is present",
ER_EVENTS_DB_ERROR: "Cannot proceed because system tables used by Event Scheduler were found damaged at server start",
ER_ONLY_INTEGERS_ALLOWED: "Only integers allowed as number here",
ER_UNSUPORTED_LOG_ENGINE: "This storage engine cannot be used for log tables\"",
ER_BAD_LOG_STATEMENT: "You cannot '%s' a log table if logging is enabled",
ER_CANT_RENAME_LOG_TABLE: "Cannot rename '%s'. When logging enabled, rename to/from log table must rename two tables: the log table to an archive table and another table back to '%s'",
ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT: "Incorrect parameter count in the call to native function '%-.192s'",
ER_WRONG_PARAMETERS_TO_NATIVE_FCT: "Incorrect parameters in the call to native function '%-.192s'",
ER_WRONG_PARAMETERS_TO_STORED_FCT: "Incorrect parameters in the call to stored function '%-.192s'",
ER_NATIVE_FCT_NAME_COLLISION: "This function '%-.192s' has the same name as a native function",
ER_DUP_ENTRY_WITH_KEY_NAME: "Duplicate entry '%-.64s' for key '%-.192s'",
ER_BINLOG_PURGE_EMFILE: "Too many files opened, please execute the command again",
ER_EVENT_CANNOT_CREATE_IN_THE_PAST: "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation.",
ER_EVENT_CANNOT_ALTER_IN_THE_PAST: "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was not changed. Specify a time in the future.",
ER_SLAVE_INCIDENT: "The incident %s occured on the master. Message: %-.64s",
ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT: "Table has no partition for some existing values",
ER_BINLOG_UNSAFE_STATEMENT: "Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. %s",
ER_SLAVE_FATAL_ERROR: "Fatal error: %s",
ER_SLAVE_RELAY_LOG_READ_FAILURE: "Relay log read failure: %s",
ER_SLAVE_RELAY_LOG_WRITE_FAILURE: "Relay log write failure: %s",
ER_SLAVE_CREATE_EVENT_FAILURE: "Failed to create %s",
ER_SLAVE_MASTER_COM_FAILURE: "Master command %s failed: %s",
ER_BINLOG_LOGGING_IMPOSSIBLE: "Binary logging not possible. Message: %s",
ER_VIEW_NO_CREATION_CTX: "View `%-.64s`.`%-.64s` has no creation context",
ER_VIEW_INVALID_CREATION_CTX: "Creation context of view `%-.64s`.`%-.64s' is invalid",
ER_SR_INVALID_CREATION_CTX: "Creation context of stored routine `%-.64s`.`%-.64s` is invalid",
ER_TRG_CORRUPTED_FILE: "Corrupted TRG file for table `%-.64s`.`%-.64s`",
ER_TRG_NO_CREATION_CTX: "Triggers for table `%-.64s`.`%-.64s` have no creation context",
ER_TRG_INVALID_CREATION_CTX: "Trigger creation context of table `%-.64s`.`%-.64s` is invalid",
ER_EVENT_INVALID_CREATION_CTX: "Creation context of event `%-.64s`.`%-.64s` is invalid",
ER_TRG_CANT_OPEN_TABLE: "Cannot open table for trigger `%-.64s`.`%-.64s`",
ER_CANT_CREATE_SROUTINE: "Cannot create stored routine `%-.64s`. Check warnings",
ER_NEVER_USED: "Ambiguous slave modes combination. %s",
ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT: "The BINLOG statement of type `%s` was not preceded by a format description BINLOG statement.",
ER_SLAVE_CORRUPT_EVENT: "Corrupted replication event was detected",
ER_LOAD_DATA_INVALID_COLUMN: "Invalid column reference (%-.64s) in LOAD DATA",
ER_LOG_PURGE_NO_FILE: "Being purged log %s was not found",
ER_XA_RBTIMEOUT: "XA_RBTIMEOUT: Transaction branch was rolled back: took too long",
ER_XA_RBDEADLOCK: "XA_RBDEADLOCK: Transaction branch was rolled back: deadlock was detected",
ER_NEED_REPREPARE: "Prepared statement needs to be re-prepared",
ER_DELAYED_NOT_SUPPORTED: "DELAYED option not supported for table '%-.192s'",
WARN_NO_MASTER_INFO: "The master info structure does not exist",
WARN_OPTION_IGNORED: "<%-.64s> option ignored",
WARN_PLUGIN_DELETE_BUILTIN: "Built-in plugins cannot be deleted",
WARN_PLUGIN_BUSY: "Plugin is busy and will be uninstalled on shutdown",
ER_VARIABLE_IS_READONLY: "%s variable '%s' is read-only. Use SET %s to assign the value",
ER_WARN_ENGINE_TRANSACTION_ROLLBACK: "Storage engine %s does not support rollback for this statement. Transaction rolled back and must be restarted",
ER_SLAVE_HEARTBEAT_FAILURE: "Unexpected master's heartbeat data: %s",
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE: "The requested value for the heartbeat period is either negative or exceeds the maximum allowed (%s seconds).",
ER_NDB_REPLICATION_SCHEMA_ERROR: "Bad schema for mysql.ndb_replication table. Message: %-.64s",
ER_CONFLICT_FN_PARSE_ERROR: "Error in parsing conflict function. Message: %-.64s",
ER_EXCEPTIONS_WRITE_ERROR: "Write to exceptions table failed. Message: %-.128s\"",
ER_TOO_LONG_TABLE_COMMENT: "Comment for table '%-.64s' is too long (max = %lu)",
ER_TOO_LONG_FIELD_COMMENT: "Comment for field '%-.64s' is too long (max = %lu)",
ER_FUNC_INEXISTENT_NAME_COLLISION: "FUNCTION %s does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual",
ER_DATABASE_NAME: "Database",
ER_TABLE_NAME: "Table",
ER_PARTITION_NAME: "Partition",
ER_SUBPARTITION_NAME: "Subpartition",
ER_TEMPORARY_NAME: "Temporary",
ER_RENAMED_NAME: "Renamed",
ER_TOO_MANY_CONCURRENT_TRXS: "Too many active concurrent transactions",
WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED: "Non-ASCII separator arguments are not fully supported",
ER_DEBUG_SYNC_TIMEOUT: "debug sync point wait timed out",
ER_DEBUG_SYNC_HIT_LIMIT: "debug sync point hit limit reached",
ER_DUP_SIGNAL_SET: "Duplicate condition information item '%s'",
ER_SIGNAL_WARN: "Unhandled user-defined warning condition",
ER_SIGNAL_NOT_FOUND: "Unhandled user-defined not found condition",
ER_SIGNAL_EXCEPTION: "Unhandled user-defined exception condition",
ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER: "RESIGNAL when handler not active",
ER_SIGNAL_BAD_CONDITION_TYPE: "SIGNAL/RESIGNAL can only use a CONDITION defined with SQLSTATE",
WARN_COND_ITEM_TRUNCATED: "Data truncated for condition item '%s'",
ER_COND_ITEM_TOO_LONG: "Data too long for condition item '%s'",
ER_UNKNOWN_LOCALE: "Unknown locale: '%-.64s'",
ER_SLAVE_IGNORE_SERVER_IDS: "The requested server id %d clashes with the slave startup option --replicate-same-server-id",
ER_QUERY_CACHE_DISABLED: "Query cache is disabled; restart the server with query_cache_type=1 to enable it",
ER_SAME_NAME_PARTITION_FIELD: "Duplicate partition field name '%-.192s'",
ER_PARTITION_COLUMN_LIST_ERROR: "Inconsistency in usage of column lists for partitioning",
ER_WRONG_TYPE_COLUMN_VALUE_ERROR: "Partition column values of incorrect type",
ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR: "Too many fields in '%-.192s'",
ER_MAXVALUE_IN_VALUES_IN: "Cannot use MAXVALUE as value in VALUES IN",
ER_TOO_MANY_VALUES_ERROR: "Cannot have more than one value for this type of %-.64s partitioning",
ER_ROW_SINGLE_PARTITION_FIELD_ERROR: "Row expressions in VALUES IN only allowed for multi-field column partitioning",
ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD: "Field '%-.192s' is of a not allowed type for this type of partitioning",
ER_PARTITION_FIELDS_TOO_LONG: "The total length of the partitioning fields is too large",
ER_BINLOG_ROW_ENGINE_AND_STMT_ENGINE: "Cannot execute statement: impossible to write to binary log since both row-incapable engines and statement-incapable engines are involved.",
ER_BINLOG_ROW_MODE_AND_STMT_ENGINE: "Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = ROW and at least one table uses a storage engine limited to statement-based logging.",
ER_BINLOG_UNSAFE_AND_STMT_ENGINE: "Cannot execute statement: impossible to write to binary log since statement is unsafe, storage engine is limited to statement-based logging, and BINLOG_FORMAT = MIXED. %s",
ER_BINLOG_ROW_INJECTION_AND_STMT_ENGINE: "Cannot execute statement: impossible to write to binary log since statement is in row format and at least one table uses a storage engine limited to statement-based logging.",
ER_BINLOG_STMT_MODE_AND_ROW_ENGINE: "Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = STATEMENT and at least one table uses a storage engine limited to row-based logging.%s",
ER_BINLOG_ROW_INJECTION_AND_STMT_MODE: "Cannot execute statement: impossible to write to binary log since statement is in row format and BINLOG_FORMAT = STATEMENT.",
ER_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE: "Cannot execute statement: impossible to write to binary log since more than one engine is involved and at least one engine is self-logging.",
ER_BINLOG_UNSAFE_LIMIT: "The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted.",
ER_BINLOG_UNSAFE_INSERT_DELAYED: "The statement is unsafe because it uses INSERT DELAYED. This is unsafe because the times when rows are inserted cannot be predicted.",
ER_BINLOG_UNSAFE_SYSTEM_TABLE: "The statement is unsafe because it uses the general log, slow query log, or performance_schema table(s). This is unsafe because system tables may differ on slaves.",
ER_BINLOG_UNSAFE_AUTOINC_COLUMNS: "Statement is unsafe because it invokes a trigger or a stored function that inserts into an AUTO_INCREMENT column. Inserted values cannot be logged correctly.",
ER_BINLOG_UNSAFE_UDF: "Statement is unsafe because it uses a UDF which may not return the same value on the slave.",
ER_BINLOG_UNSAFE_SYSTEM_VARIABLE: "Statement is unsafe because it uses a system variable that may have a different value on the slave.",
ER_BINLOG_UNSAFE_SYSTEM_FUNCTION: "Statement is unsafe because it uses a system function that may return a different value on the slave.",
ER_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS: "Statement is unsafe because it accesses a non-transactional table after accessing a transactional table within the same transaction.",
ER_MESSAGE_AND_STATEMENT: "%s Statement: %s",
ER_SLAVE_CONVERSION_FAILED: "Column %d of table '%-.192s.%-.192s' cannot be converted from type '%-.32s' to type '%-.32s'",
ER_SLAVE_CANT_CREATE_CONVERSION: "Can't create conversion table for table '%-.192s.%-.192s'",
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT: "Cannot modify @@session.binlog_format inside a transaction",
ER_PATH_LENGTH: "The path specified for %.64s is too long.",
ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT: "'%s' is deprecated and will be removed in a future release.",
ER_WRONG_NATIVE_TABLE_STRUCTURE: "Native table '%-.64s'.'%-.64s' has the wrong structure",
ER_WRONG_PERFSCHEMA_USAGE: "Invalid performance_schema usage.",
ER_WARN_I_S_SKIPPED_TABLE: "Table '%s'.'%s' was skipped since its definition is being modified by concurrent DDL statement",
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT: "Cannot modify @@session.binlog_direct_non_transactional_updates inside a transaction",
ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT: "Cannot change the binlog direct flag inside a stored function or trigger",
ER_SPATIAL_MUST_HAVE_GEOM_COL: "A SPATIAL index may only contain a geometrical type column",
ER_TOO_LONG_INDEX_COMMENT: "Comment for index '%-.64s' is too long (max = %lu)",
ER_LOCK_ABORTED: "Wait on a lock was aborted due to a pending exclusive lock",
ER_DATA_OUT_OF_RANGE: "%s value is out of range in '%s'",
ER_WRONG_SPVAR_TYPE_IN_LIMIT: "A variable of a non-integer based type in LIMIT clause",
ER_BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE: "Mixing self-logging and non-self-logging engines in a statement is unsafe.",
ER_BINLOG_UNSAFE_MIXED_STATEMENT: "Statement accesses nontransactional table as well as transactional or temporary table, and writes to any of them.",
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN: "Cannot modify @@session.sql_log_bin inside a transaction",
ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN: "Cannot change the sql_log_bin inside a stored function or trigger",
ER_FAILED_READ_FROM_PAR_FILE: "Failed to read from the .par file",
ER_VALUES_IS_NOT_INT_TYPE_ERROR: "VALUES value for partition '%-.64s' must have type INT",
ER_ACCESS_DENIED_NO_PASSWORD_ERROR: "Access denied for user '%-.48s'@'%-.64s'",
ER_SET_PASSWORD_AUTH_PLUGIN: "SET PASSWORD has no significance for users authenticating via plugins",
ER_GRANT_PLUGIN_USER_EXISTS: "GRANT with IDENTIFIED WITH is illegal because the user %-.*s already exists",
ER_TRUNCATE_ILLEGAL_FK: "Cannot truncate a table referenced in a foreign key constraint (%.192s)",
ER_PLUGIN_IS_PERMANENT: "Plugin '%s' is force_plus_permanent and can not be unloaded",
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN: "The requested value for the heartbeat period is less than 1 millisecond. The value is reset to 0, meaning that heartbeating will effectively be disabled.",
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX: "The requested value for the heartbeat period exceeds the value of `slave_net_timeout' seconds. A sensible value for the period should be less than the timeout.",
ER_STMT_CACHE_FULL: "Multi-row statements required more than 'max_binlog_stmt_cache_size' bytes of storage; increase this mysqld variable and try again",
ER_MULTI_UPDATE_KEY_CONFLICT: "Primary key/partition key update is not allowed since the table is updated both as '%-.192s' and '%-.192s'.",
ER_TABLE_NEEDS_REBUILD: "Table rebuild required. Please do \"ALTER TABLE `%-.32s` FORCE\" or dump/reload to fix it!",
WARN_OPTION_BELOW_LIMIT: "The value of '%s' should be no less than the value of '%s'",
ER_INDEX_COLUMN_TOO_LONG: "Index column size too large. The maximum column size is %lu bytes.",
ER_ERROR_IN_TRIGGER_BODY: "Trigger '%-.64s' has an error in its body: '%-.256s'",
ER_ERROR_IN_UNKNOWN_TRIGGER_BODY: "Unknown trigger has an error in its body: '%-.256s'",
ER_INDEX_CORRUPT: "Index %s is corrupted",
ER_UNDO_RECORD_TOO_BIG: "Undo log record is too big.",
ER_BINLOG_UNSAFE_INSERT_IGNORE_SELECT: "INSERT IGNORE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.",
ER_BINLOG_UNSAFE_INSERT_SELECT_UPDATE: "INSERT... SELECT... ON DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are updated. This order cannot be predicted and may differ on master and the slave.",
ER_BINLOG_UNSAFE_REPLACE_SELECT: "REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.",
ER_BINLOG_UNSAFE_CREATE_IGNORE_SELECT: "CREATE... IGNORE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.",
ER_BINLOG_UNSAFE_CREATE_REPLACE_SELECT: "CREATE... REPLACE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.",
ER_BINLOG_UNSAFE_UPDATE_IGNORE: "UPDATE IGNORE is unsafe because the order in which rows are updated determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.",
ER_PLUGIN_NO_UNINSTALL: "Plugin '%s' is marked as not dynamically uninstallable. You have to stop the server to uninstall it.",
ER_PLUGIN_NO_INSTALL: "Plugin '%s' is marked as not dynamically installable. You have to stop the server to install it.",
ER_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT: "Statements writing to a table with an auto-increment column after selecting from another table are unsafe because the order in which rows are retrieved determines what (if any) rows will be written. This order cannot be predicted and may differ on master and the slave.",
ER_BINLOG_UNSAFE_CREATE_SELECT_AUTOINC: "CREATE TABLE... SELECT... on a table with an auto-increment column is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are inserted. This order cannot be predicted and may differ on master and the slave.",
ER_BINLOG_UNSAFE_INSERT_TWO_KEYS: "INSERT... ON DUPLICATE KEY UPDATE on a table with more than one UNIQUE KEY is unsafe",
ER_TABLE_IN_FK_CHECK: "Table is being used in foreign key check.",
ER_UNSUPPORTED_ENGINE: "Storage engine '%s' does not support system tables. [%s.%s]",
ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST: "INSERT into autoincrement field which is not the first part in the composed primary key is unsafe.",
ER_CANNOT_LOAD_FROM_TABLE_V2: "Cannot load from %s.%s. The table is probably corrupted",
ER_MASTER_DELAY_VALUE_OUT_OF_RANGE: "The requested value %u for the master delay exceeds the maximum %u",
ER_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT: "Only Format_description_log_event and row events are allowed in BINLOG statements (but %s was provided)",
ER_PARTITION_EXCHANGE_DIFFERENT_OPTION: "Non matching attribute '%-.64s' between partition and table",
ER_PARTITION_EXCHANGE_PART_TABLE: "Table to exchange with partition is partitioned: '%-.64s'",
ER_PARTITION_EXCHANGE_TEMP_TABLE: "Table to exchange with partition is temporary: '%-.64s'",
ER_PARTITION_INSTEAD_OF_SUBPARTITION: "Subpartitioned table, use subpartition instead of partition",
ER_UNKNOWN_PARTITION: "Unknown partition '%-.64s' in table '%-.64s'",
ER_TABLES_DIFFERENT_METADATA: "Tables have different definitions",
ER_ROW_DOES_NOT_MATCH_PARTITION: "Found a row that does not match the partition",
ER_BINLOG_CACHE_SIZE_GREATER_THAN_MAX: "Option binlog_cache_size (%lu) is greater than max_binlog_cache_size (%lu); setting binlog_cache_size equal to max_binlog_cache_size.",
ER_WARN_INDEX_NOT_APPLICABLE: "Cannot use %-.64s access on index '%-.64s' due to type or collation conversion on field '%-.64s'",
ER_PARTITION_EXCHANGE_FOREIGN_KEY: "Table to exchange with partition has foreign key references: '%-.64s'",
ER_NO_SUCH_KEY_VALUE: "Key value '%-.192s' was not found in table '%-.192s.%-.192s'",
ER_RPL_INFO_DATA_TOO_LONG: "Data for column '%s' too long",
ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE: "Replication event checksum verification failed while reading from network.",
ER_BINLOG_READ_EVENT_CHECKSUM_FAILURE: "Replication event checksum verification failed while reading from a log file.",
ER_BINLOG_STMT_CACHE_SIZE_GREATER_THAN_MAX: "Option binlog_stmt_cache_size (%lu) is greater than max_binlog_stmt_cache_size (%lu); setting binlog_stmt_cache_size equal to max_binlog_stmt_cache_size.",
ER_CANT_UPDATE_TABLE_IN_CREATE_TABLE_SELECT: "Can't update table '%-.192s' while '%-.192s' is being created.",
ER_PARTITION_CLAUSE_ON_NONPARTITIONED: "PARTITION () clause on non partitioned table",
ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET: "Found a row not matching the given partition set",
ER_NO_SUCH_PARTITION__UNUSED: "partition '%-.64s' doesn't exist",
ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE: "Failure while changing the type of replication repository: %s.",
ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_CREATED_TEMP_TABLE: "The creation of some temporary tables could not be rolled back.",
ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_DROPPED_TEMP_TABLE: "Some temporary tables were dropped, but these operations could not be rolled back.",
ER_MTS_FEATURE_IS_NOT_SUPPORTED: "%s is not supported in multi-threaded slave mode. %s",
ER_MTS_UPDATED_DBS_GREATER_MAX: "The number of modified databases exceeds the maximum %d; the database names will not be included in the replication event metadata.",
ER_MTS_CANT_PARALLEL: "Cannot execute the current event group in the parallel mode. Encountered event %s, relay-log name %s, position %s which prevents execution of this event group in parallel mode. Reason: %s.",
ER_MTS_INCONSISTENT_DATA: "%s",
ER_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING: "FULLTEXT index is not supported for partitioned tables.",
ER_DA_INVALID_CONDITION_NUMBER: "Invalid condition number",
ER_INSECURE_PLAIN_TEXT: "Sending passwords in plain text without SSL/TLS is extremely insecure.",
ER_INSECURE_CHANGE_MASTER: "Storing MySQL user name or password information in the master.info repository is not secure and is therefore not recommended. Please see the MySQL Manual for more about this issue and possible alternatives.",
ER_FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO: "Foreign key constraint for table '%.192s', record '%-.192s' would lead to a duplicate entry in table '%.192s', key '%.192s'",
ER_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO: "Foreign key constraint for table '%.192s', record '%-.192s' would lead to a duplicate entry in a child table",
ER_SQLTHREAD_WITH_SECURE_SLAVE: "Setting authentication options is not possible when only the Slave SQL Thread is being started.",
ER_TABLE_HAS_NO_FT: "The table does not have FULLTEXT index to support this query",
ER_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER: "The system variable %.200s cannot be set in stored functions or triggers.",
ER_VARIABLE_NOT_SETTABLE_IN_TRANSACTION: "The system variable %.200s cannot be set when there is an ongoing transaction.",
ER_GTID_NEXT_IS_NOT_IN_GTID_NEXT_LIST: "The system variable @@SESSION.GTID_NEXT has the value %.200s, which is not listed in @@SESSION.GTID_NEXT_LIST.",
ER_CANT_CHANGE_GTID_NEXT_IN_TRANSACTION_WHEN_GTID_NEXT_LIST_IS_NULL: "When @@SESSION.GTID_NEXT_LIST == NULL, the system variable @@SESSION.GTID_NEXT cannot change inside a transaction.",
ER_SET_STATEMENT_CANNOT_INVOKE_FUNCTION: "The statement 'SET %.200s' cannot invoke a stored function.",
ER_GTID_NEXT_CANT_BE_AUTOMATIC_IF_GTID_NEXT_LIST_IS_NON_NULL: "The system variable @@SESSION.GTID_NEXT cannot be 'AUTOMATIC' when @@SESSION.GTID_NEXT_LIST is non-NULL.",
ER_SKIPPING_LOGGED_TRANSACTION: "Skipping transaction %.200s because it has already been executed and logged.",
ER_MALFORMED_GTID_SET_SPECIFICATION: "Malformed GTID set specification '%.200s'.",
ER_MALFORMED_GTID_SET_ENCODING: "Malformed GTID set encoding.",
ER_MALFORMED_GTID_SPECIFICATION: "Malformed GTID specification '%.200s'.",
ER_GNO_EXHAUSTED: "Impossible to generate Global Transaction Identifier: the integer component reached the maximal value. Restart the server with a new server_uuid.",
ER_BAD_SLAVE_AUTO_POSITION: "Parameters MASTER_LOG_FILE, MASTER_LOG_POS, RELAY_LOG_FILE and RELAY_LOG_POS cannot be set when MASTER_AUTO_POSITION is active.",
ER_AUTO_POSITION_REQUIRES_GTID_MODE_ON: "CHANGE MASTER TO MASTER_AUTO_POSITION = 1 can only be executed when @@GLOBAL.GTID_MODE = ON.",
ER_CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET: "Cannot execute statements with implicit commit inside a transaction when @@SESSION.GTID_NEXT != AUTOMATIC or @@SESSION.GTID_NEXT_LIST != NULL.",
ER_GTID_MODE_2_OR_3_REQUIRES_ENFORCE_GTID_CONSISTENCY_ON: "@@GLOBAL.GTID_MODE = ON or UPGRADE_STEP_2 requires @@GLOBAL.ENFORCE_GTID_CONSISTENCY = 1.",
ER_GTID_MODE_REQUIRES_BINLOG: "@@GLOBAL.GTID_MODE = ON or UPGRADE_STEP_1 or UPGRADE_STEP_2 requires --log-bin and --log-slave-updates.",
ER_CANT_SET_GTID_NEXT_TO_GTID_WHEN_GTID_MODE_IS_OFF: "@@SESSION.GTID_NEXT cannot be set to UUID:NUMBER when @@GLOBAL.GTID_MODE = OFF.",
ER_CANT_SET_GTID_NEXT_TO_ANONYMOUS_WHEN_GTID_MODE_IS_ON: "@@SESSION.GTID_NEXT cannot be set to ANONYMOUS when @@GLOBAL.GTID_MODE = ON.",
ER_CANT_SET_GTID_NEXT_LIST_TO_NON_NULL_WHEN_GTID_MODE_IS_OFF: "@@SESSION.GTID_NEXT_LIST cannot be set to a non-NULL value when @@GLOBAL.GTID_MODE = OFF.",
ER_FOUND_GTID_EVENT_WHEN_GTID_MODE_IS_OFF: "Found a Gtid_log_event or Previous_gtids_log_event when @@GLOBAL.GTID_MODE = OFF.",
ER_GTID_UNSAFE_NON_TRANSACTIONAL_TABLE: "When @@GLOBAL.ENFORCE_GTID_CONSISTENCY = 1, updates to non-transactional tables can only be done in either autocommitted statements or single-statement transactions, and never in the same statement as updates to transactional tables.",
ER_GTID_UNSAFE_CREATE_SELECT: "CREATE TABLE ... SELECT is forbidden when @@GLOBAL.ENFORCE_GTID_CONSISTENCY = 1.",
ER_GTID_UNSAFE_CREATE_DROP_TEMPORARY_TABLE_IN_TRANSACTION: "When @@GLOBAL.ENFORCE_GTID_CONSISTENCY = 1, the statements CREATE TEMPORARY TABLE and DROP TEMPORARY TABLE can be executed in a non-transactional context only, and require that AUTOCOMMIT = 1.",
ER_GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME: "The value of @@GLOBAL.GTID_MODE can only change one step at a time: OFF <-> UPGRADE_STEP_1 <-> UPGRADE_STEP_2 <-> ON. Also note that this value must be stepped up or down simultaneously on all servers; see the Manual for instructions.",
ER_MASTER_HAS_PURGED_REQUIRED_GTIDS: "The slave is connecting using CHANGE MASTER TO MASTER_AUTO_POSITION = 1, but the master has purged binary logs containing GTIDs that the slave requires.",
ER_CANT_SET_GTID_NEXT_WHEN_OWNING_GTID: "@@SESSION.GTID_NEXT cannot be changed by a client that owns a GTID. The client owns %s. Ownership is released on COMMIT or ROLLBACK.",
ER_UNKNOWN_EXPLAIN_FORMAT: "Unknown EXPLAIN format name: '%s'",
ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION: "Cannot execute statement in a READ ONLY transaction.",
ER_TOO_LONG_TABLE_PARTITION_COMMENT: "Comment for table partition '%-.64s' is too long (max = %lu)",
ER_SLAVE_CONFIGURATION: "Slave is not configured or failed to initialize properly. You must at least set --server-id to enable either a master or a slave. Additional error messages can be found in the MySQL error log.",
ER_INNODB_FT_LIMIT: "InnoDB presently supports one FULLTEXT index creation at a time",
ER_INNODB_NO_FT_TEMP_TABLE: "Cannot create FULLTEXT index on temporary InnoDB table",
ER_INNODB_FT_WRONG_DOCID_COLUMN: "Column '%-.192s' is of wrong type for an InnoDB FULLTEXT index",
ER_INNODB_FT_WRONG_DOCID_INDEX: "Index '%-.192s' is of wrong type for an InnoDB FULLTEXT index",
ER_INNODB_ONLINE_LOG_TOO_BIG: "Creating index '%-.192s' required more than 'innodb_online_alter_log_max_size' bytes of modification log. Please try again.",
ER_UNKNOWN_ALTER_ALGORITHM: "Unknown ALGORITHM '%s'",
ER_UNKNOWN_ALTER_LOCK: "Unknown LOCK type '%s'",
ER_MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS: "CHANGE MASTER cannot be executed when the slave was stopped with an error or killed in MTS mode. Consider using RESET SLAVE or START SLAVE UNTIL.",
ER_MTS_RECOVERY_FAILURE: "Cannot recover after SLAVE errored out in parallel execution mode. Additional error messages can be found in the MySQL error log.",
ER_MTS_RESET_WORKERS: "Cannot clean up worker info tables. Additional error messages can be found in the MySQL error log.",
ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2: "Column count of %s.%s is wrong. Expected %d, found %d. The table is probably corrupted",
ER_SLAVE_SILENT_RETRY_TRANSACTION: "Slave must silently retry current transaction",
ER_DISCARD_FK_CHECKS_RUNNING: "There is a foreign key check running on table '%-.192s'. Cannot discard the table.",
ER_TABLE_SCHEMA_MISMATCH: "Schema mismatch (%s)",
ER_TABLE_IN_SYSTEM_TABLESPACE: "Table '%-.192s' in system tablespace",
ER_IO_READ_ERROR: "IO Read error: (%lu, %s) %s",
ER_IO_WRITE_ERROR: "IO Write error: (%lu, %s) %s",
ER_TABLESPACE_MISSING: "Tablespace is missing for table '%-.192s'",
ER_TABLESPACE_EXISTS: "Tablespace for table '%-.192s' exists. Please DISCARD the tablespace before IMPORT.",
ER_TABLESPACE_DISCARDED: "Tablespace has been discarded for table '%-.192s'",
ER_INTERNAL_ERROR: "Internal error: %s",
ER_INNODB_IMPORT_ERROR: "ALTER TABLE '%-.192s' IMPORT TABLESPACE failed with error %lu : '%s'",
ER_INNODB_INDEX_CORRUPT: "Index corrupt: %s",
ER_INVALID_YEAR_COLUMN_LENGTH: "YEAR(%lu) column type is deprecated. Creating YEAR(4) column instead.",
ER_NOT_VALID_PASSWORD: "Your password does not satisfy the current policy requirements",
ER_MUST_CHANGE_PASSWORD: "You must SET PASSWORD before executing this statement",
ER_FK_NO_INDEX_CHILD: "Failed to add the foreign key constaint. Missing index for constraint '%s' in the foreign table '%s'",
ER_FK_NO_INDEX_PARENT: "Failed to add the foreign key constaint. Missing index for constraint '%s' in the referenced table '%s'",
ER_FK_FAIL_ADD_SYSTEM: "Failed to add the foreign key constraint '%s' to system tables",
ER_FK_CANNOT_OPEN_PARENT: "Failed to open the referenced table '%s'",
ER_FK_INCORRECT_OPTION: "Failed to add the foreign key constraint on table '%s'. Incorrect options in FOREIGN KEY constraint '%s'",
ER_FK_DUP_NAME: "Duplicate foreign key constraint name '%s'",
ER_PASSWORD_FORMAT: "The password hash doesn't have the expected format. Check if the correct password algorithm is being used with the PASSWORD() function.",
ER_FK_COLUMN_CANNOT_DROP: "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s'",
ER_FK_COLUMN_CANNOT_DROP_CHILD: "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s' of table '%-.192s'",
ER_FK_COLUMN_NOT_NULL: "Column '%-.192s' cannot be NOT NULL: needed in a foreign key constraint '%-.192s' SET NULL",
ER_DUP_INDEX: "Duplicate index '%-.64s' defined on the table '%-.64s.%-.64s'. This is deprecated and will be disallowed in a future release.",
ER_FK_COLUMN_CANNOT_CHANGE: "Cannot change column '%-.192s': used in a foreign key constraint '%-.192s'",
ER_FK_COLUMN_CANNOT_CHANGE_CHILD: "Cannot change column '%-.192s': used in a foreign key constraint '%-.192s' of table '%-.192s'",
ER_FK_CANNOT_DELETE_PARENT: "Cannot delete rows from table which is parent in a foreign key constraint '%-.192s' of table '%-.192s'",
ER_MALFORMED_PACKET: "Malformed communication packet.",
ER_READ_ONLY_MODE: "Running in read-only mode",
ER_GTID_NEXT_TYPE_UNDEFINED_GROUP: "When @@SESSION.GTID_NEXT is set to a GTID, you must explicitly set it again after a COMMIT or ROLLBACK. If you see this error message in the slave SQL thread, it means that a table in the current transaction is transactional on the master and non-transactional on the slave. In a client connection, it means that you executed SET @@SESSION.GTID_NEXT before a transaction and forgot to set @@SESSION.GTID_NEXT to a different identifier or to 'AUTOMATIC' after COMMIT or ROLLBACK. Current @@SESSION.GTID_NEXT is '%s'.",
ER_VARIABLE_NOT_SETTABLE_IN_SP: "The system variable %.200s cannot be set in stored procedures.",
ER_CANT_SET_GTID_PURGED_WHEN_GTID_MODE_IS_OFF: "@@GLOBAL.GTID_PURGED can only be set when @@GLOBAL.GTID_MODE = ON.",
ER_CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY: "@@GLOBAL.GTID_PURGED can only be set when @@GLOBAL.GTID_EXECUTED is empty.",
ER_CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY: "@@GLOBAL.GTID_PURGED can only be set when there are no ongoing transactions (not even in other clients).",
ER_GTID_PURGED_WAS_CHANGED: "@@GLOBAL.GTID_PURGED was changed from '%s' to '%s'.",
ER_GTID_EXECUTED_WAS_CHANGED: "@@GLOBAL.GTID_EXECUTED was changed from '%s' to '%s'.",
ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES: "Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = STATEMENT, and both replicated and non replicated tables are written to.",
ER_ALTER_OPERATION_NOT_SUPPORTED: "%s is not supported for this operation. Try %s.",
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON: "%s is not supported. Reason: %s. Try %s.",
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY: "COPY algorithm requires a lock",
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION: "Partition specific operations do not yet support LOCK/ALGORITHM",
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME: "Columns participating in a foreign key are renamed",
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE: "Cannot change column type INPLACE",
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK: "Adding foreign keys needs foreign_key_checks=OFF",
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_IGNORE: "Creating unique indexes with IGNORE requires COPY algorithm to remove duplicate rows",
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK: "Dropping a primary key is not allowed without also adding a new primary key",
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC: "Adding an auto-increment column requires a lock",
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS: "Cannot replace hidden FTS_DOC_ID with a user-visible one",
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS: "Cannot drop or rename FTS_DOC_ID",
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS: "Fulltext index creation requires a lock",
ER_SQL_SLAVE_SKIP_COUNTER_NOT_SETTABLE_IN_GTID_MODE: "sql_slave_skip_counter can not be set when the server is running with @@GLOBAL.GTID_MODE = ON. Instead, for each transaction that you want to skip, generate an empty transaction with the same GTID as the transaction",
ER_DUP_UNKNOWN_IN_INDEX: "Duplicate entry for key '%-.192s'",
ER_IDENT_CAUSES_TOO_LONG_PATH: "Long database name and identifier for object resulted in path length exceeding %d characters. Path: '%s'.",
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL: "cannot silently convert NULL values, as required in this SQL_MODE",
ER_MUST_CHANGE_PASSWORD_LOGIN: "Your password has expired. To log in you must change it using a client that supports expired passwords.",
ER_ROW_IN_WRONG_PARTITION: "Found a row in wrong partition %s",
}

66
vendor/github.com/siddontang/go-mysql/mysql/error.go generated vendored Normal file
View File

@@ -0,0 +1,66 @@
package mysql
import (
"fmt"
"github.com/juju/errors"
)
// Package-level sentinel errors (created with juju/errors).
var (
	// ErrBadConn signals that the connection is no longer usable.
	ErrBadConn = errors.New("connection was bad")
	// ErrMalformPacket signals a protocol packet that could not be parsed.
	ErrMalformPacket = errors.New("Malform packet error")
	// ErrTxDone mirrors database/sql's transaction-finished error text.
	ErrTxDone = errors.New("sql: Transaction has already been committed or rolled back")
)
// MyError is a server-style MySQL error: numeric error code, human-readable
// message, and SQLSTATE value.
type MyError struct {
	Code    uint16 // MySQL error number, e.g. 1064
	Message string // human-readable description
	State   string // five-character SQLSTATE, e.g. "42000"
}

// Error renders the error exactly as the mysql command-line client does:
// "ERROR <code> (<state>): <message>".
func (e *MyError) Error() string {
	return fmt.Sprintf("ERROR %d (%s): %s", e.Code, e.State, e.Message)
}
// NewDefaultError builds a MyError for a well-known MySQL error code. The
// message comes from the canonical MySQLErrName format string for that code,
// with args interpolated; unknown codes fall back to fmt.Sprint(args...).
// The SQLSTATE is looked up in MySQLState, defaulting to DEFAULT_MYSQL_STATE.
func NewDefaultError(errCode uint16, args ...interface{}) *MyError {
	e := &MyError{Code: errCode}

	state, known := MySQLState[errCode]
	if !known {
		state = DEFAULT_MYSQL_STATE
	}
	e.State = state

	if format, found := MySQLErrName[errCode]; found {
		e.Message = fmt.Sprintf(format, args...)
	} else {
		e.Message = fmt.Sprint(args...)
	}

	return e
}

// NewError builds a MyError with an explicit message, looking up the
// SQLSTATE for the code (DEFAULT_MYSQL_STATE when unknown).
func NewError(errCode uint16, message string) *MyError {
	state, known := MySQLState[errCode]
	if !known {
		state = DEFAULT_MYSQL_STATE
	}
	return &MyError{Code: errCode, State: state, Message: message}
}
// ErrorCode extracts the numeric code from a server error message of the
// form "ERROR 1064 (...)"; it returns 0 when no number follows the first
// token.
func ErrorCode(errMsg string) (code int) {
	// golang's Sscanf has no %* suppression, so the leading "ERROR" token
	// is read into a throwaway variable.
	var prefix string
	fmt.Sscanf(errMsg, "%s%d", &prefix, &code)
	return code
}

157
vendor/github.com/siddontang/go-mysql/mysql/field.go generated vendored Normal file
View File

@@ -0,0 +1,157 @@
package mysql
import (
"encoding/binary"
)
// FieldData is the raw bytes of one column-definition packet.
type FieldData []byte

// Field is a parsed MySQL column definition (ColumnDefinition41 packet).
// See FieldData.Parse for the wire layout it is decoded from.
type Field struct {
	Data         FieldData // original packet bytes, when parsed from the wire
	Schema       []byte    // database name
	Table        []byte    // table alias used in the query
	OrgTable     []byte    // physical table name
	Name         []byte    // column alias used in the query
	OrgName      []byte    // physical column name
	Charset      uint16    // column character set id
	ColumnLength uint32    // maximum display length
	Type         uint8     // column type (MYSQL_TYPE_* constant)
	Flag         uint16    // column flags, e.g. UNSIGNED_FLAG
	Decimal      uint8     // number of decimals

	// Only present when the packet came from COM_FIELD_LIST.
	DefaultValueLength uint64
	DefaultValue       []byte
}
// Parse decodes the raw bytes as a ColumnDefinition41 packet, walking the
// payload field by field and keeping the raw bytes in f.Data. On error the
// partially filled Field is returned along with the error.
func (p FieldData) Parse() (f *Field, err error) {
	f = new(Field)
	f.Data = p

	var n int
	pos := 0
	//skip catelog, always def
	n, err = SkipLengthEnodedString(p)
	if err != nil {
		return
	}
	pos += n

	//schema
	f.Schema, _, n, err = LengthEnodedString(p[pos:])
	if err != nil {
		return
	}
	pos += n

	//table
	f.Table, _, n, err = LengthEnodedString(p[pos:])
	if err != nil {
		return
	}
	pos += n

	//org_table
	f.OrgTable, _, n, err = LengthEnodedString(p[pos:])
	if err != nil {
		return
	}
	pos += n

	//name
	f.Name, _, n, err = LengthEnodedString(p[pos:])
	if err != nil {
		return
	}
	pos += n

	//org_name
	f.OrgName, _, n, err = LengthEnodedString(p[pos:])
	if err != nil {
		return
	}
	pos += n

	//skip oc (the fixed 0x0c "length of fixed-length fields" byte)
	pos += 1

	//charset
	f.Charset = binary.LittleEndian.Uint16(p[pos:])
	pos += 2

	//column length
	f.ColumnLength = binary.LittleEndian.Uint32(p[pos:])
	pos += 4

	//type
	f.Type = p[pos]
	pos++

	//flag
	f.Flag = binary.LittleEndian.Uint16(p[pos:])
	pos += 2

	//decimals 1
	f.Decimal = p[pos]
	pos++

	//filter [0x00][0x00]
	pos += 2

	f.DefaultValue = nil
	//if more data, command was field list (COM_FIELD_LIST responses carry a
	//default value after the fixed fields)
	if len(p) > pos {
		//length of default value lenenc-int
		f.DefaultValueLength, _, n = LengthEncodedInt(p[pos:])
		pos += n

		if pos+int(f.DefaultValueLength) > len(p) {
			err = ErrMalformPacket
			return
		}

		//default value string[$len]
		f.DefaultValue = p[pos:(pos + int(f.DefaultValueLength))]
	}

	return
}
// Dump serializes the field back into ColumnDefinition41 wire format. If
// the field still holds the raw packet it was parsed from, those bytes are
// returned unchanged. A nil receiver dumps as an all-zero field.
func (f *Field) Dump() []byte {
	if f == nil {
		f = &Field{}
	}
	if f.Data != nil {
		return []byte(f.Data)
	}

	// 48 covers the catalog literal, length prefixes and fixed-size tail.
	need := 48 + len(f.Schema) + len(f.Table) + len(f.OrgTable) +
		len(f.Name) + len(f.OrgName) + len(f.DefaultValue)
	data := make([]byte, 0, need)

	for _, s := range [][]byte{[]byte("def"), f.Schema, f.Table, f.OrgTable, f.Name, f.OrgName} {
		data = append(data, PutLengthEncodedString(s)...)
	}

	data = append(data, 0x0c) // length of the fixed-size field block
	data = append(data, Uint16ToBytes(f.Charset)...)
	data = append(data, Uint32ToBytes(f.ColumnLength)...)
	data = append(data, f.Type)
	data = append(data, Uint16ToBytes(f.Flag)...)
	data = append(data, f.Decimal)
	data = append(data, 0, 0) // filler

	if f.DefaultValue != nil {
		data = append(data, Uint64ToBytes(f.DefaultValueLength)...)
		data = append(data, f.DefaultValue...)
	}

	return data
}

27
vendor/github.com/siddontang/go-mysql/mysql/gtid.go generated vendored Normal file
View File

@@ -0,0 +1,27 @@
package mysql
import "github.com/juju/errors"
// GTIDSet abstracts over the MySQL and MariaDB GTID set representations.
type GTIDSet interface {
	// String renders the set in its flavor's textual form.
	String() string

	// Encode GTID set into binary format used in binlog dump commands
	Encode() []byte

	// Equal reports whether o denotes the same set.
	Equal(o GTIDSet) bool

	// Contain reports whether o is covered by this set.
	Contain(o GTIDSet) bool

	// Update merges the GTID parsed from GTIDStr into the set.
	Update(GTIDStr string) error
}
// ParseGTIDSet parses s with the parser matching flavor (MySQLFlavor or
// MariaDBFlavor); any other flavor is rejected.
func ParseGTIDSet(flavor string, s string) (GTIDSet, error) {
	if flavor == MySQLFlavor {
		return ParseMysqlGTIDSet(s)
	}
	if flavor == MariaDBFlavor {
		return ParseMariadbGTIDSet(s)
	}
	return nil, errors.Errorf("invalid flavor %s", flavor)
}

View File

@@ -0,0 +1,91 @@
package mysql
import (
"fmt"
"strconv"
"strings"
"github.com/juju/errors"
)
// MariadbGTID is a single MariaDB GTID in domain-server-sequence form.
// Because the package does not support multi-source replication, one GTID
// stands in for a whole MariaDB GTID set.
type MariadbGTID struct {
	DomainID       uint32 // replication domain id
	ServerID       uint32 // id of the server that generated the event
	SequenceNumber uint64 // sequence number within the domain
}
// ParseMariadbGTIDSet parses a "domain-server-sequence" string into a
// MariadbGTID. The empty string maps to the zero GTID. Multi-source
// replication is not supported, so only a single triple is accepted.
func ParseMariadbGTIDSet(str string) (GTIDSet, error) {
	if str == "" {
		return &MariadbGTID{0, 0, 0}, nil
	}

	gtid := new(MariadbGTID)

	parts := strings.Split(str, "-")
	if len(parts) != 3 {
		return gtid, errors.Errorf("invalid Mariadb GTID %v, must domain-server-sequence", str)
	}

	domainID, err := strconv.ParseUint(parts[0], 10, 32)
	if err != nil {
		return gtid, errors.Errorf("invalid MariaDB GTID Domain ID (%v): %v", parts[0], err)
	}

	serverID, err := strconv.ParseUint(parts[1], 10, 32)
	if err != nil {
		return gtid, errors.Errorf("invalid MariaDB GTID Server ID (%v): %v", parts[1], err)
	}

	sequenceID, err := strconv.ParseUint(parts[2], 10, 64)
	if err != nil {
		return gtid, errors.Errorf("invalid MariaDB GTID Sequence number (%v): %v", parts[2], err)
	}

	return &MariadbGTID{
		DomainID:       uint32(domainID),
		ServerID:       uint32(serverID),
		SequenceNumber: sequenceID}, nil
}
// String renders the GTID as "domain-server-sequence"; the zero GTID
// renders as the empty string.
func (gtid *MariadbGTID) String() string {
	if *gtid == (MariadbGTID{}) {
		return ""
	}
	return fmt.Sprintf("%d-%d-%d", gtid.DomainID, gtid.ServerID, gtid.SequenceNumber)
}

// Encode returns the textual form as bytes (MariaDB's binlog dump protocol
// uses the text representation).
func (gtid *MariadbGTID) Encode() []byte {
	return []byte(gtid.String())
}

// Equal reports whether o is a MariadbGTID with identical fields.
func (gtid *MariadbGTID) Equal(o GTIDSet) bool {
	if other, ok := o.(*MariadbGTID); ok {
		return *gtid == *other
	}
	return false
}

// Contain reports whether o is in the same domain and not ahead of gtid.
func (gtid *MariadbGTID) Contain(o GTIDSet) bool {
	other, ok := o.(*MariadbGTID)
	if !ok {
		return false
	}
	return gtid.DomainID == other.DomainID && gtid.SequenceNumber >= other.SequenceNumber
}

// Update replaces the receiver with the GTID parsed from GTIDStr.
func (gtid *MariadbGTID) Update(GTIDStr string) error {
	parsed, err := ParseMariadbGTIDSet(GTIDStr)
	if err != nil {
		return err
	}
	*gtid = *(parsed.(*MariadbGTID))
	return nil
}

View File

@@ -0,0 +1,429 @@
package mysql
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"sort"
"strconv"
"strings"
"github.com/juju/errors"
"github.com/satori/go.uuid"
"github.com/siddontang/go/hack"
)
// Like MySQL GTID Interval struct, [start, stop), left closed and right open
// See MySQL rpl_gtid.h
type Interval struct {
	// The first GID of this interval.
	Start int64
	// The first GID after this interval.
	Stop int64
}
// Interval is [start, stop), but the GTID string's format is [n] or [n1-n2], closed interval
//
// parseInterval converts the closed-interval text form into the half-open
// Interval, rejecting malformed numbers and empty/reversed ranges.
//
// Fix: in the "n1-n2" case the error from parsing n1 used to be silently
// overwritten by the n2 parse, so an input like "x-5" produced
// Interval{0, 6} with a nil error.
func parseInterval(str string) (i Interval, err error) {
	p := strings.Split(str, "-")
	switch len(p) {
	case 1:
		// Single number n means the one-element interval [n, n+1).
		i.Start, err = strconv.ParseInt(p[0], 10, 64)
		i.Stop = i.Start + 1
	case 2:
		i.Start, err = strconv.ParseInt(p[0], 10, 64)
		if err != nil {
			return
		}
		i.Stop, err = strconv.ParseInt(p[1], 10, 64)
		// The textual end is inclusive; Stop is exclusive.
		i.Stop = i.Stop + 1
	default:
		err = errors.Errorf("invalid interval format, must n[-n]")
	}

	if err != nil {
		return
	}

	if i.Stop <= i.Start {
		err = errors.Errorf("invalid interval format, must n[-n] and the end must >= start")
	}

	return
}
// String prints the closed-interval text form: a single number for a
// one-element interval, "start-end" (inclusive end) otherwise.
func (i Interval) String() string {
	if i.Stop != i.Start+1 {
		return fmt.Sprintf("%d-%d", i.Start, i.Stop-1)
	}
	return strconv.FormatInt(i.Start, 10)
}
// IntervalSlice is an ordered collection of Intervals; it implements
// sort.Interface.
type IntervalSlice []Interval

func (s IntervalSlice) Len() int { return len(s) }

// Less orders by Start, breaking ties by Stop.
func (s IntervalSlice) Less(i, j int) bool {
	if s[i].Start != s[j].Start {
		return s[i].Start < s[j].Start
	}
	return s[i].Stop < s[j].Stop
}

func (s IntervalSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// Sort sorts the slice in place.
func (s IntervalSlice) Sort() { sort.Sort(s) }
// Normalize returns an equivalent slice that is sorted with overlapping and
// touching intervals merged. The receiver is sorted in place; an empty
// input yields nil.
func (s IntervalSlice) Normalize() IntervalSlice {
	if len(s) == 0 {
		return nil
	}

	s.Sort()

	merged := IntervalSlice{s[0]}
	for _, cur := range s[1:] {
		tail := &merged[len(merged)-1]
		switch {
		case cur.Start > tail.Stop:
			// Disjoint: start a new interval.
			merged = append(merged, cur)
		case cur.Stop > tail.Stop:
			// Overlapping or adjacent: extend the previous interval.
			tail.Stop = cur.Stop
		}
	}
	return merged
}
// Return true if sub in s
// Both slices are expected to be normalized (sorted, non-overlapping); the
// two slices are walked in tandem, so the scan is O(len(s)+len(sub)).
func (s IntervalSlice) Contain(sub IntervalSlice) bool {
	j := 0
	for i := 0; i < len(sub); i++ {
		// Advance j past every interval of s that ends before sub[i] starts.
		for ; j < len(s); j++ {
			if sub[i].Start > s[j].Stop {
				continue
			} else {
				break
			}
		}
		// sub[i] starts beyond every interval of s.
		if j == len(s) {
			return false
		}

		// sub[i] must lie entirely within s[j].
		if sub[i].Start < s[j].Start || sub[i].Stop > s[j].Stop {
			return false
		}
	}

	return true
}
// Equal reports whether s and o hold exactly the same intervals in the
// same order.
func (s IntervalSlice) Equal(o IntervalSlice) bool {
	if len(s) != len(o) {
		return false
	}
	for i, in := range s {
		if in != o[i] {
			return false
		}
	}
	return true
}

// Compare returns 0 when the slices are identical, 1 when s contains o,
// and -1 otherwise.
func (s IntervalSlice) Compare(o IntervalSlice) int {
	switch {
	case s.Equal(o):
		return 0
	case s.Contain(o):
		return 1
	default:
		return -1
	}
}
// Refer http://dev.mysql.com/doc/refman/5.6/en/replication-gtids-concepts.html
// UUIDSet is the transactions originating from one server: its UUID plus
// the normalized intervals of sequence numbers.
type UUIDSet struct {
	SID uuid.UUID // originating server's UUID

	Intervals IntervalSlice // kept normalized by the constructors/mutators
}
// ParseUUIDSet parses "UUID:interval[:interval...]" into a UUIDSet with
// normalized intervals.
func ParseUUIDSet(str string) (*UUIDSet, error) {
	str = strings.TrimSpace(str)
	sep := strings.Split(str, ":")
	if len(sep) < 2 {
		return nil, errors.Errorf("invalid GTID format, must UUID:interval[:interval]")
	}

	sid, err := uuid.FromString(sep[0])
	if err != nil {
		return nil, errors.Trace(err)
	}

	s := &UUIDSet{SID: sid}

	// Handle interval
	for _, part := range sep[1:] {
		in, err := parseInterval(part)
		if err != nil {
			return nil, errors.Trace(err)
		}
		s.Intervals = append(s.Intervals, in)
	}

	s.Intervals = s.Intervals.Normalize()

	return s, nil
}
// NewUUIDSet builds a UUIDSet for sid covering the given intervals
// (normalized).
func NewUUIDSet(sid uuid.UUID, in ...Interval) *UUIDSet {
	return &UUIDSet{
		SID:       sid,
		Intervals: IntervalSlice(in).Normalize(),
	}
}

// Contain reports whether sub has the same server UUID and every one of
// its intervals lies inside s.
func (s *UUIDSet) Contain(sub *UUIDSet) bool {
	return bytes.Equal(s.SID.Bytes(), sub.SID.Bytes()) &&
		s.Intervals.Contain(sub.Intervals)
}

// Bytes renders the textual "uuid:interval[:interval...]" form.
func (s *UUIDSet) Bytes() []byte {
	var buf bytes.Buffer

	buf.WriteString(s.SID.String())
	for _, in := range s.Intervals {
		buf.WriteByte(':')
		buf.WriteString(in.String())
	}

	return buf.Bytes()
}

// AddInterval merges additional intervals into the set, renormalizing.
func (s *UUIDSet) AddInterval(in IntervalSlice) {
	s.Intervals = append(s.Intervals, in...).Normalize()
}

// String renders the textual form.
func (s *UUIDSet) String() string {
	return hack.String(s.Bytes())
}
// encode writes the binary (binlog dump) representation to w: the 16-byte
// SID, a little-endian int64 interval count, then (start, stop) int64
// pairs. Write errors are ignored; the only caller in this file passes an
// in-memory bytes.Buffer, whose writes cannot fail — NOTE(review): confirm
// no fallible writers are ever passed here.
func (s *UUIDSet) encode(w io.Writer) {
	w.Write(s.SID.Bytes())
	n := int64(len(s.Intervals))

	binary.Write(w, binary.LittleEndian, n)

	for _, i := range s.Intervals {
		binary.Write(w, binary.LittleEndian, i.Start)
		binary.Write(w, binary.LittleEndian, i.Stop)
	}
}

// Encode returns the binary representation of the set.
func (s *UUIDSet) Encode() []byte {
	var buf bytes.Buffer

	s.encode(&buf)

	return buf.Bytes()
}
// decode parses one binary UUIDSet from the front of data and returns the
// number of bytes consumed. Layout mirrors encode: 16-byte SID, int64
// interval count, then (start, stop) int64 pairs, all little-endian.
func (s *UUIDSet) decode(data []byte) (int, error) {
	if len(data) < 24 {
		// Minimum size: 16-byte SID + 8-byte interval count.
		return 0, errors.Errorf("invalid uuid set buffer, less 24")
	}

	pos := 0
	var err error
	if s.SID, err = uuid.FromBytes(data[0:16]); err != nil {
		return 0, err
	}
	pos += 16

	// Number of intervals that follow.
	n := int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
	pos += 8
	if len(data) < int(16*n)+pos {
		// Each interval takes 16 bytes (two int64s).
		return 0, errors.Errorf("invalid uuid set buffer, must %d, but %d", pos+int(16*n), len(data))
	}

	s.Intervals = make([]Interval, 0, n)

	var in Interval
	for i := int64(0); i < n; i++ {
		in.Start = int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
		pos += 8
		in.Stop = int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
		pos += 8
		s.Intervals = append(s.Intervals, in)
	}

	return pos, nil
}
// Decode parses data as exactly one binary UUIDSet; any trailing bytes are
// an error. Note the deliberate precedence: a length mismatch is reported
// even when decode itself also failed (decode returns 0 consumed on error).
func (s *UUIDSet) Decode(data []byte) error {
	n, err := s.decode(data)
	if n != len(data) {
		return errors.Errorf("invalid uuid set buffer, must %d, but %d", n, len(data))
	}
	return err
}
// MysqlGTIDSet is a complete MySQL GTID set: one UUIDSet per originating
// server, keyed by the server UUID's string form.
type MysqlGTIDSet struct {
	Sets map[string]*UUIDSet
}
// ParseMysqlGTIDSet parses a comma-separated list of UUID sets; the empty
// string yields an empty set. Repeated UUIDs are merged via AddSet.
func ParseMysqlGTIDSet(str string) (GTIDSet, error) {
	s := &MysqlGTIDSet{Sets: make(map[string]*UUIDSet)}
	if str == "" {
		return s, nil
	}

	//todo, handle redundant same uuid
	for _, part := range strings.Split(str, ",") {
		set, err := ParseUUIDSet(part)
		if err != nil {
			return nil, errors.Trace(err)
		}
		s.AddSet(set)
	}
	return s, nil
}
// DecodeMysqlGTIDSet parses the binary GTID-set format: a little-endian
// uint64 count of UUID sets followed by the encoded sets themselves.
//
// Fix: the too-short error message said "less 4" while the check requires
// at least 8 bytes (the set count field).
func DecodeMysqlGTIDSet(data []byte) (*MysqlGTIDSet, error) {
	s := new(MysqlGTIDSet)
	if len(data) < 8 {
		return nil, errors.Errorf("invalid gtid set buffer, less 8")
	}

	// Number of UUID sets that follow.
	n := int(binary.LittleEndian.Uint64(data))
	s.Sets = make(map[string]*UUIDSet, n)

	pos := 8

	for i := 0; i < n; i++ {
		set := new(UUIDSet)
		consumed, err := set.decode(data[pos:])
		if err != nil {
			return nil, errors.Trace(err)
		}
		pos += consumed
		s.AddSet(set)
	}
	return s, nil
}
// AddSet merges set into the collection: if the SID is already present its
// intervals are merged in, otherwise the set is stored as-is. A nil set is
// a no-op.
func (s *MysqlGTIDSet) AddSet(set *UUIDSet) {
	if set == nil {
		return
	}
	sid := set.SID.String()
	if existing, ok := s.Sets[sid]; ok {
		existing.AddInterval(set.Intervals)
		return
	}
	s.Sets[sid] = set
}

// Update parses GTIDStr as a single UUID set and merges it in.
func (s *MysqlGTIDSet) Update(GTIDStr string) error {
	uuidSet, err := ParseUUIDSet(GTIDStr)
	if err != nil {
		return err
	}

	s.AddSet(uuidSet)

	return nil
}

// Contain reports whether every UUID set of o is covered by s.
func (s *MysqlGTIDSet) Contain(o GTIDSet) bool {
	sub, ok := o.(*MysqlGTIDSet)
	if !ok {
		return false
	}

	for sid, set := range sub.Sets {
		mine, ok := s.Sets[sid]
		if !ok || !mine.Contain(set) {
			return false
		}
	}

	return true
}
// Equal reports whether s and o describe exactly the same GTID sets.
//
// Fix: the original only verified that every set in o exists in s, so a
// strict superset compared equal to its subset; the sizes are now compared
// first to make equality symmetric.
func (s *MysqlGTIDSet) Equal(o GTIDSet) bool {
	sub, ok := o.(*MysqlGTIDSet)
	if !ok {
		return false
	}

	if len(sub.Sets) != len(s.Sets) {
		return false
	}

	for key, set := range sub.Sets {
		o, ok := s.Sets[key]
		if !ok {
			return false
		}

		if !o.Intervals.Equal(set.Intervals) {
			return false
		}
	}

	return true
}
// String renders the set as comma-separated UUID sets.
//
// Fix: the original iterated the map directly, producing a different
// ordering on every call; the SIDs are now sorted so the output is
// deterministic (GTID sets are order-insensitive, so any fixed order is
// valid).
func (s *MysqlGTIDSet) String() string {
	sids := make([]string, 0, len(s.Sets))
	for sid := range s.Sets {
		sids = append(sids, sid)
	}
	sort.Strings(sids)

	var buf bytes.Buffer
	for i, sid := range sids {
		if i > 0 {
			buf.WriteString(",")
		}
		buf.WriteString(s.Sets[sid].String())
	}

	return hack.String(buf.Bytes())
}
// Encode produces the binary form used in binlog dump commands: the set
// count followed by each encoded UUID set.
func (s *MysqlGTIDSet) Encode() []byte {
	var buf bytes.Buffer

	binary.Write(&buf, binary.LittleEndian, uint64(len(s.Sets)))

	for _, set := range s.Sets {
		set.encode(&buf)
	}

	return buf.Bytes()
}

View File

@@ -0,0 +1,53 @@
package mysql
import (
"encoding/binary"
"math"
)
// ParseBinaryInt8 reads a signed 8-bit value.
func ParseBinaryInt8(data []byte) int8 { return int8(data[0]) }

// ParseBinaryUint8 reads an unsigned 8-bit value.
func ParseBinaryUint8(data []byte) uint8 { return data[0] }

// ParseBinaryInt16 reads a little-endian signed 16-bit value.
func ParseBinaryInt16(data []byte) int16 {
	return int16(ParseBinaryUint16(data))
}

// ParseBinaryUint16 reads a little-endian unsigned 16-bit value.
func ParseBinaryUint16(data []byte) uint16 {
	return binary.LittleEndian.Uint16(data)
}

// ParseBinaryInt24 reads a little-endian signed 24-bit value, sign-extending
// it into an int32.
func ParseBinaryInt24(data []byte) int32 {
	// Shift the 24-bit value into the top of a 32-bit word and arithmetic
	// shift back down to propagate the sign bit.
	return int32(ParseBinaryUint24(data)<<8) >> 8
}

// ParseBinaryUint24 reads a little-endian unsigned 24-bit value.
func ParseBinaryUint24(data []byte) uint32 {
	return uint32(data[2])<<16 | uint32(data[1])<<8 | uint32(data[0])
}

// ParseBinaryInt32 reads a little-endian signed 32-bit value.
func ParseBinaryInt32(data []byte) int32 {
	return int32(ParseBinaryUint32(data))
}

// ParseBinaryUint32 reads a little-endian unsigned 32-bit value.
func ParseBinaryUint32(data []byte) uint32 {
	return binary.LittleEndian.Uint32(data)
}

// ParseBinaryInt64 reads a little-endian signed 64-bit value.
func ParseBinaryInt64(data []byte) int64 {
	return int64(ParseBinaryUint64(data))
}

// ParseBinaryUint64 reads a little-endian unsigned 64-bit value.
func ParseBinaryUint64(data []byte) uint64 {
	return binary.LittleEndian.Uint64(data)
}

// ParseBinaryFloat32 reads a little-endian IEEE-754 single-precision value.
func ParseBinaryFloat32(data []byte) float32 {
	return math.Float32frombits(binary.LittleEndian.Uint32(data))
}

// ParseBinaryFloat64 reads a little-endian IEEE-754 double-precision value.
func ParseBinaryFloat64(data []byte) float64 {
	return math.Float64frombits(binary.LittleEndian.Uint64(data))
}

View File

@@ -0,0 +1,33 @@
package mysql
import (
"fmt"
)
// Position identifies a location in the binary log stream, for binlog
// filename + offset based replication.
type Position struct {
	Name string // binlog file name, e.g. "mysql-bin.000001"
	Pos  uint32 // byte offset within that file
}

// Compare orders two positions: -1 when p precedes o, 0 when equal, 1 when
// p is after o. Binlog file names sort lexicographically in rotation order,
// so the name is compared first and the offset only breaks ties.
func (p Position) Compare(o Position) int {
	switch {
	case p.Name < o.Name:
		return -1
	case p.Name > o.Name:
		return 1
	case p.Pos < o.Pos:
		return -1
	case p.Pos > o.Pos:
		return 1
	default:
		return 0
	}
}

// String renders the position as "(name, pos)".
func (p Position) String() string {
	return fmt.Sprintf("(%s, %d)", p.Name, p.Pos)
}

14
vendor/github.com/siddontang/go-mysql/mysql/result.go generated vendored Normal file
View File

@@ -0,0 +1,14 @@
package mysql
// Result is the outcome of executing a statement: status flags and
// counters from the server's OK packet, plus an embedded Resultset for
// statements that return rows.
type Result struct {
	Status uint16 // server status flags

	InsertId     uint64 // last insert id reported by the server
	AffectedRows uint64 // number of rows changed/inserted/deleted

	*Resultset // row data; NOTE(review): may be nil for non-query statements — confirm at call sites
}
// Executer runs a query with optional arguments and returns its Result.
// (The misspelled name is exported API and kept for compatibility.)
type Executer interface {
	Execute(query string, args ...interface{}) (*Result, error)
}

View File

@@ -0,0 +1,438 @@
package mysql
import (
"fmt"
"strconv"
"github.com/juju/errors"
"github.com/siddontang/go/hack"
)
// RowData is the raw payload of one row packet; it is decoded on demand
// via Parse/ParseText/ParseBinary.
type RowData []byte
// Parse decodes the row using the binary protocol when binary is true,
// otherwise the text protocol.
func (p RowData) Parse(f []*Field, binary bool) ([]interface{}, error) {
	if !binary {
		return p.ParseText(f)
	}
	return p.ParseBinary(f)
}
// ParseText decodes a text-protocol row: each column is a
// length-encoded string, converted according to the field type.
// NULL columns become nil; integer and float types are parsed from
// their decimal text; everything else is returned as raw bytes.
// NOTE(review): MYSQL_TYPE_LONG is absent from the integer case list
// and therefore falls through to raw bytes — confirm against upstream.
func (p RowData) ParseText(f []*Field) ([]interface{}, error) {
	data := make([]interface{}, len(f))

	pos := 0
	for i := range f {
		v, isNull, n, err := LengthEnodedString(p[pos:])
		if err != nil {
			return nil, errors.Trace(err)
		}
		pos += n

		if isNull {
			data[i] = nil
			continue
		}

		unsigned := f[i].Flag&UNSIGNED_FLAG != 0
		switch f[i].Type {
		case MYSQL_TYPE_TINY, MYSQL_TYPE_SHORT, MYSQL_TYPE_INT24,
			MYSQL_TYPE_LONGLONG, MYSQL_TYPE_YEAR:
			if unsigned {
				data[i], err = strconv.ParseUint(string(v), 10, 64)
			} else {
				data[i], err = strconv.ParseInt(string(v), 10, 64)
			}
		case MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE:
			data[i], err = strconv.ParseFloat(string(v), 64)
		default:
			data[i] = v
		}
		if err != nil {
			return nil, errors.Trace(err)
		}
	}

	return data, nil
}
// ParseBinary decodes a binary-protocol row (COM_STMT_EXECUTE response)
// described by fields f into Go values.
//
// Wire layout: a 0x00 OK header byte, a NULL bitmap of
// (column-count + 7 + 2) / 8 bytes whose bits are offset by 2, then the
// packed column values in field order.
func (p RowData) ParseBinary(f []*Field) ([]interface{}, error) {
	data := make([]interface{}, len(f))

	// A binary row must start with the OK header byte.
	if p[0] != OK_HEADER {
		return nil, ErrMalformPacket
	}

	// Skip header + NULL bitmap; the first two bitmap bits are reserved.
	pos := 1 + ((len(f) + 7 + 2) >> 3)

	nullBitmap := p[1:pos]

	var isNull bool
	var n int
	var err error
	var v []byte
	for i := range data {
		// Column i is NULL when bit (i+2) of the bitmap is set.
		if nullBitmap[(i+2)/8]&(1<<(uint(i+2)%8)) > 0 {
			data[i] = nil
			continue
		}

		isUnsigned := f[i].Flag&UNSIGNED_FLAG != 0

		switch f[i].Type {
		case MYSQL_TYPE_NULL:
			data[i] = nil
			continue

		case MYSQL_TYPE_TINY:
			if isUnsigned {
				data[i] = ParseBinaryUint8(p[pos : pos+1])
			} else {
				data[i] = ParseBinaryInt8(p[pos : pos+1])
			}
			pos++
			continue

		case MYSQL_TYPE_SHORT, MYSQL_TYPE_YEAR:
			if isUnsigned {
				data[i] = ParseBinaryUint16(p[pos : pos+2])
			} else {
				data[i] = ParseBinaryInt16(p[pos : pos+2])
			}
			pos += 2
			continue

		case MYSQL_TYPE_INT24:
			// Only 3 bytes are decoded, but MEDIUMINT occupies 4 bytes
			// on the wire, so the cursor advances by 4.
			if isUnsigned {
				data[i] = ParseBinaryUint24(p[pos : pos+3])
			} else {
				data[i] = ParseBinaryInt24(p[pos : pos+3])
			}
			pos += 4
			continue

		case MYSQL_TYPE_LONG:
			if isUnsigned {
				data[i] = ParseBinaryUint32(p[pos : pos+4])
			} else {
				data[i] = ParseBinaryInt32(p[pos : pos+4])
			}
			pos += 4
			continue

		case MYSQL_TYPE_LONGLONG:
			if isUnsigned {
				data[i] = ParseBinaryUint64(p[pos : pos+8])
			} else {
				data[i] = ParseBinaryInt64(p[pos : pos+8])
			}
			pos += 8
			continue

		case MYSQL_TYPE_FLOAT:
			data[i] = ParseBinaryFloat32(p[pos : pos+4])
			pos += 4
			continue

		case MYSQL_TYPE_DOUBLE:
			data[i] = ParseBinaryFloat64(p[pos : pos+8])
			pos += 8
			continue

		// All string-like types are length-encoded strings.
		case MYSQL_TYPE_DECIMAL, MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_VARCHAR,
			MYSQL_TYPE_BIT, MYSQL_TYPE_ENUM, MYSQL_TYPE_SET, MYSQL_TYPE_TINY_BLOB,
			MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_BLOB,
			MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_STRING, MYSQL_TYPE_GEOMETRY:
			v, isNull, n, err = LengthEnodedString(p[pos:])
			pos += n
			if err != nil {
				return nil, errors.Trace(err)
			}

			if !isNull {
				data[i] = v
				continue
			} else {
				data[i] = nil
				continue
			}

		// Temporal types: a length byte (num) followed by num payload bytes.
		case MYSQL_TYPE_DATE, MYSQL_TYPE_NEWDATE:
			var num uint64
			num, isNull, n = LengthEncodedInt(p[pos:])

			pos += n

			if isNull {
				data[i] = nil
				continue
			}

			data[i], err = FormatBinaryDate(int(num), p[pos:])
			pos += int(num)

			if err != nil {
				return nil, errors.Trace(err)
			}

		case MYSQL_TYPE_TIMESTAMP, MYSQL_TYPE_DATETIME:
			var num uint64
			num, isNull, n = LengthEncodedInt(p[pos:])

			pos += n

			if isNull {
				data[i] = nil
				continue
			}

			data[i], err = FormatBinaryDateTime(int(num), p[pos:])
			pos += int(num)

			if err != nil {
				return nil, errors.Trace(err)
			}

		case MYSQL_TYPE_TIME:
			var num uint64
			num, isNull, n = LengthEncodedInt(p[pos:])

			pos += n

			if isNull {
				data[i] = nil
				continue
			}

			data[i], err = FormatBinaryTime(int(num), p[pos:])
			pos += int(num)

			if err != nil {
				return nil, errors.Trace(err)
			}

		default:
			return nil, errors.Errorf("Stmt Unknown FieldType %d %s", f[i].Type, f[i].Name)
		}
	}

	return data, nil
}
// Resultset holds the column metadata and rows returned by a query.
type Resultset struct {
	Fields     []*Field       // column definitions, in select order
	FieldNames map[string]int // column name -> index into Fields
	Values     [][]interface{} // parsed row values

	RowDatas []RowData // raw row packets
}
// RowNumber returns the number of parsed rows.
func (r *Resultset) RowNumber() int {
	return len(r.Values)
}
// ColumnNumber returns the number of columns in the result.
func (r *Resultset) ColumnNumber() int {
	return len(r.Fields)
}
// GetValue returns the parsed value at (row, column), or an error when
// either index is out of range.
func (r *Resultset) GetValue(row, column int) (interface{}, error) {
	if row < 0 || row >= len(r.Values) {
		return nil, errors.Errorf("invalid row index %d", row)
	}

	if column < 0 || column >= len(r.Fields) {
		return nil, errors.Errorf("invalid column index %d", column)
	}

	return r.Values[row][column], nil
}
// NameIndex resolves a column name to its index in Fields.
func (r *Resultset) NameIndex(name string) (int, error) {
	column, ok := r.FieldNames[name]
	if !ok {
		return 0, errors.Errorf("invalid field name %s", name)
	}
	return column, nil
}
// GetValueByName returns the value at (row, column-named-name).
func (r *Resultset) GetValueByName(row int, name string) (interface{}, error) {
	column, err := r.NameIndex(name)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return r.GetValue(row, column)
}
// IsNull reports whether the value at (row, column) is SQL NULL.
func (r *Resultset) IsNull(row, column int) (bool, error) {
	v, err := r.GetValue(row, column)
	if err != nil {
		return false, err
	}
	return v == nil, nil
}
// IsNullByName reports whether the named column is SQL NULL in the given row.
func (r *Resultset) IsNullByName(row int, name string) (bool, error) {
	column, err := r.NameIndex(name)
	if err != nil {
		return false, err
	}
	return r.IsNull(row, column)
}
// GetUint returns the value at (row, column) coerced to uint64. Numeric
// types are converted directly (negative values wrap), strings and byte
// slices are parsed as base-10 integers, and NULL yields 0.
func (r *Resultset) GetUint(row, column int) (uint64, error) {
	d, err := r.GetValue(row, column)
	if err != nil {
		return 0, err
	}

	switch v := d.(type) {
	case uint64:
		return v, nil
	case uint:
		return uint64(v), nil
	case uint8:
		return uint64(v), nil
	case uint16:
		return uint64(v), nil
	case uint32:
		return uint64(v), nil
	case int:
		return uint64(v), nil
	case int8:
		return uint64(v), nil
	case int16:
		return uint64(v), nil
	case int32:
		return uint64(v), nil
	case int64:
		return uint64(v), nil
	case float32:
		return uint64(v), nil
	case float64:
		return uint64(v), nil
	case string:
		return strconv.ParseUint(v, 10, 64)
	case []byte:
		return strconv.ParseUint(string(v), 10, 64)
	case nil:
		return 0, nil
	default:
		return 0, errors.Errorf("data type is %T", v)
	}
}
// GetUintByName is GetUint addressed by column name.
func (r *Resultset) GetUintByName(row int, name string) (uint64, error) {
	column, err := r.NameIndex(name)
	if err != nil {
		return 0, err
	}
	return r.GetUint(row, column)
}
// GetInt returns the value at (row, column) as int64 (converted via
// GetUint, so values above math.MaxInt64 wrap).
func (r *Resultset) GetInt(row, column int) (int64, error) {
	u, err := r.GetUint(row, column)
	if err != nil {
		return 0, err
	}
	return int64(u), nil
}
// GetIntByName is GetInt addressed by column name.
func (r *Resultset) GetIntByName(row int, name string) (int64, error) {
	u, err := r.GetUintByName(row, name)
	if err != nil {
		return 0, err
	}
	return int64(u), nil
}
// GetFloat returns the value at (row, column) coerced to float64.
// Numeric types are converted directly, strings and byte slices are
// parsed as decimal floats, and NULL yields 0.
func (r *Resultset) GetFloat(row, column int) (float64, error) {
	d, err := r.GetValue(row, column)
	if err != nil {
		return 0, err
	}

	switch v := d.(type) {
	case float64:
		return v, nil
	case float32:
		return float64(v), nil
	case int:
		return float64(v), nil
	case int8:
		return float64(v), nil
	case int16:
		return float64(v), nil
	case int32:
		return float64(v), nil
	case int64:
		return float64(v), nil
	case uint:
		return float64(v), nil
	case uint8:
		return float64(v), nil
	case uint16:
		return float64(v), nil
	case uint32:
		return float64(v), nil
	case uint64:
		return float64(v), nil
	case string:
		return strconv.ParseFloat(v, 64)
	case []byte:
		return strconv.ParseFloat(string(v), 64)
	case nil:
		return 0, nil
	default:
		return 0, errors.Errorf("data type is %T", v)
	}
}
// GetFloatByName is GetFloat addressed by column name.
func (r *Resultset) GetFloatByName(row int, name string) (float64, error) {
	column, err := r.NameIndex(name)
	if err != nil {
		return 0, err
	}
	return r.GetFloat(row, column)
}
// GetString returns the value at (row, column) rendered as a string.
// Byte slices are converted without copying, integers in decimal, and
// NULL yields "".
func (r *Resultset) GetString(row, column int) (string, error) {
	d, err := r.GetValue(row, column)
	if err != nil {
		return "", err
	}

	switch v := d.(type) {
	case string:
		return v, nil
	case []byte:
		// Zero-copy conversion; the bytes must not be mutated afterwards.
		return hack.String(v), nil
	case int, int8, int16, int32, int64,
		uint, uint8, uint16, uint32, uint64:
		return fmt.Sprintf("%d", v), nil
	case float32:
		// Fix: format with bitSize 32 so float32 values print their
		// shortest exact form instead of artifacts such as
		// "1.100000023841858" produced by bitSize 64.
		return strconv.FormatFloat(float64(v), 'f', -1, 32), nil
	case float64:
		return strconv.FormatFloat(v, 'f', -1, 64), nil
	case nil:
		return "", nil
	default:
		return "", errors.Errorf("data type is %T", v)
	}
}
// GetStringByName is GetString addressed by column name.
func (r *Resultset) GetStringByName(row int, name string) (string, error) {
	column, err := r.NameIndex(name)
	if err != nil {
		return "", err
	}
	return r.GetString(row, column)
}

View File

@@ -0,0 +1,205 @@
package mysql
import (
"math"
"strconv"
"github.com/juju/errors"
"github.com/siddontang/go/hack"
)
// formatTextValue renders a Go value for a text-protocol row: integers
// in decimal, floats in their shortest exact form, strings/byte slices
// verbatim. The length prefix is added by the caller.
func formatTextValue(value interface{}) ([]byte, error) {
	switch v := value.(type) {
	case int8:
		return strconv.AppendInt(nil, int64(v), 10), nil
	case int16:
		return strconv.AppendInt(nil, int64(v), 10), nil
	case int32:
		return strconv.AppendInt(nil, int64(v), 10), nil
	case int64:
		return strconv.AppendInt(nil, int64(v), 10), nil
	case int:
		return strconv.AppendInt(nil, int64(v), 10), nil
	case uint8:
		return strconv.AppendUint(nil, uint64(v), 10), nil
	case uint16:
		return strconv.AppendUint(nil, uint64(v), 10), nil
	case uint32:
		return strconv.AppendUint(nil, uint64(v), 10), nil
	case uint64:
		return strconv.AppendUint(nil, uint64(v), 10), nil
	case uint:
		return strconv.AppendUint(nil, uint64(v), 10), nil
	case float32:
		// Fix: bitSize 32 prints float32's shortest exact decimal form;
		// bitSize 64 produced artifacts such as "1.100000023841858".
		return strconv.AppendFloat(nil, float64(v), 'f', -1, 32), nil
	case float64:
		return strconv.AppendFloat(nil, float64(v), 'f', -1, 64), nil
	case []byte:
		return v, nil
	case string:
		return hack.Slice(v), nil
	default:
		return nil, errors.Errorf("invalid type %T", value)
	}
}
// formatBinaryValue encodes a Go value for a binary-protocol row.
// All integers (signed and unsigned) are emitted as 8 little-endian
// bytes (the field is declared MYSQL_TYPE_LONGLONG by formatField),
// floats as an 8-byte IEEE-754 double (MYSQL_TYPE_DOUBLE), and
// strings/byte slices are returned as-is; the caller adds the length
// prefix for string fields.
func formatBinaryValue(value interface{}) ([]byte, error) {
	switch v := value.(type) {
	case int8:
		return Uint64ToBytes(uint64(v)), nil
	case int16:
		return Uint64ToBytes(uint64(v)), nil
	case int32:
		return Uint64ToBytes(uint64(v)), nil
	case int64:
		return Uint64ToBytes(uint64(v)), nil
	case int:
		return Uint64ToBytes(uint64(v)), nil
	case uint8:
		return Uint64ToBytes(uint64(v)), nil
	case uint16:
		return Uint64ToBytes(uint64(v)), nil
	case uint32:
		return Uint64ToBytes(uint64(v)), nil
	case uint64:
		return Uint64ToBytes(uint64(v)), nil
	case uint:
		return Uint64ToBytes(uint64(v)), nil
	case float32:
		// float32 is widened to double because formatField maps all
		// floats to MYSQL_TYPE_DOUBLE.
		return Uint64ToBytes(math.Float64bits(float64(v))), nil
	case float64:
		return Uint64ToBytes(math.Float64bits(v)), nil
	case []byte:
		return v, nil
	case string:
		return hack.Slice(v), nil
	default:
		return nil, errors.Errorf("invalid type %T", value)
	}
}
// formatField fills charset/type/flag metadata on field according to
// the Go type of value.
func formatField(field *Field, value interface{}) error {
	switch value.(type) {
	case int, int8, int16, int32, int64:
		field.Charset = 63 // binary
		field.Type = MYSQL_TYPE_LONGLONG
		field.Flag = BINARY_FLAG | NOT_NULL_FLAG
	case uint, uint8, uint16, uint32, uint64:
		field.Charset = 63 // binary
		field.Type = MYSQL_TYPE_LONGLONG
		field.Flag = BINARY_FLAG | NOT_NULL_FLAG | UNSIGNED_FLAG
	case float32, float64:
		field.Charset = 63 // binary
		field.Type = MYSQL_TYPE_DOUBLE
		field.Flag = BINARY_FLAG | NOT_NULL_FLAG
	case string, []byte:
		field.Charset = 33 // utf8
		field.Type = MYSQL_TYPE_VAR_STRING
	default:
		return errors.Errorf("unsupport type %T for resultset", value)
	}
	return nil
}
// BuildSimpleTextResultset builds a text-protocol Resultset from column
// names and rows of Go values. Field metadata is derived from the types
// in the first row; every row must have len(names) columns.
func BuildSimpleTextResultset(names []string, values [][]interface{}) (*Resultset, error) {
	r := new(Resultset)
	r.Fields = make([]*Field, len(names))

	for i, vs := range values {
		if len(vs) != len(r.Fields) {
			return nil, errors.Errorf("row %d has %d column not equal %d", i, len(vs), len(r.Fields))
		}

		var row []byte
		for j, value := range vs {
			// Column metadata comes from the first row's value types.
			if i == 0 {
				field := &Field{Name: hack.Slice(names[j])}
				r.Fields[j] = field
				if err := formatField(field, value); err != nil {
					return nil, errors.Trace(err)
				}
			}

			b, err := formatTextValue(value)
			if err != nil {
				return nil, errors.Trace(err)
			}
			row = append(row, PutLengthEncodedString(b)...)
		}

		r.RowDatas = append(r.RowDatas, row)
	}

	return r, nil
}
// BuildSimpleBinaryResultset builds a binary-protocol Resultset from
// column names and rows of Go values. Each row packet is an OK header
// byte, a NULL bitmap (bits offset by 2), then the packed values.
//
// Bug fix: the NULL bitmap was previously indexed by the row counter i
// instead of the column counter j, so NULL markers landed on the wrong
// columns for every row/column index > 0.
func BuildSimpleBinaryResultset(names []string, values [][]interface{}) (*Resultset, error) {
	r := new(Resultset)
	r.Fields = make([]*Field, len(names))

	var b []byte
	var err error

	// Bitmap size per the binary row format: (columns + 7 + 2) / 8.
	bitmapLen := ((len(names) + 7 + 2) >> 3)

	for i, vs := range values {
		if len(vs) != len(r.Fields) {
			return nil, errors.Errorf("row %d has %d column not equal %d", i, len(vs), len(r.Fields))
		}

		var row []byte
		nullBitmap := make([]byte, bitmapLen)

		// OK header plus a placeholder bitmap, patched via copy below.
		row = append(row, 0)
		row = append(row, nullBitmap...)

		for j, value := range vs {
			// Column metadata comes from the first row's value types.
			if i == 0 {
				field := &Field{}
				r.Fields[j] = field
				field.Name = hack.Slice(names[j])

				if err = formatField(field, value); err != nil {
					return nil, errors.Trace(err)
				}
			}
			if value == nil {
				// Index by column j (was row i): bit (j+2) marks column j NULL.
				nullBitmap[(j+2)/8] |= (1 << (uint(j+2) % 8))
				continue
			}

			b, err = formatBinaryValue(value)
			if err != nil {
				return nil, errors.Trace(err)
			}

			if r.Fields[j].Type == MYSQL_TYPE_VAR_STRING {
				row = append(row, PutLengthEncodedString(b)...)
			} else {
				row = append(row, b...)
			}
		}

		copy(row[1:], nullBitmap)

		r.RowDatas = append(r.RowDatas, row)
	}

	return r, nil
}
// BuildSimpleResultset dispatches to the binary or text builder.
func BuildSimpleResultset(names []string, values [][]interface{}, binary bool) (*Resultset, error) {
	if !binary {
		return BuildSimpleTextResultset(names, values)
	}
	return BuildSimpleBinaryResultset(names, values)
}

233
vendor/github.com/siddontang/go-mysql/mysql/state.go generated vendored Normal file
View File

@@ -0,0 +1,233 @@
package mysql
const (
	// DEFAULT_MYSQL_STATE is the generic SQLSTATE used when an error
	// code has no entry in MySQLState.
	DEFAULT_MYSQL_STATE = "HY000"
)
// MySQLState maps MySQL server error codes (ER_*) to their SQLSTATE
// values; codes absent from this table fall back to DEFAULT_MYSQL_STATE.
var MySQLState = map[uint16]string{
	ER_DUP_KEY:                                  "23000",
	ER_OUTOFMEMORY:                              "HY001",
	ER_OUT_OF_SORTMEMORY:                        "HY001",
	ER_CON_COUNT_ERROR:                          "08004",
	ER_BAD_HOST_ERROR:                           "08S01",
	ER_HANDSHAKE_ERROR:                          "08S01",
	ER_DBACCESS_DENIED_ERROR:                    "42000",
	ER_ACCESS_DENIED_ERROR:                      "28000",
	ER_NO_DB_ERROR:                              "3D000",
	ER_UNKNOWN_COM_ERROR:                        "08S01",
	ER_BAD_NULL_ERROR:                           "23000",
	ER_BAD_DB_ERROR:                             "42000",
	ER_TABLE_EXISTS_ERROR:                       "42S01",
	ER_BAD_TABLE_ERROR:                          "42S02",
	ER_NON_UNIQ_ERROR:                           "23000",
	ER_SERVER_SHUTDOWN:                          "08S01",
	ER_BAD_FIELD_ERROR:                          "42S22",
	ER_WRONG_FIELD_WITH_GROUP:                   "42000",
	ER_WRONG_SUM_SELECT:                         "42000",
	ER_WRONG_GROUP_FIELD:                        "42000",
	ER_WRONG_VALUE_COUNT:                        "21S01",
	ER_TOO_LONG_IDENT:                           "42000",
	ER_DUP_FIELDNAME:                            "42S21",
	ER_DUP_KEYNAME:                              "42000",
	ER_DUP_ENTRY:                                "23000",
	ER_WRONG_FIELD_SPEC:                         "42000",
	ER_PARSE_ERROR:                              "42000",
	ER_EMPTY_QUERY:                              "42000",
	ER_NONUNIQ_TABLE:                            "42000",
	ER_INVALID_DEFAULT:                          "42000",
	ER_MULTIPLE_PRI_KEY:                         "42000",
	ER_TOO_MANY_KEYS:                            "42000",
	ER_TOO_MANY_KEY_PARTS:                       "42000",
	ER_TOO_LONG_KEY:                             "42000",
	ER_KEY_COLUMN_DOES_NOT_EXITS:                "42000",
	ER_BLOB_USED_AS_KEY:                         "42000",
	ER_TOO_BIG_FIELDLENGTH:                      "42000",
	ER_WRONG_AUTO_KEY:                           "42000",
	ER_FORCING_CLOSE:                            "08S01",
	ER_IPSOCK_ERROR:                             "08S01",
	ER_NO_SUCH_INDEX:                            "42S12",
	ER_WRONG_FIELD_TERMINATORS:                  "42000",
	ER_BLOBS_AND_NO_TERMINATED:                  "42000",
	ER_CANT_REMOVE_ALL_FIELDS:                   "42000",
	ER_CANT_DROP_FIELD_OR_KEY:                   "42000",
	ER_BLOB_CANT_HAVE_DEFAULT:                   "42000",
	ER_WRONG_DB_NAME:                            "42000",
	ER_WRONG_TABLE_NAME:                         "42000",
	ER_TOO_BIG_SELECT:                           "42000",
	ER_UNKNOWN_PROCEDURE:                        "42000",
	ER_WRONG_PARAMCOUNT_TO_PROCEDURE:            "42000",
	ER_UNKNOWN_TABLE:                            "42S02",
	ER_FIELD_SPECIFIED_TWICE:                    "42000",
	ER_UNSUPPORTED_EXTENSION:                    "42000",
	ER_TABLE_MUST_HAVE_COLUMNS:                  "42000",
	ER_UNKNOWN_CHARACTER_SET:                    "42000",
	ER_TOO_BIG_ROWSIZE:                          "42000",
	ER_WRONG_OUTER_JOIN:                         "42000",
	ER_NULL_COLUMN_IN_INDEX:                     "42000",
	ER_PASSWORD_ANONYMOUS_USER:                  "42000",
	ER_PASSWORD_NOT_ALLOWED:                     "42000",
	ER_PASSWORD_NO_MATCH:                        "42000",
	ER_WRONG_VALUE_COUNT_ON_ROW:                 "21S01",
	ER_INVALID_USE_OF_NULL:                      "22004",
	ER_REGEXP_ERROR:                             "42000",
	ER_MIX_OF_GROUP_FUNC_AND_FIELDS:             "42000",
	ER_NONEXISTING_GRANT:                        "42000",
	ER_TABLEACCESS_DENIED_ERROR:                 "42000",
	ER_COLUMNACCESS_DENIED_ERROR:                "42000",
	ER_ILLEGAL_GRANT_FOR_TABLE:                  "42000",
	ER_GRANT_WRONG_HOST_OR_USER:                 "42000",
	ER_NO_SUCH_TABLE:                            "42S02",
	ER_NONEXISTING_TABLE_GRANT:                  "42000",
	ER_NOT_ALLOWED_COMMAND:                      "42000",
	ER_SYNTAX_ERROR:                             "42000",
	ER_ABORTING_CONNECTION:                      "08S01",
	ER_NET_PACKET_TOO_LARGE:                     "08S01",
	ER_NET_READ_ERROR_FROM_PIPE:                 "08S01",
	ER_NET_FCNTL_ERROR:                          "08S01",
	ER_NET_PACKETS_OUT_OF_ORDER:                 "08S01",
	ER_NET_UNCOMPRESS_ERROR:                     "08S01",
	ER_NET_READ_ERROR:                           "08S01",
	ER_NET_READ_INTERRUPTED:                     "08S01",
	ER_NET_ERROR_ON_WRITE:                       "08S01",
	ER_NET_WRITE_INTERRUPTED:                    "08S01",
	ER_TOO_LONG_STRING:                          "42000",
	ER_TABLE_CANT_HANDLE_BLOB:                   "42000",
	ER_TABLE_CANT_HANDLE_AUTO_INCREMENT:         "42000",
	ER_WRONG_COLUMN_NAME:                        "42000",
	ER_WRONG_KEY_COLUMN:                         "42000",
	ER_DUP_UNIQUE:                               "23000",
	ER_BLOB_KEY_WITHOUT_LENGTH:                  "42000",
	ER_PRIMARY_CANT_HAVE_NULL:                   "42000",
	ER_TOO_MANY_ROWS:                            "42000",
	ER_REQUIRES_PRIMARY_KEY:                     "42000",
	ER_KEY_DOES_NOT_EXITS:                       "42000",
	ER_CHECK_NO_SUCH_TABLE:                      "42000",
	ER_CHECK_NOT_IMPLEMENTED:                    "42000",
	ER_CANT_DO_THIS_DURING_AN_TRANSACTION:       "25000",
	ER_NEW_ABORTING_CONNECTION:                  "08S01",
	ER_MASTER_NET_READ:                          "08S01",
	ER_MASTER_NET_WRITE:                         "08S01",
	ER_TOO_MANY_USER_CONNECTIONS:                "42000",
	ER_READ_ONLY_TRANSACTION:                    "25000",
	ER_NO_PERMISSION_TO_CREATE_USER:             "42000",
	ER_LOCK_DEADLOCK:                            "40001",
	ER_NO_REFERENCED_ROW:                        "23000",
	ER_ROW_IS_REFERENCED:                        "23000",
	ER_CONNECT_TO_MASTER:                        "08S01",
	ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT:        "21000",
	ER_USER_LIMIT_REACHED:                       "42000",
	ER_SPECIFIC_ACCESS_DENIED_ERROR:             "42000",
	ER_NO_DEFAULT:                               "42000",
	ER_WRONG_VALUE_FOR_VAR:                      "42000",
	ER_WRONG_TYPE_FOR_VAR:                       "42000",
	ER_CANT_USE_OPTION_HERE:                     "42000",
	ER_NOT_SUPPORTED_YET:                        "42000",
	ER_WRONG_FK_DEF:                             "42000",
	ER_OPERAND_COLUMNS:                          "21000",
	ER_SUBQUERY_NO_1_ROW:                        "21000",
	ER_ILLEGAL_REFERENCE:                        "42S22",
	ER_DERIVED_MUST_HAVE_ALIAS:                  "42000",
	ER_SELECT_REDUCED:                           "01000",
	ER_TABLENAME_NOT_ALLOWED_HERE:               "42000",
	ER_NOT_SUPPORTED_AUTH_MODE:                  "08004",
	ER_SPATIAL_CANT_HAVE_NULL:                   "42000",
	ER_COLLATION_CHARSET_MISMATCH:               "42000",
	ER_WARN_TOO_FEW_RECORDS:                     "01000",
	ER_WARN_TOO_MANY_RECORDS:                    "01000",
	ER_WARN_NULL_TO_NOTNULL:                     "22004",
	ER_WARN_DATA_OUT_OF_RANGE:                   "22003",
	WARN_DATA_TRUNCATED:                         "01000",
	ER_WRONG_NAME_FOR_INDEX:                     "42000",
	ER_WRONG_NAME_FOR_CATALOG:                   "42000",
	ER_UNKNOWN_STORAGE_ENGINE:                   "42000",
	ER_TRUNCATED_WRONG_VALUE:                    "22007",
	ER_SP_NO_RECURSIVE_CREATE:                   "2F003",
	ER_SP_ALREADY_EXISTS:                        "42000",
	ER_SP_DOES_NOT_EXIST:                        "42000",
	ER_SP_LILABEL_MISMATCH:                      "42000",
	ER_SP_LABEL_REDEFINE:                        "42000",
	ER_SP_LABEL_MISMATCH:                        "42000",
	ER_SP_UNINIT_VAR:                            "01000",
	ER_SP_BADSELECT:                             "0A000",
	ER_SP_BADRETURN:                             "42000",
	ER_SP_BADSTATEMENT:                          "0A000",
	ER_UPDATE_LOG_DEPRECATED_IGNORED:            "42000",
	ER_UPDATE_LOG_DEPRECATED_TRANSLATED:         "42000",
	ER_QUERY_INTERRUPTED:                        "70100",
	ER_SP_WRONG_NO_OF_ARGS:                      "42000",
	ER_SP_COND_MISMATCH:                         "42000",
	ER_SP_NORETURN:                              "42000",
	ER_SP_NORETURNEND:                           "2F005",
	ER_SP_BAD_CURSOR_QUERY:                      "42000",
	ER_SP_BAD_CURSOR_SELECT:                     "42000",
	ER_SP_CURSOR_MISMATCH:                       "42000",
	ER_SP_CURSOR_ALREADY_OPEN:                   "24000",
	ER_SP_CURSOR_NOT_OPEN:                       "24000",
	ER_SP_UNDECLARED_VAR:                        "42000",
	ER_SP_FETCH_NO_DATA:                         "02000",
	ER_SP_DUP_PARAM:                             "42000",
	ER_SP_DUP_VAR:                               "42000",
	ER_SP_DUP_COND:                              "42000",
	ER_SP_DUP_CURS:                              "42000",
	ER_SP_SUBSELECT_NYI:                         "0A000",
	ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG:            "0A000",
	ER_SP_VARCOND_AFTER_CURSHNDLR:               "42000",
	ER_SP_CURSOR_AFTER_HANDLER:                  "42000",
	ER_SP_CASE_NOT_FOUND:                        "20000",
	ER_DIVISION_BY_ZERO:                         "22012",
	ER_ILLEGAL_VALUE_FOR_TYPE:                   "22007",
	ER_PROCACCESS_DENIED_ERROR:                  "42000",
	ER_XAER_NOTA:                                "XAE04",
	ER_XAER_INVAL:                               "XAE05",
	ER_XAER_RMFAIL:                              "XAE07",
	ER_XAER_OUTSIDE:                             "XAE09",
	ER_XAER_RMERR:                               "XAE03",
	ER_XA_RBROLLBACK:                            "XA100",
	ER_NONEXISTING_PROC_GRANT:                   "42000",
	ER_DATA_TOO_LONG:                            "22001",
	ER_SP_BAD_SQLSTATE:                          "42000",
	ER_CANT_CREATE_USER_WITH_GRANT:              "42000",
	ER_SP_DUP_HANDLER:                           "42000",
	ER_SP_NOT_VAR_ARG:                           "42000",
	ER_SP_NO_RETSET:                             "0A000",
	ER_CANT_CREATE_GEOMETRY_OBJECT:              "22003",
	ER_TOO_BIG_SCALE:                            "42000",
	ER_TOO_BIG_PRECISION:                        "42000",
	ER_M_BIGGER_THAN_D:                          "42000",
	ER_TOO_LONG_BODY:                            "42000",
	ER_TOO_BIG_DISPLAYWIDTH:                     "42000",
	ER_XAER_DUPID:                               "XAE08",
	ER_DATETIME_FUNCTION_OVERFLOW:               "22008",
	ER_ROW_IS_REFERENCED_2:                      "23000",
	ER_NO_REFERENCED_ROW_2:                      "23000",
	ER_SP_BAD_VAR_SHADOW:                        "42000",
	ER_SP_WRONG_NAME:                            "42000",
	ER_SP_NO_AGGREGATE:                          "42000",
	ER_MAX_PREPARED_STMT_COUNT_REACHED:          "42000",
	ER_NON_GROUPING_FIELD_USED:                  "42000",
	ER_FOREIGN_DUPLICATE_KEY_OLD_UNUSED:         "23000",
	ER_CANT_CHANGE_TX_CHARACTERISTICS:           "25001",
	ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT:           "42000",
	ER_WRONG_PARAMETERS_TO_NATIVE_FCT:           "42000",
	ER_WRONG_PARAMETERS_TO_STORED_FCT:           "42000",
	ER_DUP_ENTRY_WITH_KEY_NAME:                  "23000",
	ER_XA_RBTIMEOUT:                             "XA106",
	ER_XA_RBDEADLOCK:                            "XA102",
	ER_FUNC_INEXISTENT_NAME_COLLISION:           "42000",
	ER_DUP_SIGNAL_SET:                           "42000",
	ER_SIGNAL_WARN:                              "01000",
	ER_SIGNAL_NOT_FOUND:                         "02000",
	ER_SIGNAL_EXCEPTION:                         "HY000",
	ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER:          "0K000",
	ER_SPATIAL_MUST_HAVE_GEOM_COL:               "42000",
	ER_DATA_OUT_OF_RANGE:                        "22003",
	ER_ACCESS_DENIED_NO_PASSWORD_ERROR:          "28000",
	ER_TRUNCATE_ILLEGAL_FK:                      "42000",
	ER_DA_INVALID_CONDITION_NUMBER:              "35000",
	ER_FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO:    "23000",
	ER_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO: "23000",
	ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION:    "25006",
	ER_ALTER_OPERATION_NOT_SUPPORTED:            "0A000",
	ER_ALTER_OPERATION_NOT_SUPPORTED_REASON:     "0A000",
	ER_DUP_UNKNOWN_IN_INDEX:                     "23000",
}

354
vendor/github.com/siddontang/go-mysql/mysql/util.go generated vendored Normal file
View File

@@ -0,0 +1,354 @@
package mysql
import (
"crypto/rand"
"crypto/sha1"
"encoding/binary"
"fmt"
"io"
"runtime"
"strings"
"github.com/juju/errors"
"github.com/siddontang/go/hack"
)
// Pstack returns the current goroutine's stack trace (up to 1 KiB).
func Pstack() string {
	buf := make([]byte, 1024)
	return string(buf[:runtime.Stack(buf, false)])
}
// CalcPassword computes the mysql_native_password auth token:
//
//	token = SHA1(scramble + SHA1(SHA1(password))) XOR SHA1(password)
//
// An empty password yields nil (no token sent).
func CalcPassword(scramble, password []byte) []byte {
	if len(password) == 0 {
		return nil
	}

	stage1 := sha1.Sum(password)
	inner := sha1.Sum(stage1[:])

	outer := sha1.New()
	outer.Write(scramble)
	outer.Write(inner[:])
	token := outer.Sum(nil)

	for i := range token {
		token[i] ^= stage1[i]
	}
	return token
}
// RandomBuf returns size cryptographically random bytes, with any NUL
// byte replaced by '0' so the result never contains '\0'.
func RandomBuf(size int) ([]byte, error) {
	buf := make([]byte, size)
	if _, err := io.ReadFull(rand.Reader, buf); err != nil {
		return nil, errors.Trace(err)
	}

	for i := range buf {
		if buf[i] == 0 {
			buf[i] = '0'
		}
	}

	return buf, nil
}
// FixedLengthInt decodes buf as a little-endian unsigned integer of
// len(buf) bytes (up to 8).
func FixedLengthInt(buf []byte) uint64 {
	var num uint64
	for i := len(buf) - 1; i >= 0; i-- {
		num = num<<8 | uint64(buf[i])
	}
	return num
}
// BFixedLengthInt decodes buf as a big-endian unsigned integer of
// len(buf) bytes (up to 8).
func BFixedLengthInt(buf []byte) uint64 {
	var num uint64
	for _, b := range buf {
		num = num<<8 | uint64(b)
	}
	return num
}
// LengthEncodedInt decodes a MySQL length-encoded integer, returning
// the value, whether it encodes NULL (0xfb), and the number of bytes
// consumed.
func LengthEncodedInt(b []byte) (num uint64, isNull bool, n int) {
	switch b[0] {
	case 0xfb: // NULL marker
		return 0, true, 1
	case 0xfc: // 2-byte value
		return uint64(b[1]) | uint64(b[2])<<8, false, 3
	case 0xfd: // 3-byte value
		return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
	case 0xfe: // 8-byte value
		return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
			uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
			uint64(b[7])<<48 | uint64(b[8])<<56, false, 9
	}

	// 0-250 (and 0xff): the first byte is the value itself.
	return uint64(b[0]), false, 1
}
// PutLengthEncodedInt encodes n as a MySQL length-encoded integer:
// 1 byte for values up to 250, otherwise a 0xfc/0xfd/0xfe marker
// followed by 2, 3 or 8 little-endian bytes.
//
// Cleanup: the last case previously was "n <= 0xffffffffffffffff",
// which is always true for a uint64, leaving an unreachable
// "return nil"; it is now the default branch.
func PutLengthEncodedInt(n uint64) []byte {
	switch {
	case n <= 250:
		return []byte{byte(n)}

	case n <= 0xffff:
		return []byte{0xfc, byte(n), byte(n >> 8)}

	case n <= 0xffffff:
		return []byte{0xfd, byte(n), byte(n >> 8), byte(n >> 16)}

	default:
		return []byte{0xfe, byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24),
			byte(n >> 32), byte(n >> 40), byte(n >> 48), byte(n >> 56)}
	}
}
// LengthEnodedString decodes a MySQL length-encoded string, returning
// the string bytes, whether it encodes NULL, the total bytes consumed,
// and io.EOF when b is too short. (The misspelled name is exported API.)
func LengthEnodedString(b []byte) ([]byte, bool, int, error) {
	num, isNull, n := LengthEncodedInt(b)
	if num < 1 {
		return nil, isNull, n, nil
	}

	end := n + int(num)
	if len(b) < end {
		return nil, false, end, io.EOF
	}
	return b[n:end], false, end, nil
}
// SkipLengthEnodedString returns the total size of the length-encoded
// string at the start of b, or io.EOF when b is too short to hold it.
func SkipLengthEnodedString(b []byte) (int, error) {
	num, _, n := LengthEncodedInt(b)
	if num < 1 {
		return n, nil
	}

	n += int(num)
	if len(b) < n {
		return n, io.EOF
	}
	return n, nil
}
// PutLengthEncodedString prefixes b with its length-encoded size.
func PutLengthEncodedString(b []byte) []byte {
	data := PutLengthEncodedInt(uint64(len(b)))
	return append(data, b...)
}
// Uint16ToBytes encodes n as 2 little-endian bytes.
func Uint16ToBytes(n uint16) []byte {
	return []byte{byte(n), byte(n >> 8)}
}
// Uint32ToBytes encodes n as 4 little-endian bytes.
func Uint32ToBytes(n uint32) []byte {
	return []byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
}
// Uint64ToBytes encodes n as 8 little-endian bytes.
func Uint64ToBytes(n uint64) []byte {
	out := make([]byte, 8)
	for i := range out {
		out[i] = byte(n >> (uint(i) * 8))
	}
	return out
}
// FormatBinaryDate renders a binary-protocol DATE value of wire length
// n as "YYYY-MM-DD". n is 0 for the zero date or 4 for year/month/day.
func FormatBinaryDate(n int, data []byte) ([]byte, error) {
	switch n {
	case 0:
		return []byte("0000-00-00"), nil
	case 4:
		year := binary.LittleEndian.Uint16(data[:2])
		return []byte(fmt.Sprintf("%04d-%02d-%02d", year, data[2], data[3])), nil
	}
	return nil, errors.Errorf("invalid date packet length %d", n)
}
// FormatBinaryDateTime renders a binary-protocol DATETIME/TIMESTAMP
// value of wire length n as "YYYY-MM-DD hh:mm:ss[.ffffff]". Valid
// lengths: 0 (zero value), 4 (date only), 7 (seconds precision),
// 11 (microseconds).
func FormatBinaryDateTime(n int, data []byte) ([]byte, error) {
	switch n {
	case 0:
		return []byte("0000-00-00 00:00:00"), nil
	case 4:
		year := binary.LittleEndian.Uint16(data[:2])
		return []byte(fmt.Sprintf("%04d-%02d-%02d 00:00:00",
			year, data[2], data[3])), nil
	case 7:
		year := binary.LittleEndian.Uint16(data[:2])
		return []byte(fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d",
			year, data[2], data[3], data[4], data[5], data[6])), nil
	case 11:
		year := binary.LittleEndian.Uint16(data[:2])
		micro := binary.LittleEndian.Uint32(data[7:11])
		return []byte(fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d.%06d",
			year, data[2], data[3], data[4], data[5], data[6], micro)), nil
	default:
		return nil, errors.Errorf("invalid datetime packet length %d", n)
	}
}
// FormatBinaryTime renders a binary-protocol TIME value of wire length
// n. data[0] is the sign flag, data[1:5] the day count, data[5:8]
// hours/minutes/seconds, and data[8:12] microseconds when n == 11+1.
//
// NOTE(review): only data[1] (the low byte of the 4-byte day count)
// contributes to the hour total, so day counts above 255 truncate —
// confirm against upstream.
// NOTE(review): n == 0 returns a date-shaped zero string rather than
// "00:00:00" — confirm intent.
func FormatBinaryTime(n int, data []byte) ([]byte, error) {
	if n == 0 {
		return []byte("0000-00-00"), nil
	}

	// Sign byte: 1 means negative; otherwise sign stays NUL and is
	// printed as-is by %c.
	var sign byte
	if data[0] == 1 {
		sign = byte('-')
	}

	switch n {
	case 8:
		return []byte(fmt.Sprintf(
			"%c%02d:%02d:%02d",
			sign,
			uint16(data[1])*24+uint16(data[5]),
			data[6],
			data[7],
		)), nil
	case 12:
		return []byte(fmt.Sprintf(
			"%c%02d:%02d:%02d.%06d",
			sign,
			uint16(data[1])*24+uint16(data[5]),
			data[6],
			data[7],
			binary.LittleEndian.Uint32(data[8:12]),
		)), nil
	default:
		return nil, errors.Errorf("invalid time packet length %d", n)
	}
}
var (
	// DONTESCAPE marks bytes in EncodeMap that pass through Escape unchanged.
	DONTESCAPE = byte(255)

	// EncodeMap maps each byte to its escape character, or DONTESCAPE.
	// It is populated from encodeRef in init.
	EncodeMap [256]byte
)
// Escape backslash-escapes the bytes listed in EncodeMap within a SQL
// string literal. Only UTF-8 input is supported.
func Escape(sql string) string {
	dest := make([]byte, 0, 2*len(sql))

	for _, w := range hack.Slice(sql) {
		c := EncodeMap[w]
		if c == DONTESCAPE {
			dest = append(dest, w)
			continue
		}
		dest = append(dest, '\\', c)
	}

	return string(dest)
}
// GetNetProto returns "unix" when addr looks like a filesystem path
// (contains a slash), otherwise "tcp".
func GetNetProto(addr string) string {
	if strings.ContainsRune(addr, '/') {
		return "unix"
	}
	return "tcp"
}
// ErrorEqual returns a boolean indicating whether err1 is equal to
// err2: identical after unwrapping via errors.Cause, or carrying the
// same message. Two nils are equal; one nil and one non-nil are not.
func ErrorEqual(err1, err2 error) bool {
	e1, e2 := errors.Cause(err1), errors.Cause(err2)

	if e1 == e2 {
		return true
	}
	if e1 == nil || e2 == nil {
		// Only one side is nil here (the both-nil case returned above).
		return false
	}
	return e1.Error() == e2.Error()
}
// encodeRef lists the bytes that must be backslash-escaped inside SQL
// string literals, mapped to the character that follows the backslash.
var encodeRef = map[byte]byte{
	'\x00': '0',
	'\'':   '\'',
	'"':    '"',
	'\b':   'b',
	'\n':   'n',
	'\r':   'r',
	'\t':   't',
	26:     'Z', // ctl-Z
	'\\':   '\\',
}
// init fills EncodeMap: bytes listed in encodeRef get their escape
// character, everything else is marked DONTESCAPE.
func init() {
	for i := range EncodeMap {
		if to, ok := encodeRef[byte(i)]; ok {
			EncodeMap[i] = to
		} else {
			EncodeMap[i] = DONTESCAPE
		}
	}
}

View File

@@ -0,0 +1,27 @@
# Bazel rules for the vendored go-mysql/packet package.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

# Library target covering the packet-level MySQL connection code.
go_library(
    name = "go_default_library",
    srcs = ["conn.go"],
    importmap = "go-common/vendor/github.com/siddontang/go-mysql/packet",
    importpath = "github.com/siddontang/go-mysql/packet",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/juju/errors:go_default_library",
        "//vendor/github.com/siddontang/go-mysql/mysql:go_default_library",
    ],
)

# Source filegroups used by the repository-wide srcs aggregation.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

161
vendor/github.com/siddontang/go-mysql/packet/conn.go generated vendored Normal file
View File

@@ -0,0 +1,161 @@
package packet
import (
"bufio"
"bytes"
"io"
"net"
"github.com/juju/errors"
. "github.com/siddontang/go-mysql/mysql"
)
/*
Conn is the base class to handle MySQL protocol.

It wraps a net.Conn with a buffered reader and tracks the packet
sequence number used to validate incoming and stamp outgoing packet
headers.
*/
type Conn struct {
	net.Conn

	// br buffers reads from the underlying connection.
	br *bufio.Reader

	// Sequence is the next expected (read) / emitted (write) packet
	// sequence id.
	Sequence uint8
}
// NewConn wraps conn in a MySQL protocol Conn with a 4 KiB read buffer.
func NewConn(conn net.Conn) *Conn {
	return &Conn{
		Conn: conn,
		br:   bufio.NewReaderSize(conn, 4096),
	}
}
// ReadPacket reads one complete MySQL packet payload — reassembling
// packets split at MaxPayloadLen — and returns it without the 4-byte
// header.
//
// Cleanup: removed a ~30-line commented-out duplicate of the
// ReadPacketTo implementation and the else-after-return.
func (c *Conn) ReadPacket() ([]byte, error) {
	var buf bytes.Buffer

	if err := c.ReadPacketTo(&buf); err != nil {
		return nil, errors.Trace(err)
	}

	return buf.Bytes(), nil
}
// ReadPacketTo streams one complete packet payload into w, validating
// the sequence number and following continuation packets when the
// payload is split at MaxPayloadLen.
func (c *Conn) ReadPacketTo(w io.Writer) error {
	header := make([]byte, 4)
	if _, err := io.ReadFull(c.br, header); err != nil {
		return ErrBadConn
	}

	length := int(uint32(header[0]) | uint32(header[1])<<8 | uint32(header[2])<<16)
	if length < 1 {
		return errors.Errorf("invalid payload length %d", length)
	}

	if seq := header[3]; seq != c.Sequence {
		return errors.Errorf("invalid sequence %d != %d", seq, c.Sequence)
	}
	c.Sequence++

	n, err := io.CopyN(w, c.br, int64(length))
	if err != nil || n != int64(length) {
		return ErrBadConn
	}

	// A payload shorter than MaxPayloadLen is the last chunk; exactly
	// MaxPayloadLen means another packet continues this payload.
	if length < MaxPayloadLen {
		return nil
	}
	return c.ReadPacketTo(w)
}
// WritePacket sends data as one or more MySQL packets. data must
// already carry 4 header bytes, which are overwritten in place; the
// payload is split into MaxPayloadLen chunks as required and the
// sequence counter is advanced per packet.
func (c *Conn) WritePacket(data []byte) error {
	length := len(data) - 4

	// Emit full-size chunks while the remaining payload is too large
	// for a single packet.
	for length >= MaxPayloadLen {
		data[0], data[1], data[2] = 0xff, 0xff, 0xff
		data[3] = c.Sequence

		n, err := c.Write(data[:4+MaxPayloadLen])
		if err != nil || n != (4+MaxPayloadLen) {
			return ErrBadConn
		}

		c.Sequence++
		length -= MaxPayloadLen
		data = data[MaxPayloadLen:]
	}

	// Final (or only) chunk with the real remaining length.
	data[0] = byte(length)
	data[1] = byte(length >> 8)
	data[2] = byte(length >> 16)
	data[3] = c.Sequence

	n, err := c.Write(data)
	if err != nil || n != len(data) {
		return ErrBadConn
	}

	c.Sequence++
	return nil
}
// ResetSequence resets the packet sequence counter to 0; MySQL numbers
// packets per command cycle.
func (c *Conn) ResetSequence() {
	c.Sequence = 0
}
// Close resets the sequence counter and closes the underlying
// connection, if any.
func (c *Conn) Close() error {
	c.Sequence = 0
	if c.Conn == nil {
		return nil
	}
	return c.Conn.Close()
}

View File

@@ -0,0 +1,44 @@
# Bazel rules for the vendored go-mysql/replication package.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

# Library target covering the binlog replication protocol code.
go_library(
    name = "go_default_library",
    srcs = [
        "backup.go",
        "binlogstreamer.go",
        "binlogsyncer.go",
        "const.go",
        "doc.go",
        "event.go",
        "generic_event.go",
        "json_binary.go",
        "parser.go",
        "row_event.go",
        "time.go",
    ],
    importmap = "go-common/vendor/github.com/siddontang/go-mysql/replication",
    importpath = "github.com/siddontang/go-mysql/replication",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/juju/errors:go_default_library",
        "//vendor/github.com/satori/go.uuid:go_default_library",
        "//vendor/github.com/shopspring/decimal:go_default_library",
        "//vendor/github.com/siddontang/go-mysql/client:go_default_library",
        "//vendor/github.com/siddontang/go-mysql/mysql:go_default_library",
        "//vendor/github.com/siddontang/go/hack:go_default_library",
        "//vendor/github.com/sirupsen/logrus:go_default_library",
    ],
)

# Source filegroups used by the repository-wide srcs aggregation.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,96 @@
package replication
import (
"context"
"io"
"os"
"path"
"time"
"github.com/juju/errors"
. "github.com/siddontang/go-mysql/mysql"
)
// Like mysqlbinlog remote raw backup
// Backup remote binlog from position (filename, offset) and write in backupDir
//
// StartBackup streams raw binlog events starting at p and appends them
// to one file per binlog in backupDir; a new file is opened on each
// FORMAT_DESCRIPTION_EVENT. It returns nil once no event arrives within
// the per-event deadline, or the first error encountered.
func (b *BinlogSyncer) StartBackup(backupDir string, p Position, timeout time.Duration) error {
	if timeout == 0 {
		// a very long timeout here
		timeout = 30 * 3600 * 24 * time.Second
	}
	// NOTE(review): timeout is normalized above but never read afterwards;
	// the per-event deadline below is a fixed 2s — confirm intent.

	// Force use raw mode
	b.parser.SetRawMode(true)

	// NOTE(review): the MkdirAll error is ignored; a failure surfaces
	// later as an OpenFile error.
	os.MkdirAll(backupDir, 0755)

	s, err := b.StartSync(p)
	if err != nil {
		return errors.Trace(err)
	}

	var filename string
	var offset uint32

	var f *os.File
	defer func() {
		if f != nil {
			f.Close()
		}
	}()

	for {
		// Wait up to 2s for the next event; hitting the deadline ends
		// the backup normally.
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		e, err := s.GetEvent(ctx)
		cancel()

		if err == context.DeadlineExceeded {
			return nil
		}

		if err != nil {
			return errors.Trace(err)
		}

		offset = e.Header.LogPos

		if e.Header.EventType == ROTATE_EVENT {
			rotateEvent := e.Event.(*RotateEvent)
			filename = string(rotateEvent.NextLogName)

			if e.Header.Timestamp == 0 || offset == 0 {
				// fake rotate event
				continue
			}
		} else if e.Header.EventType == FORMAT_DESCRIPTION_EVENT {
			// FormateDescriptionEvent is the first event in binlog, we will close old one and create a new
			if f != nil {
				f.Close()
			}

			if len(filename) == 0 {
				return errors.Errorf("empty binlog filename for FormateDescriptionEvent")
			}

			f, err = os.OpenFile(path.Join(backupDir, filename), os.O_CREATE|os.O_WRONLY, 0644)
			if err != nil {
				return errors.Trace(err)
			}

			// write binlog header fe'bin'
			if _, err = f.Write(BinLogFileHeader); err != nil {
				return errors.Trace(err)
			}
		}

		// NOTE(review): if any event other than a fake rotate arrives
		// before the first FORMAT_DESCRIPTION_EVENT, f is still nil here
		// and this write panics — confirm the server guarantees ordering.
		if n, err := f.Write(e.RawData); err != nil {
			return errors.Trace(err)
		} else if n != len(e.RawData) {
			return errors.Trace(io.ErrShortWrite)
		}
	}

	return nil
}

View File

@@ -0,0 +1,61 @@
package replication
import (
"context"
"github.com/juju/errors"
log "github.com/sirupsen/logrus"
)
var (
ErrNeedSyncAgain = errors.New("Last sync error or closed, try sync and get event again")
ErrSyncClosed = errors.New("Sync was closed")
)
// BinlogStreamer gets the streaming event.
type BinlogStreamer struct {
	ch  chan *BinlogEvent // buffered stream of parsed events
	ech chan error        // buffered stream of sync errors
	err error             // first error received; poisons later GetEvent calls
}
// GetEvent gets the binlog event one by one, it will block until Syncer receives any events from MySQL
// or meets a sync error. You can pass a context (like Cancel or Timeout) to break the block.
//
// Once an error has been delivered via the error channel, the streamer is
// poisoned: every subsequent call returns ErrNeedSyncAgain until a new sync
// is started.
func (s *BinlogStreamer) GetEvent(ctx context.Context) (*BinlogEvent, error) {
	if s.err != nil {
		return nil, ErrNeedSyncAgain
	}

	select {
	case c := <-s.ch:
		return c, nil
	case s.err = <-s.ech:
		// Remember the error so later calls fail fast.
		return nil, s.err
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
// close shuts the streamer down with the generic ErrSyncClosed error.
func (s *BinlogStreamer) close() {
	s.closeWithError(ErrSyncClosed)
}
// closeWithError delivers err to the streamer's error channel, waking any
// GetEvent caller blocked in its select. A nil err is normalized to
// ErrSyncClosed. The send is non-blocking, so if the error channel buffer is
// full the error is dropped (an earlier error is already pending).
func (s *BinlogStreamer) closeWithError(err error) {
	if err == nil {
		err = ErrSyncClosed
	}
	log.Errorf("close sync with err: %v", err)
	select {
	case s.ech <- err:
	default:
	}
}
// newBinlogStreamer builds a streamer with generous buffering: up to 10240
// pending events and 4 pending errors.
func newBinlogStreamer() *BinlogStreamer {
	return &BinlogStreamer{
		ch:  make(chan *BinlogEvent, 10240),
		ech: make(chan error, 4),
	}
}

View File

@@ -0,0 +1,742 @@
package replication
import (
"context"
"crypto/tls"
"encoding/binary"
"fmt"
"net"
"os"
"sync"
"time"
"github.com/juju/errors"
"github.com/satori/go.uuid"
"github.com/siddontang/go-mysql/client"
. "github.com/siddontang/go-mysql/mysql"
log "github.com/sirupsen/logrus"
)
var (
errSyncRunning = errors.New("Sync is running, must Close first")
)
// BinlogSyncerConfig is the configuration for BinlogSyncer.
type BinlogSyncerConfig struct {
// ServerID is the unique ID in cluster.
ServerID uint32
// Flavor is "mysql" or "mariadb", if not set, use "mysql" default.
Flavor string
// Host is for MySQL server host.
Host string
// Port is for MySQL server port.
Port uint16
// User is for MySQL user.
User string
// Password is for MySQL password.
Password string
// Localhost is local hostname if register salve.
// If not set, use os.Hostname() instead.
Localhost string
// Charset is for MySQL client character set
Charset string
// SemiSyncEnabled enables semi-sync or not.
SemiSyncEnabled bool
// RawModeEnabled is for not parsing binlog event.
RawModeEnabled bool
// If not nil, use the provided tls.Config to connect to the database using TLS/SSL.
TLSConfig *tls.Config
// Use replication.Time structure for timestamp and datetime.
// We will use Local location for timestamp and UTC location for datatime.
ParseTime bool
// Use decimal.Decimal structure for decimals.
UseDecimal bool
// RecvBufferSize sets the size in bytes of the operating system's receive buffer associated with the connection.
RecvBufferSize int
// master heartbeat period
HeartbeatPeriod time.Duration
// read timeout
ReadTimeout time.Duration
}
// BinlogSyncer syncs binlog event from server.
type BinlogSyncer struct {
m sync.RWMutex
cfg BinlogSyncerConfig
c *client.Conn
wg sync.WaitGroup
parser *BinlogParser
nextPos Position
useGTID bool
gset GTIDSet
running bool
ctx context.Context
cancel context.CancelFunc
lastConnectionID uint32
}
// NewBinlogSyncer creates the BinlogSyncer with cfg.
func NewBinlogSyncer(cfg BinlogSyncerConfig) *BinlogSyncer {
	// Log the config with the password blanked out, then restore it.
	pass := cfg.Password
	cfg.Password = ""
	log.Infof("create BinlogSyncer with config %v", cfg)
	cfg.Password = pass

	b := &BinlogSyncer{
		cfg:     cfg,
		parser:  NewBinlogParser(),
		useGTID: false,
		running: false,
	}
	b.parser.SetRawMode(b.cfg.RawModeEnabled)
	b.parser.SetParseTime(b.cfg.ParseTime)
	b.parser.SetUseDecimal(b.cfg.UseDecimal)
	b.ctx, b.cancel = context.WithCancel(context.Background())

	return b
}
// Close closes the BinlogSyncer.
// It takes the syncer lock and is safe to call more than once; the actual
// shutdown work happens in the unexported close.
func (b *BinlogSyncer) Close() {
	b.m.Lock()
	defer b.m.Unlock()
	b.close()
}
func (b *BinlogSyncer) close() {
if b.isClosed() {
return
}
log.Info("syncer is closing...")
b.running = false
b.cancel()
if b.c != nil {
b.c.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
}
b.wg.Wait()
if b.c != nil {
b.c.Close()
}
log.Info("syncer is closed")
}
// isClosed reports whether the syncer's context has been canceled.
func (b *BinlogSyncer) isClosed() bool {
	select {
	case <-b.ctx.Done():
		return true
	default:
	}
	return false
}
// registerSlave connects to the master and registers this syncer as a
// replication slave (COM_REGISTER_SLAVE). Any previous connection is closed
// first, and the previous connection id is killed on the server so the
// master does not keep a stale binlog dump thread around.
func (b *BinlogSyncer) registerSlave() error {
	if b.c != nil {
		b.c.Close()
	}

	log.Infof("register slave for master server %s:%d", b.cfg.Host, b.cfg.Port)
	var err error
	b.c, err = client.Connect(fmt.Sprintf("%s:%d", b.cfg.Host, b.cfg.Port), b.cfg.User, b.cfg.Password, "", func(c *client.Conn) {
		c.TLSConfig = b.cfg.TLSConfig
	})
	if err != nil {
		return errors.Trace(err)
	}

	if len(b.cfg.Charset) != 0 {
		b.c.SetCharset(b.cfg.Charset)
	}

	//set read timeout
	if b.cfg.ReadTimeout > 0 {
		b.c.SetReadDeadline(time.Now().Add(b.cfg.ReadTimeout))
	}

	if b.cfg.RecvBufferSize > 0 {
		if tcp, ok := b.c.Conn.Conn.(*net.TCPConn); ok {
			tcp.SetReadBuffer(b.cfg.RecvBufferSize)
		}
	}

	// kill last connection id
	if b.lastConnectionID > 0 {
		cmd := fmt.Sprintf("KILL %d", b.lastConnectionID)
		if _, err := b.c.Execute(cmd); err != nil {
			log.Errorf("kill connection %d error %v", b.lastConnectionID, err)
			// Unknown thread id
			if code := ErrorCode(err.Error()); code != ER_NO_SUCH_THREAD {
				return errors.Trace(err)
			}
		}
		log.Infof("kill last connection id %d", b.lastConnectionID)
	}

	// save last last connection id for kill
	b.lastConnectionID = b.c.GetConnectionID()

	//for mysql 5.6+, binlog has a crc32 checksum
	//before mysql 5.6, this will not work, don't matter.:-)
	r, err := b.c.Execute("SHOW GLOBAL VARIABLES LIKE 'BINLOG_CHECKSUM'")
	if err != nil {
		return errors.Trace(err)
	}
	if s, _ := r.GetString(0, 1); s != "" {
		// maybe CRC32 or NONE

		// mysqlbinlog.cc use NONE, see its below comments:

		// Make a notice to the server that this client
		// is checksum-aware. It does not need the first fake Rotate
		// necessary checksummed.
		// That preference is specified below.

		if _, err = b.c.Execute(`SET @master_binlog_checksum='NONE'`); err != nil {
			return errors.Trace(err)
		}

		// if _, err = b.c.Execute(`SET @master_binlog_checksum=@@global.binlog_checksum`); err != nil {
		//	return errors.Trace(err)
		// }
	}

	if b.cfg.Flavor == MariaDBFlavor {
		// Refer https://github.com/alibaba/canal/wiki/BinlogChange(MariaDB5&10)
		// Tell the server that we understand GTIDs by setting our slave capability
		// to MARIA_SLAVE_CAPABILITY_GTID = 4 (MariaDB >= 10.0.1).
		if _, err := b.c.Execute("SET @mariadb_slave_capability=4"); err != nil {
			return errors.Errorf("failed to set @mariadb_slave_capability=4: %v", err)
		}
	}

	if b.cfg.HeartbeatPeriod > 0 {
		_, err = b.c.Execute(fmt.Sprintf("SET @master_heartbeat_period=%d;", b.cfg.HeartbeatPeriod))
		if err != nil {
			// BUG FIX: log.Error is Print-style, so the original call never
			// interpreted %d; use Errorf so period and error are formatted.
			log.Errorf("failed to set @master_heartbeat_period=%d: %v", b.cfg.HeartbeatPeriod, err)
			return errors.Trace(err)
		}
	}

	if err = b.writeRegisterSlaveCommand(); err != nil {
		return errors.Trace(err)
	}

	if _, err = b.c.ReadOKPacket(); err != nil {
		return errors.Trace(err)
	}

	return nil
}
// enableSemiSync switches this connection into semi-sync slave mode when the
// config requests it and the master has rpl_semi_sync_master_enabled = ON.
// If the master does not support semi-sync, the feature is disabled in the
// config and no error is returned.
func (b *BinlogSyncer) enableSemiSync() error {
	if !b.cfg.SemiSyncEnabled {
		return nil
	}

	r, err := b.c.Execute("SHOW VARIABLES LIKE 'rpl_semi_sync_master_enabled';")
	if err != nil {
		return errors.Trace(err)
	}
	if s, _ := r.GetString(0, 1); s != "ON" {
		log.Errorf("master does not support semi synchronous replication, use no semi-sync")
		b.cfg.SemiSyncEnabled = false
		return nil
	}

	if _, err := b.c.Execute(`SET @rpl_semi_sync_slave = 1;`); err != nil {
		return errors.Trace(err)
	}
	return nil
}
func (b *BinlogSyncer) prepare() error {
if b.isClosed() {
return errors.Trace(ErrSyncClosed)
}
if err := b.registerSlave(); err != nil {
return errors.Trace(err)
}
if err := b.enableSemiSync(); err != nil {
return errors.Trace(err)
}
return nil
}
func (b *BinlogSyncer) startDumpStream() *BinlogStreamer {
b.running = true
s := newBinlogStreamer()
b.wg.Add(1)
go b.onStream(s)
return s
}
// GetNextPosition returns the next position of the syncer
func (b *BinlogSyncer) GetNextPosition() Position {
return b.nextPos
}
// StartSync starts syncing from the `pos` position.
func (b *BinlogSyncer) StartSync(pos Position) (*BinlogStreamer, error) {
log.Infof("begin to sync binlog from position %s", pos)
b.m.Lock()
defer b.m.Unlock()
if b.running {
return nil, errors.Trace(errSyncRunning)
}
if err := b.prepareSyncPos(pos); err != nil {
return nil, errors.Trace(err)
}
return b.startDumpStream(), nil
}
// StartSyncGTID starts syncing from the `gset` GTIDSet.
func (b *BinlogSyncer) StartSyncGTID(gset GTIDSet) (*BinlogStreamer, error) {
log.Infof("begin to sync binlog from GTID %s", gset)
b.useGTID = true
b.gset = gset
b.m.Lock()
defer b.m.Unlock()
if b.running {
return nil, errors.Trace(errSyncRunning)
}
if err := b.prepare(); err != nil {
return nil, errors.Trace(err)
}
var err error
if b.cfg.Flavor != MariaDBFlavor {
// default use MySQL
err = b.writeBinlogDumpMysqlGTIDCommand(gset)
} else {
err = b.writeBinlogDumpMariadbGTIDCommand(gset)
}
if err != nil {
return nil, err
}
return b.startDumpStream(), nil
}
// writeBinlogDumpCommand sends COM_BINLOG_DUMP to start streaming from p.
// Packet layout after the 4-byte packet header:
//
//	1 byte  command (COM_BINLOG_DUMP)
//	4 bytes binlog position (little-endian)
//	2 bytes flags (BINLOG_DUMP_NEVER_STOP: block waiting for new events)
//	4 bytes this slave's server id
//	N bytes binlog file name
func (b *BinlogSyncer) writeBinlogDumpCommand(p Position) error {
	b.c.ResetSequence()

	data := make([]byte, 4+1+4+2+4+len(p.Name))

	pos := 4
	data[pos] = COM_BINLOG_DUMP
	pos++

	binary.LittleEndian.PutUint32(data[pos:], p.Pos)
	pos += 4

	binary.LittleEndian.PutUint16(data[pos:], BINLOG_DUMP_NEVER_STOP)
	pos += 2

	binary.LittleEndian.PutUint32(data[pos:], b.cfg.ServerID)
	pos += 4

	copy(data[pos:], p.Name)

	return b.c.WritePacket(data)
}
func (b *BinlogSyncer) writeBinlogDumpMysqlGTIDCommand(gset GTIDSet) error {
p := Position{"", 4}
gtidData := gset.Encode()
b.c.ResetSequence()
data := make([]byte, 4+1+2+4+4+len(p.Name)+8+4+len(gtidData))
pos := 4
data[pos] = COM_BINLOG_DUMP_GTID
pos++
binary.LittleEndian.PutUint16(data[pos:], 0)
pos += 2
binary.LittleEndian.PutUint32(data[pos:], b.cfg.ServerID)
pos += 4
binary.LittleEndian.PutUint32(data[pos:], uint32(len(p.Name)))
pos += 4
n := copy(data[pos:], p.Name)
pos += n
binary.LittleEndian.PutUint64(data[pos:], uint64(p.Pos))
pos += 8
binary.LittleEndian.PutUint32(data[pos:], uint32(len(gtidData)))
pos += 4
n = copy(data[pos:], gtidData)
pos += n
data = data[0:pos]
return b.c.WritePacket(data)
}
func (b *BinlogSyncer) writeBinlogDumpMariadbGTIDCommand(gset GTIDSet) error {
// Copy from vitess
startPos := gset.String()
// Set the slave_connect_state variable before issuing COM_BINLOG_DUMP to
// provide the start position in GTID form.
query := fmt.Sprintf("SET @slave_connect_state='%s'", startPos)
if _, err := b.c.Execute(query); err != nil {
return errors.Errorf("failed to set @slave_connect_state='%s': %v", startPos, err)
}
// Real slaves set this upon connecting if their gtid_strict_mode option was
// enabled. We always use gtid_strict_mode because we need it to make our
// internal GTID comparisons safe.
if _, err := b.c.Execute("SET @slave_gtid_strict_mode=1"); err != nil {
return errors.Errorf("failed to set @slave_gtid_strict_mode=1: %v", err)
}
// Since we use @slave_connect_state, the file and position here are ignored.
return b.writeBinlogDumpCommand(Position{"", 0})
}
// localHostname returns the hostname that register slave would register as:
// the configured Localhost when set, otherwise os.Hostname().
func (b *BinlogSyncer) localHostname() string {
	if b.cfg.Localhost != "" {
		return b.cfg.Localhost
	}
	h, _ := os.Hostname()
	return h
}
func (b *BinlogSyncer) writeRegisterSlaveCommand() error {
b.c.ResetSequence()
hostname := b.localHostname()
// This should be the name of slave host not the host we are connecting to.
data := make([]byte, 4+1+4+1+len(hostname)+1+len(b.cfg.User)+1+len(b.cfg.Password)+2+4+4)
pos := 4
data[pos] = COM_REGISTER_SLAVE
pos++
binary.LittleEndian.PutUint32(data[pos:], b.cfg.ServerID)
pos += 4
// This should be the name of slave hostname not the host we are connecting to.
data[pos] = uint8(len(hostname))
pos++
n := copy(data[pos:], hostname)
pos += n
data[pos] = uint8(len(b.cfg.User))
pos++
n = copy(data[pos:], b.cfg.User)
pos += n
data[pos] = uint8(len(b.cfg.Password))
pos++
n = copy(data[pos:], b.cfg.Password)
pos += n
binary.LittleEndian.PutUint16(data[pos:], b.cfg.Port)
pos += 2
//replication rank, not used
binary.LittleEndian.PutUint32(data[pos:], 0)
pos += 4
// master ID, 0 is OK
binary.LittleEndian.PutUint32(data[pos:], 0)
return b.c.WritePacket(data)
}
// replySemiSyncACK acknowledges receipt of events up to position p on a
// semi-sync replication stream. Packet layout after the 4-byte header:
// indicator byte, 8-byte little-endian log position, binlog file name.
func (b *BinlogSyncer) replySemiSyncACK(p Position) error {
	b.c.ResetSequence()

	buf := make([]byte, 4+1+8+len(p.Name))
	off := 4

	// semi sync indicator
	buf[off] = SemiSyncIndicator
	off++

	binary.LittleEndian.PutUint64(buf[off:], uint64(p.Pos))
	off += 8

	copy(buf[off:], p.Name)

	if err := b.c.WritePacket(buf); err != nil {
		return errors.Trace(err)
	}
	return nil
}
func (b *BinlogSyncer) retrySync() error {
b.m.Lock()
defer b.m.Unlock()
b.parser.Reset()
if b.useGTID {
log.Infof("begin to re-sync from %s", b.gset.String())
if err := b.prepareSyncGTID(b.gset); err != nil {
return errors.Trace(err)
}
} else {
log.Infof("begin to re-sync from %s", b.nextPos)
if err := b.prepareSyncPos(b.nextPos); err != nil {
return errors.Trace(err)
}
}
return nil
}
func (b *BinlogSyncer) prepareSyncPos(pos Position) error {
// always start from position 4
if pos.Pos < 4 {
pos.Pos = 4
}
if err := b.prepare(); err != nil {
return errors.Trace(err)
}
if err := b.writeBinlogDumpCommand(pos); err != nil {
return errors.Trace(err)
}
return nil
}
// prepareSyncGTID registers the slave and issues a GTID-based dump command
// matching the configured flavor (MariaDB or, by default, MySQL).
func (b *BinlogSyncer) prepareSyncGTID(gset GTIDSet) error {
	if err := b.prepare(); err != nil {
		return errors.Trace(err)
	}

	var err error
	if b.cfg.Flavor == MariaDBFlavor {
		err = b.writeBinlogDumpMariadbGTIDCommand(gset)
	} else {
		// default use MySQL
		err = b.writeBinlogDumpMysqlGTIDCommand(gset)
	}
	return err
}
func (b *BinlogSyncer) onStream(s *BinlogStreamer) {
defer func() {
if e := recover(); e != nil {
s.closeWithError(fmt.Errorf("Err: %v\n Stack: %s", e, Pstack()))
}
b.wg.Done()
}()
for {
data, err := b.c.ReadPacket()
if err != nil {
log.Error(err)
// we meet connection error, should re-connect again with
// last nextPos or nextGTID we got.
if len(b.nextPos.Name) == 0 && b.gset == nil {
// we can't get the correct position, close.
s.closeWithError(err)
return
}
// TODO: add a max retry count.
for {
select {
case <-b.ctx.Done():
s.close()
return
case <-time.After(time.Second):
if err = b.retrySync(); err != nil {
log.Errorf("retry sync err: %v, wait 1s and retry again", err)
continue
}
}
break
}
// we connect the server and begin to re-sync again.
continue
}
//set read timeout
if b.cfg.ReadTimeout > 0 {
b.c.SetReadDeadline(time.Now().Add(b.cfg.ReadTimeout))
}
switch data[0] {
case OK_HEADER:
if err = b.parseEvent(s, data); err != nil {
s.closeWithError(err)
return
}
case ERR_HEADER:
err = b.c.HandleErrorPacket(data)
s.closeWithError(err)
return
case EOF_HEADER:
// Refer http://dev.mysql.com/doc/internals/en/packet-EOF_Packet.html
// In the MySQL client/server protocol, EOF and OK packets serve the same purpose.
// Some users told me that they received EOF packet here, but I don't know why.
// So we only log a message and retry ReadPacket.
log.Info("receive EOF packet, retry ReadPacket")
continue
default:
log.Errorf("invalid stream header %c", data[0])
continue
}
}
}
func (b *BinlogSyncer) parseEvent(s *BinlogStreamer, data []byte) error {
//skip OK byte, 0x00
data = data[1:]
needACK := false
if b.cfg.SemiSyncEnabled && (data[0] == SemiSyncIndicator) {
needACK = (data[1] == 0x01)
//skip semi sync header
data = data[2:]
}
e, err := b.parser.Parse(data)
if err != nil {
return errors.Trace(err)
}
if e.Header.LogPos > 0 {
// Some events like FormatDescriptionEvent return 0, ignore.
b.nextPos.Pos = e.Header.LogPos
}
switch event := e.Event.(type) {
case *RotateEvent:
b.nextPos.Name = string(event.NextLogName)
b.nextPos.Pos = uint32(event.Position)
log.Infof("rotate to %s", b.nextPos)
case *GTIDEvent:
if !b.useGTID {
break
}
u, _ := uuid.FromBytes(event.SID)
err := b.gset.Update(fmt.Sprintf("%s:%d", u.String(), event.GNO))
if err != nil {
return errors.Trace(err)
}
case *MariadbGTIDEvent:
if !b.useGTID {
break
}
GTID := event.GTID
err := b.gset.Update(fmt.Sprintf("%d-%d-%d", GTID.DomainID, GTID.ServerID, GTID.SequenceNumber))
if err != nil {
return errors.Trace(err)
}
case *XIDEvent:
event.GSet = b.getGtidSet()
case *QueryEvent:
event.GSet = b.getGtidSet()
}
needStop := false
select {
case s.ch <- e:
case <-b.ctx.Done():
needStop = true
}
if needACK {
err := b.replySemiSyncACK(b.nextPos)
if err != nil {
return errors.Trace(err)
}
}
if needStop {
return errors.New("sync is been closing...")
}
return nil
}
// getGtidSet returns a freshly parsed copy of the current GTID set, or nil
// when the syncer is not running in GTID mode.
func (b *BinlogSyncer) getGtidSet() GTIDSet {
	if !b.useGTID {
		return nil
	}

	flavor := MySQLFlavor
	if b.cfg.Flavor == MariaDBFlavor {
		flavor = MariaDBFlavor
	}

	gtidSet, _ := ParseGTIDSet(flavor, b.gset.String())
	return gtidSet
}
// LastConnectionID returns last connectionID.
func (b *BinlogSyncer) LastConnectionID() uint32 {
return b.lastConnectionID
}

View File

@@ -0,0 +1,185 @@
package replication
const (
//we only support MySQL 5.0.0+ binlog format, maybe???
MinBinlogVersion = 4
)
var (
//binlog header [ fe `bin` ]
BinLogFileHeader []byte = []byte{0xfe, 0x62, 0x69, 0x6e}
SemiSyncIndicator byte = 0xef
)
const (
LOG_EVENT_BINLOG_IN_USE_F uint16 = 0x0001
LOG_EVENT_FORCED_ROTATE_F uint16 = 0x0002
LOG_EVENT_THREAD_SPECIFIC_F uint16 = 0x0004
LOG_EVENT_SUPPRESS_USE_F uint16 = 0x0008
LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F uint16 = 0x0010
LOG_EVENT_ARTIFICIAL_F uint16 = 0x0020
LOG_EVENT_RELAY_LOG_F uint16 = 0x0040
LOG_EVENT_IGNORABLE_F uint16 = 0x0080
LOG_EVENT_NO_FILTER_F uint16 = 0x0100
LOG_EVENT_MTS_ISOLATE_F uint16 = 0x0200
)
const (
BINLOG_DUMP_NEVER_STOP uint16 = 0x00
BINLOG_DUMP_NON_BLOCK uint16 = 0x01
BINLOG_THROUGH_POSITION uint16 = 0x02
BINLOG_THROUGH_GTID uint16 = 0x04
)
const (
BINLOG_ROW_IMAGE_FULL = "FULL"
BINLOG_ROW_IAMGE_MINIMAL = "MINIMAL"
BINLOG_ROW_IMAGE_NOBLOB = "NOBLOB"
)
type EventType byte
const (
UNKNOWN_EVENT EventType = iota
START_EVENT_V3
QUERY_EVENT
STOP_EVENT
ROTATE_EVENT
INTVAR_EVENT
LOAD_EVENT
SLAVE_EVENT
CREATE_FILE_EVENT
APPEND_BLOCK_EVENT
EXEC_LOAD_EVENT
DELETE_FILE_EVENT
NEW_LOAD_EVENT
RAND_EVENT
USER_VAR_EVENT
FORMAT_DESCRIPTION_EVENT
XID_EVENT
BEGIN_LOAD_QUERY_EVENT
EXECUTE_LOAD_QUERY_EVENT
TABLE_MAP_EVENT
WRITE_ROWS_EVENTv0
UPDATE_ROWS_EVENTv0
DELETE_ROWS_EVENTv0
WRITE_ROWS_EVENTv1
UPDATE_ROWS_EVENTv1
DELETE_ROWS_EVENTv1
INCIDENT_EVENT
HEARTBEAT_EVENT
IGNORABLE_EVENT
ROWS_QUERY_EVENT
WRITE_ROWS_EVENTv2
UPDATE_ROWS_EVENTv2
DELETE_ROWS_EVENTv2
GTID_EVENT
ANONYMOUS_GTID_EVENT
PREVIOUS_GTIDS_EVENT
)
const (
// MariaDB event starts from 160
MARIADB_ANNOTATE_ROWS_EVENT EventType = 160 + iota
MARIADB_BINLOG_CHECKPOINT_EVENT
MARIADB_GTID_EVENT
MARIADB_GTID_LIST_EVENT
)
func (e EventType) String() string {
switch e {
case UNKNOWN_EVENT:
return "UnknownEvent"
case START_EVENT_V3:
return "StartEventV3"
case QUERY_EVENT:
return "QueryEvent"
case STOP_EVENT:
return "StopEvent"
case ROTATE_EVENT:
return "RotateEvent"
case INTVAR_EVENT:
return "IntVarEvent"
case LOAD_EVENT:
return "LoadEvent"
case SLAVE_EVENT:
return "SlaveEvent"
case CREATE_FILE_EVENT:
return "CreateFileEvent"
case APPEND_BLOCK_EVENT:
return "AppendBlockEvent"
case EXEC_LOAD_EVENT:
return "ExecLoadEvent"
case DELETE_FILE_EVENT:
return "DeleteFileEvent"
case NEW_LOAD_EVENT:
return "NewLoadEvent"
case RAND_EVENT:
return "RandEvent"
case USER_VAR_EVENT:
return "UserVarEvent"
case FORMAT_DESCRIPTION_EVENT:
return "FormatDescriptionEvent"
case XID_EVENT:
return "XIDEvent"
case BEGIN_LOAD_QUERY_EVENT:
return "BeginLoadQueryEvent"
case EXECUTE_LOAD_QUERY_EVENT:
return "ExectueLoadQueryEvent"
case TABLE_MAP_EVENT:
return "TableMapEvent"
case WRITE_ROWS_EVENTv0:
return "WriteRowsEventV0"
case UPDATE_ROWS_EVENTv0:
return "UpdateRowsEventV0"
case DELETE_ROWS_EVENTv0:
return "DeleteRowsEventV0"
case WRITE_ROWS_EVENTv1:
return "WriteRowsEventV1"
case UPDATE_ROWS_EVENTv1:
return "UpdateRowsEventV1"
case DELETE_ROWS_EVENTv1:
return "DeleteRowsEventV1"
case INCIDENT_EVENT:
return "IncidentEvent"
case HEARTBEAT_EVENT:
return "HeartbeatEvent"
case IGNORABLE_EVENT:
return "IgnorableEvent"
case ROWS_QUERY_EVENT:
return "RowsQueryEvent"
case WRITE_ROWS_EVENTv2:
return "WriteRowsEventV2"
case UPDATE_ROWS_EVENTv2:
return "UpdateRowsEventV2"
case DELETE_ROWS_EVENTv2:
return "DeleteRowsEventV2"
case GTID_EVENT:
return "GTIDEvent"
case ANONYMOUS_GTID_EVENT:
return "AnonymousGTIDEvent"
case PREVIOUS_GTIDS_EVENT:
return "PreviousGTIDsEvent"
case MARIADB_ANNOTATE_ROWS_EVENT:
return "MariadbAnnotateRowsEvent"
case MARIADB_BINLOG_CHECKPOINT_EVENT:
return "MariadbBinLogCheckPointEvent"
case MARIADB_GTID_EVENT:
return "MariadbGTIDEvent"
case MARIADB_GTID_LIST_EVENT:
return "MariadbGTIDListEvent"
default:
return "UnknownEvent"
}
}
const (
BINLOG_CHECKSUM_ALG_OFF byte = 0 // Events are without checksum though its generator
// is checksum-capable New Master (NM).
BINLOG_CHECKSUM_ALG_CRC32 byte = 1 // CRC32 of zlib algorithm.
// BINLOG_CHECKSUM_ALG_ENUM_END, // the cut line: valid alg range is [1, 0x7f].
BINLOG_CHECKSUM_ALG_UNDEF byte = 255 // special value to tag undetermined yet checksum
// or events from checksum-unaware servers
)

View File

@@ -0,0 +1,8 @@
/*
Package replication handles the MySQL replication protocol.

Todo:

+ Get table information when handling rows event.
*/
package replication

View File

@@ -0,0 +1,487 @@
package replication
import (
"encoding/binary"
//"encoding/hex"
"fmt"
"io"
"strconv"
"strings"
"time"
"unicode"
"github.com/juju/errors"
"github.com/satori/go.uuid"
. "github.com/siddontang/go-mysql/mysql"
)
const (
EventHeaderSize = 19
SidLength = 16
LogicalTimestampTypeCode = 2
PartLogicalTimestampLength = 8
)
type BinlogEvent struct {
// raw binlog data, including crc32 checksum if exists
RawData []byte
Header *EventHeader
Event Event
}
func (e *BinlogEvent) Dump(w io.Writer) {
e.Header.Dump(w)
e.Event.Dump(w)
}
type Event interface {
//Dump Event, format like python-mysql-replication
Dump(w io.Writer)
Decode(data []byte) error
}
type EventError struct {
Header *EventHeader
//Error message
Err string
//Event data
Data []byte
}
func (e *EventError) Error() string {
return e.Err
}
type EventHeader struct {
Timestamp uint32
EventType EventType
ServerID uint32
EventSize uint32
LogPos uint32
Flags uint16
}
// Decode parses the fixed 19-byte binlog event header:
// timestamp (4), event type (1), server id (4), event size (4),
// next-event log position (4) and flags (2), all little-endian.
func (h *EventHeader) Decode(data []byte) error {
	if len(data) < EventHeaderSize {
		return errors.Errorf("header size too short %d, must 19", len(data))
	}

	h.Timestamp = binary.LittleEndian.Uint32(data[0:4])
	h.EventType = EventType(data[4])
	h.ServerID = binary.LittleEndian.Uint32(data[5:9])
	h.EventSize = binary.LittleEndian.Uint32(data[9:13])
	h.LogPos = binary.LittleEndian.Uint32(data[13:17])
	h.Flags = binary.LittleEndian.Uint16(data[17:19])

	// The declared size must at least cover the header itself.
	if h.EventSize < uint32(EventHeaderSize) {
		return errors.Errorf("invalid event size %d, must >= 19", h.EventSize)
	}

	return nil
}
func (h *EventHeader) Dump(w io.Writer) {
fmt.Fprintf(w, "=== %s ===\n", EventType(h.EventType))
fmt.Fprintf(w, "Date: %s\n", time.Unix(int64(h.Timestamp), 0).Format(TimeFormat))
fmt.Fprintf(w, "Log position: %d\n", h.LogPos)
fmt.Fprintf(w, "Event size: %d\n", h.EventSize)
}
var (
checksumVersionSplitMysql []int = []int{5, 6, 1}
checksumVersionProductMysql int = (checksumVersionSplitMysql[0]*256+checksumVersionSplitMysql[1])*256 + checksumVersionSplitMysql[2]
checksumVersionSplitMariaDB []int = []int{5, 3, 0}
checksumVersionProductMariaDB int = (checksumVersionSplitMariaDB[0]*256+checksumVersionSplitMariaDB[1])*256 + checksumVersionSplitMariaDB[2]
)
// splitServerVersion parses a server version string of the form "X.Y.Zsuffix"
// (the suffix begins at the first non-digit, e.g. "5.6.4-m3") into [X, Y, Z].
// Versions with fewer than three dot-separated parts yield [0, 0, 0].
func splitServerVersion(server string) []int {
	seps := strings.Split(server, ".")
	if len(seps) < 3 {
		return []int{0, 0, 0}
	}

	x, _ := strconv.Atoi(seps[0])
	y, _ := strconv.Atoi(seps[1])

	// Z may carry a non-numeric suffix ("4-m3"): keep only the leading digits.
	// BUG FIX: index must default to the full length, otherwise purely numeric
	// parts (e.g. "11" in "8.0.11") parsed as 0.
	index := len(seps[2])
	for i, c := range seps[2] {
		if !unicode.IsNumber(c) {
			index = i
			break
		}
	}
	z, _ := strconv.Atoi(seps[2][0:index])

	return []int{x, y, z}
}
// calcVersionProduct collapses a server version string into a single
// comparable integer: ((X*256)+Y)*256+Z.
func calcVersionProduct(server string) int {
	v := splitServerVersion(server)
	return (v[0]*256+v[1])*256 + v[2]
}
type FormatDescriptionEvent struct {
Version uint16
//len = 50
ServerVersion []byte
CreateTimestamp uint32
EventHeaderLength uint8
EventTypeHeaderLengths []byte
// 0 is off, 1 is for CRC32, 255 is undefined
ChecksumAlgorithm byte
}
func (e *FormatDescriptionEvent) Decode(data []byte) error {
pos := 0
e.Version = binary.LittleEndian.Uint16(data[pos:])
pos += 2
e.ServerVersion = make([]byte, 50)
copy(e.ServerVersion, data[pos:])
pos += 50
e.CreateTimestamp = binary.LittleEndian.Uint32(data[pos:])
pos += 4
e.EventHeaderLength = data[pos]
pos++
if e.EventHeaderLength != byte(EventHeaderSize) {
return errors.Errorf("invalid event header length %d, must 19", e.EventHeaderLength)
}
server := string(e.ServerVersion)
checksumProduct := checksumVersionProductMysql
if strings.Contains(strings.ToLower(server), "mariadb") {
checksumProduct = checksumVersionProductMariaDB
}
if calcVersionProduct(string(e.ServerVersion)) >= checksumProduct {
// here, the last 5 bytes is 1 byte check sum alg type and 4 byte checksum if exists
e.ChecksumAlgorithm = data[len(data)-5]
e.EventTypeHeaderLengths = data[pos : len(data)-5]
} else {
e.ChecksumAlgorithm = BINLOG_CHECKSUM_ALG_UNDEF
e.EventTypeHeaderLengths = data[pos:]
}
return nil
}
func (e *FormatDescriptionEvent) Dump(w io.Writer) {
fmt.Fprintf(w, "Version: %d\n", e.Version)
fmt.Fprintf(w, "Server version: %s\n", e.ServerVersion)
//fmt.Fprintf(w, "Create date: %s\n", time.Unix(int64(e.CreateTimestamp), 0).Format(TimeFormat))
fmt.Fprintf(w, "Checksum algorithm: %d\n", e.ChecksumAlgorithm)
//fmt.Fprintf(w, "Event header lengths: \n%s", hex.Dump(e.EventTypeHeaderLengths))
fmt.Fprintln(w)
}
type RotateEvent struct {
Position uint64
NextLogName []byte
}
func (e *RotateEvent) Decode(data []byte) error {
e.Position = binary.LittleEndian.Uint64(data[0:])
e.NextLogName = data[8:]
return nil
}
func (e *RotateEvent) Dump(w io.Writer) {
fmt.Fprintf(w, "Position: %d\n", e.Position)
fmt.Fprintf(w, "Next log name: %s\n", e.NextLogName)
fmt.Fprintln(w)
}
type XIDEvent struct {
XID uint64
// in fact XIDEvent dosen't have the GTIDSet information, just for beneficial to use
GSet GTIDSet
}
func (e *XIDEvent) Decode(data []byte) error {
e.XID = binary.LittleEndian.Uint64(data)
return nil
}
func (e *XIDEvent) Dump(w io.Writer) {
fmt.Fprintf(w, "XID: %d\n", e.XID)
if e.GSet != nil {
fmt.Fprintf(w, "GTIDSet: %s\n", e.GSet.String())
}
fmt.Fprintln(w)
}
type QueryEvent struct {
SlaveProxyID uint32
ExecutionTime uint32
ErrorCode uint16
StatusVars []byte
Schema []byte
Query []byte
// in fact QueryEvent dosen't have the GTIDSet information, just for beneficial to use
GSet GTIDSet
}
func (e *QueryEvent) Decode(data []byte) error {
pos := 0
e.SlaveProxyID = binary.LittleEndian.Uint32(data[pos:])
pos += 4
e.ExecutionTime = binary.LittleEndian.Uint32(data[pos:])
pos += 4
schemaLength := uint8(data[pos])
pos++
e.ErrorCode = binary.LittleEndian.Uint16(data[pos:])
pos += 2
statusVarsLength := binary.LittleEndian.Uint16(data[pos:])
pos += 2
e.StatusVars = data[pos : pos+int(statusVarsLength)]
pos += int(statusVarsLength)
e.Schema = data[pos : pos+int(schemaLength)]
pos += int(schemaLength)
//skip 0x00
pos++
e.Query = data[pos:]
return nil
}
func (e *QueryEvent) Dump(w io.Writer) {
fmt.Fprintf(w, "Slave proxy ID: %d\n", e.SlaveProxyID)
fmt.Fprintf(w, "Execution time: %d\n", e.ExecutionTime)
fmt.Fprintf(w, "Error code: %d\n", e.ErrorCode)
//fmt.Fprintf(w, "Status vars: \n%s", hex.Dump(e.StatusVars))
fmt.Fprintf(w, "Schema: %s\n", e.Schema)
fmt.Fprintf(w, "Query: %s\n", e.Query)
if e.GSet != nil {
fmt.Fprintf(w, "GTIDSet: %s\n", e.GSet.String())
}
fmt.Fprintln(w)
}
type GTIDEvent struct {
CommitFlag uint8
SID []byte
GNO int64
LastCommitted int64
SequenceNumber int64
}
func (e *GTIDEvent) Decode(data []byte) error {
pos := 0
e.CommitFlag = uint8(data[pos])
pos++
e.SID = data[pos : pos+SidLength]
pos += SidLength
e.GNO = int64(binary.LittleEndian.Uint64(data[pos:]))
pos += 8
if len(data) >= 42 {
if uint8(data[pos]) == LogicalTimestampTypeCode {
pos++
e.LastCommitted = int64(binary.LittleEndian.Uint64(data[pos:]))
pos += PartLogicalTimestampLength
e.SequenceNumber = int64(binary.LittleEndian.Uint64(data[pos:]))
}
}
return nil
}
func (e *GTIDEvent) Dump(w io.Writer) {
fmt.Fprintf(w, "Commit flag: %d\n", e.CommitFlag)
u, _ := uuid.FromBytes(e.SID)
fmt.Fprintf(w, "GTID_NEXT: %s:%d\n", u.String(), e.GNO)
fmt.Fprintf(w, "LAST_COMMITTED: %d\n", e.LastCommitted)
fmt.Fprintf(w, "SEQUENCE_NUMBER: %d\n", e.SequenceNumber)
fmt.Fprintln(w)
}
// BeginLoadQueryEvent carries a chunk of a file being transferred for
// LOAD DATA INFILE: a 4-byte file id followed by the raw block data.
type BeginLoadQueryEvent struct {
	FileID    uint32
	BlockData []byte
}

// Decode reads the little-endian file id and keeps the remainder as the block.
func (e *BeginLoadQueryEvent) Decode(data []byte) error {
	e.FileID = binary.LittleEndian.Uint32(data[:4])
	e.BlockData = data[4:]
	return nil
}

// Dump prints the event, format like python-mysql-replication.
func (e *BeginLoadQueryEvent) Dump(w io.Writer) {
	fmt.Fprintf(w, "File ID: %d\n", e.FileID)
	fmt.Fprintf(w, "Block data: %s\n", e.BlockData)
	fmt.Fprintln(w)
}
type ExecuteLoadQueryEvent struct {
SlaveProxyID uint32
ExecutionTime uint32
SchemaLength uint8
ErrorCode uint16
StatusVars uint16
FileID uint32
StartPos uint32
EndPos uint32
DupHandlingFlags uint8
}
func (e *ExecuteLoadQueryEvent) Decode(data []byte) error {
pos := 0
e.SlaveProxyID = binary.LittleEndian.Uint32(data[pos:])
pos += 4
e.ExecutionTime = binary.LittleEndian.Uint32(data[pos:])
pos += 4
e.SchemaLength = uint8(data[pos])
pos++
e.ErrorCode = binary.LittleEndian.Uint16(data[pos:])
pos += 2
e.StatusVars = binary.LittleEndian.Uint16(data[pos:])
pos += 2
e.FileID = binary.LittleEndian.Uint32(data[pos:])
pos += 4
e.StartPos = binary.LittleEndian.Uint32(data[pos:])
pos += 4
e.EndPos = binary.LittleEndian.Uint32(data[pos:])
pos += 4
e.DupHandlingFlags = uint8(data[pos])
return nil
}
func (e *ExecuteLoadQueryEvent) Dump(w io.Writer) {
fmt.Fprintf(w, "Slave proxy ID: %d\n", e.SlaveProxyID)
fmt.Fprintf(w, "Execution time: %d\n", e.ExecutionTime)
fmt.Fprintf(w, "Schame length: %d\n", e.SchemaLength)
fmt.Fprintf(w, "Error code: %d\n", e.ErrorCode)
fmt.Fprintf(w, "Status vars length: %d\n", e.StatusVars)
fmt.Fprintf(w, "File ID: %d\n", e.FileID)
fmt.Fprintf(w, "Start pos: %d\n", e.StartPos)
fmt.Fprintf(w, "End pos: %d\n", e.EndPos)
fmt.Fprintf(w, "Dup handling flags: %d\n", e.DupHandlingFlags)
fmt.Fprintln(w)
}
// case MARIADB_ANNOTATE_ROWS_EVENT:
// return "MariadbAnnotateRowsEvent"
type MariadbAnnotateRowsEvent struct {
Query []byte
}
func (e *MariadbAnnotateRowsEvent) Decode(data []byte) error {
e.Query = data
return nil
}
func (e *MariadbAnnotateRowsEvent) Dump(w io.Writer) {
fmt.Fprintf(w, "Query: %s\n", e.Query)
fmt.Fprintln(w)
}
type MariadbBinlogCheckPointEvent struct {
Info []byte
}
func (e *MariadbBinlogCheckPointEvent) Decode(data []byte) error {
e.Info = data
return nil
}
func (e *MariadbBinlogCheckPointEvent) Dump(w io.Writer) {
fmt.Fprintf(w, "Info: %s\n", e.Info)
fmt.Fprintln(w)
}
// MariadbGTIDEvent holds the GTID of the transaction that follows it
// (MariaDB GTID_EVENT).
type MariadbGTIDEvent struct {
	GTID MariadbGTID
}

// Decode reads the 8-byte sequence number and the 4-byte domain id from
// the event body. ServerID is not part of the payload; the parser copies
// it from the event header (see parseEvent's MARIADB_GTID_EVENT case).
func (e *MariadbGTIDEvent) Decode(data []byte) error {
	e.GTID.SequenceNumber = binary.LittleEndian.Uint64(data)
	e.GTID.DomainID = binary.LittleEndian.Uint32(data[8:])
	// we don't care commit id now, maybe later
	return nil
}

// Dump writes the GTID to w.
func (e *MariadbGTIDEvent) Dump(w io.Writer) {
	fmt.Fprintf(w, "GTID: %s\n", e.GTID)
	fmt.Fprintln(w)
}
// MariadbGTIDListEvent is MariaDB's GTID_LIST_EVENT: the state of all
// replicated domains at the point the binlog was started/rotated.
type MariadbGTIDListEvent struct {
	GTIDs []MariadbGTID
}

// Decode parses the GTID list: a 4-byte header whose lower 28 bits hold
// the element count, followed by count records of
// (domain id uint32, server id uint32, sequence number uint64).
func (e *MariadbGTIDListEvent) Decode(data []byte) error {
	pos := 0
	v := binary.LittleEndian.Uint32(data[pos:])
	pos += 4
	// lower 28 bits = count, upper 4 bits = flags (ignored here)
	count := v & uint32((1<<28)-1)
	e.GTIDs = make([]MariadbGTID, count)
	for i := uint32(0); i < count; i++ {
		e.GTIDs[i].DomainID = binary.LittleEndian.Uint32(data[pos:])
		pos += 4
		e.GTIDs[i].ServerID = binary.LittleEndian.Uint32(data[pos:])
		pos += 4
		e.GTIDs[i].SequenceNumber = binary.LittleEndian.Uint64(data[pos:])
		// BUG FIX: advance past the 8-byte sequence number. Previously pos
		// was not advanced here, so every GTID after the first was decoded
		// from overlapping, incorrect offsets.
		pos += 8
	}
	return nil
}

// Dump writes the GTID list to w.
func (e *MariadbGTIDListEvent) Dump(w io.Writer) {
	fmt.Fprintf(w, "Lists: %v\n", e.GTIDs)
	fmt.Fprintln(w)
}

View File

@@ -0,0 +1,171 @@
package replication
import (
"encoding/hex"
"fmt"
"io"
)
// GenericEvent stands in for every binlog event type that this package
// does not decode into a dedicated structure; it simply keeps the raw body.
type GenericEvent struct {
	// Data is the undecoded event body.
	Data []byte
}

// Decode keeps the raw body as-is; generic events are never parsed further.
func (ev *GenericEvent) Decode(data []byte) error {
	ev.Data = data
	return nil
}

// Dump writes a hex dump of the raw event body to w.
func (ev *GenericEvent) Dump(w io.Writer) {
	fmt.Fprintf(w, "Event data: \n%s", hex.Dump(ev.Data))
	fmt.Fprintln(w)
}
//below events are generic events, maybe later I will consider handle some.
// type StartEventV3 struct {
// Version uint16
// ServerVersion [50]byte
// CreateTimestamp uint32
// }
// type StopEvent struct{}
// type LoadEvent struct {
// SlaveProxyID uint32
// ExecTime uint32
// SkipLines uint32
// TableNameLen uint8
// SchemaLen uint8
// NumFileds uint32
// FieldTerm uint8
// EnclosedBy uint8
// LineTerm uint8
// LineStart uint8
// EscapedBy uint8
// OptFlags uint8
// EmptyFlags uint8
// //len = 1 * NumFields
// FieldNameLengths []byte
// //len = sum(FieldNameLengths) + NumFields
// //array of nul-terminated strings
// FieldNames []byte
// //len = TableNameLen + 1, nul-terminated string
// TableName []byte
// //len = SchemaLen + 1, nul-terminated string
// SchemaName []byte
// //string.NUL
// FileName []byte
// }
// type NewLoadEvent struct {
// SlaveProxyID uint32
// ExecTime uint32
// SkipLines uint32
// TableNameLen uint8
// SchemaLen uint8
// NumFields uint32
// FieldTermLen uint8
// FieldTerm []byte
// EnclosedByLen uint8
// EnclosedBy []byte
// LineTermLen uint8
// LineTerm []byte
// LineStartLen uint8
// LineStart []byte
// EscapedByLen uint8
// EscapedBy []byte
// OptFlags uint8
// //len = 1 * NumFields
// FieldNameLengths []byte
// //len = sum(FieldNameLengths) + NumFields
// //array of nul-terminated strings
// FieldNames []byte
// //len = TableNameLen, nul-terminated string
// TableName []byte
// //len = SchemaLen, nul-terminated string
// SchemaName []byte
// //string.EOF
// FileName []byte
// }
// type CreateFileEvent struct {
// FileID uint32
// BlockData []byte
// }
// type AppendBlockEvent struct {
// FileID uint32
// BlockData []byte
// }
// type ExecLoadEvent struct {
// FileID uint32
// }
// type BeginLoadQueryEvent struct {
// FileID uint32
// BlockData []byte
// }
// type ExecuteLoadQueryEvent struct {
// SlaveProxyID uint32
// ExecutionTime uint32
// SchemaLength uint8
// ErrorCode uint16
// StatusVarsLength uint16
// FileID uint32
// StartPos uint32
// EndPos uint32
// DupHandlingFlags uint8
// }
// type DeleteFileEvent struct {
// FileID uint32
// }
// type RandEvent struct {
// Seed1 uint64
// Seed2 uint64
// }
// type IntVarEvent struct {
// Type uint8
// Value uint64
// }
// type UserVarEvent struct {
// NameLength uint32
// Name []byte
// IsNull uint8
// //if not is null
// Type uint8
// Charset uint32
// ValueLength uint32
// Value []byte
// //if more data
// Flags uint8
// }
// type IncidentEvent struct {
// Type uint16
// MessageLength uint8
// Message []byte
// }
// type HeartbeatEvent struct {
// }

View File

@@ -0,0 +1,480 @@
package replication
import (
"encoding/json"
"fmt"
"math"
"github.com/juju/errors"
. "github.com/siddontang/go-mysql/mysql"
"github.com/siddontang/go/hack"
)
// JSONB type markers: the first byte of a MySQL JSON binary value tells
// how the following payload bytes are encoded.
// See MySQL sql/json_binary.h / json_binary.cc.
const (
	JSONB_SMALL_OBJECT byte = iota // small JSON object
	JSONB_LARGE_OBJECT             // large JSON object
	JSONB_SMALL_ARRAY              // small JSON array
	JSONB_LARGE_ARRAY              // large JSON array
	JSONB_LITERAL                  // literal (true/false/null)
	JSONB_INT16                    // int16
	JSONB_UINT16                   // uint16
	JSONB_INT32                    // int32
	JSONB_UINT32                   // uint32
	JSONB_INT64                    // int64
	JSONB_UINT64                   // uint64
	JSONB_DOUBLE                   // double
	JSONB_STRING                   // string
	JSONB_OPAQUE byte = 0x0f       // custom data (any MySQL data type)
)
// Payload byte values for a JSONB_LITERAL value.
const (
	JSONB_NULL_LITERAL  byte = 0x00
	JSONB_TRUE_LITERAL  byte = 0x01
	JSONB_FALSE_LITERAL byte = 0x02
)
// Entry/offset widths of the JSONB object/array header. "Small" documents
// use 2-byte offsets, "large" documents use 4-byte offsets; key entries add
// a 2-byte key length and value entries add a 1-byte type marker.
const (
	jsonbSmallOffsetSize = 2
	jsonbLargeOffsetSize = 4

	jsonbKeyEntrySizeSmall = 2 + jsonbSmallOffsetSize
	jsonbKeyEntrySizeLarge = 2 + jsonbLargeOffsetSize

	jsonbValueEntrySizeSmall = 1 + jsonbSmallOffsetSize
	jsonbValueEntrySizeLarge = 1 + jsonbLargeOffsetSize
)

// jsonbGetOffsetSize returns the width in bytes of count/size/offset
// fields for the given document flavor.
func jsonbGetOffsetSize(isSmall bool) int {
	if !isSmall {
		return jsonbLargeOffsetSize
	}
	return jsonbSmallOffsetSize
}

// jsonbGetKeyEntrySize returns the size of one key entry:
// a 2-byte key length plus a key offset.
func jsonbGetKeyEntrySize(isSmall bool) int {
	return 2 + jsonbGetOffsetSize(isSmall)
}

// jsonbGetValueEntrySize returns the size of one value entry:
// a 1-byte type marker plus an inlined value or value offset.
func jsonbGetValueEntrySize(isSmall bool) int {
	return 1 + jsonbGetOffsetSize(isSmall)
}
// decodeJsonBinary converts MySQL's JSON binary representation into plain
// JSON text. The first byte of data is the JSONB type marker and the
// remainder is its payload.
func (e *RowsEvent) decodeJsonBinary(data []byte) ([]byte, error) {
	dec := jsonBinaryDecoder{useDecimal: e.useDecimal}
	if dec.isDataShort(data, 1) {
		return nil, dec.err
	}
	value := dec.decodeValue(data[0], data[1:])
	if dec.err != nil {
		return nil, dec.err
	}
	return json.Marshal(value)
}
// jsonBinaryDecoder decodes MySQL JSONB binary values. The first error
// encountered is latched in err; once set, further decode calls become
// no-ops, so callers only need to check err once at the end.
type jsonBinaryDecoder struct {
	useDecimal bool // decode DECIMAL opaques via decimal.Decimal instead of float64
	err        error
}
// decodeValue dispatches on the JSONB type marker tp and decodes the
// matching payload from data. On malformed input the error is recorded
// in d.err and nil is returned.
func (d *jsonBinaryDecoder) decodeValue(tp byte, data []byte) interface{} {
	// latched error: skip all further work
	if d.err != nil {
		return nil
	}
	switch tp {
	case JSONB_SMALL_OBJECT:
		return d.decodeObjectOrArray(data, true, true)
	case JSONB_LARGE_OBJECT:
		return d.decodeObjectOrArray(data, false, true)
	case JSONB_SMALL_ARRAY:
		return d.decodeObjectOrArray(data, true, false)
	case JSONB_LARGE_ARRAY:
		return d.decodeObjectOrArray(data, false, false)
	case JSONB_LITERAL:
		return d.decodeLiteral(data)
	case JSONB_INT16:
		return d.decodeInt16(data)
	case JSONB_UINT16:
		return d.decodeUint16(data)
	case JSONB_INT32:
		return d.decodeInt32(data)
	case JSONB_UINT32:
		return d.decodeUint32(data)
	case JSONB_INT64:
		return d.decodeInt64(data)
	case JSONB_UINT64:
		return d.decodeUint64(data)
	case JSONB_DOUBLE:
		return d.decodeDouble(data)
	case JSONB_STRING:
		return d.decodeString(data)
	case JSONB_OPAQUE:
		return d.decodeOpaque(data)
	default:
		d.err = errors.Errorf("invalid json type %d", tp)
	}
	return nil
}
// decodeObjectOrArray decodes a JSONB object or array document.
// Layout: count, size, (key entries if object), value entries, then the
// out-of-line key and value payloads; all offsets are relative to the
// start of data. Returns map[string]interface{} for objects and
// []interface{} for arrays.
func (d *jsonBinaryDecoder) decodeObjectOrArray(data []byte, isSmall bool, isObject bool) interface{} {
	offsetSize := jsonbGetOffsetSize(isSmall)
	if d.isDataShort(data, 2*offsetSize) {
		return nil
	}
	// element count and total document size head the document
	count := d.decodeCount(data, isSmall)
	size := d.decodeCount(data[offsetSize:], isSmall)
	if d.isDataShort(data, int(size)) {
		return nil
	}
	keyEntrySize := jsonbGetKeyEntrySize(isSmall)
	valueEntrySize := jsonbGetValueEntrySize(isSmall)
	// header = count/size fields + all value entries (+ key entries for objects)
	headerSize := 2*offsetSize + count*valueEntrySize
	if isObject {
		headerSize += count * keyEntrySize
	}
	if headerSize > size {
		d.err = errors.Errorf("header size %d > size %d", headerSize, size)
		return nil
	}
	var keys []string
	if isObject {
		keys = make([]string, count)
		for i := 0; i < count; i++ {
			// decode key
			entryOffset := 2*offsetSize + keyEntrySize*i
			keyOffset := d.decodeCount(data[entryOffset:], isSmall)
			keyLength := int(d.decodeUint16(data[entryOffset+offsetSize:]))
			// Key must start after value entry
			if keyOffset < headerSize {
				d.err = errors.Errorf("invalid key offset %d, must > %d", keyOffset, headerSize)
				return nil
			}
			if d.isDataShort(data, keyOffset+keyLength) {
				return nil
			}
			keys[i] = hack.String(data[keyOffset : keyOffset+keyLength])
		}
	}
	if d.err != nil {
		return nil
	}
	values := make([]interface{}, count)
	for i := 0; i < count; i++ {
		// decode value
		entryOffset := 2*offsetSize + valueEntrySize*i
		if isObject {
			entryOffset += keyEntrySize * count
		}
		tp := data[entryOffset]
		if isInlineValue(tp, isSmall) {
			// small scalars are stored directly inside the value entry
			values[i] = d.decodeValue(tp, data[entryOffset+1:entryOffset+valueEntrySize])
			continue
		}
		valueOffset := d.decodeCount(data[entryOffset+1:], isSmall)
		if d.isDataShort(data, valueOffset) {
			return nil
		}
		values[i] = d.decodeValue(tp, data[valueOffset:])
	}
	if d.err != nil {
		return nil
	}
	if !isObject {
		return values
	}
	m := make(map[string]interface{}, count)
	for i := 0; i < count; i++ {
		m[keys[i]] = values[i]
	}
	return m
}
func isInlineValue(tp byte, isSmall bool) bool {
switch tp {
case JSONB_INT16, JSONB_UINT16, JSONB_LITERAL:
return true
case JSONB_INT32, JSONB_UINT32:
return !isSmall
}
return false
}
// decodeLiteral decodes a JSONB literal payload byte into nil, true or
// false; any other byte is recorded as an error.
func (d *jsonBinaryDecoder) decodeLiteral(data []byte) interface{} {
	if d.isDataShort(data, 1) {
		return nil
	}
	switch data[0] {
	case JSONB_NULL_LITERAL:
		return nil
	case JSONB_TRUE_LITERAL:
		return true
	case JSONB_FALSE_LITERAL:
		return false
	default:
		d.err = errors.Errorf("invalid literal %c", data[0])
		return nil
	}
}
// isDataShort reports whether data holds fewer than expected bytes (or a
// previous error is already latched); in the short case it records the
// error so subsequent decode calls become no-ops.
func (d *jsonBinaryDecoder) isDataShort(data []byte, expected int) bool {
	switch {
	case d.err != nil:
		return true
	case len(data) < expected:
		d.err = errors.Errorf("data len %d < expected %d", len(data), expected)
		return true
	default:
		return false
	}
}
// Fixed-width scalar decoders. Each validates the payload length (latching
// an error on short data) and delegates to the shared little-endian
// parsers; on error the zero value is returned.

func (d *jsonBinaryDecoder) decodeInt16(data []byte) int16 {
	if d.isDataShort(data, 2) {
		return 0
	}
	return ParseBinaryInt16(data[0:2])
}

func (d *jsonBinaryDecoder) decodeUint16(data []byte) uint16 {
	if d.isDataShort(data, 2) {
		return 0
	}
	return ParseBinaryUint16(data[0:2])
}

func (d *jsonBinaryDecoder) decodeInt32(data []byte) int32 {
	if d.isDataShort(data, 4) {
		return 0
	}
	return ParseBinaryInt32(data[0:4])
}

func (d *jsonBinaryDecoder) decodeUint32(data []byte) uint32 {
	if d.isDataShort(data, 4) {
		return 0
	}
	return ParseBinaryUint32(data[0:4])
}

func (d *jsonBinaryDecoder) decodeInt64(data []byte) int64 {
	if d.isDataShort(data, 8) {
		return 0
	}
	return ParseBinaryInt64(data[0:8])
}

func (d *jsonBinaryDecoder) decodeUint64(data []byte) uint64 {
	if d.isDataShort(data, 8) {
		return 0
	}
	return ParseBinaryUint64(data[0:8])
}

func (d *jsonBinaryDecoder) decodeDouble(data []byte) float64 {
	if d.isDataShort(data, 8) {
		return 0
	}
	return ParseBinaryFloat64(data[0:8])
}
// decodeString decodes a JSONB string: a variable-length byte count
// followed by that many bytes of UTF-8 text.
func (d *jsonBinaryDecoder) decodeString(data []byte) string {
	if d.err != nil {
		return ""
	}
	strLen, lenBytes := d.decodeVariableLength(data)
	if d.isDataShort(data, strLen+lenBytes) {
		return ""
	}
	return hack.String(data[lenBytes : lenBytes+strLen])
}
// decodeOpaque decodes a JSONB opaque value: a 1-byte MySQL column type,
// a variable-length payload size, then the payload. DECIMAL and temporal
// types get dedicated decoding; anything else is returned as a raw string.
func (d *jsonBinaryDecoder) decodeOpaque(data []byte) interface{} {
	if d.isDataShort(data, 1) {
		return nil
	}
	tp := data[0]
	data = data[1:]
	l, n := d.decodeVariableLength(data)
	if d.isDataShort(data, l+n) {
		return nil
	}
	data = data[n : l+n]
	// every branch returns, so no trailing return is needed
	// (the previous version had an unreachable `return nil` here)
	switch tp {
	case MYSQL_TYPE_NEWDECIMAL:
		return d.decodeDecimal(data)
	case MYSQL_TYPE_TIME:
		return d.decodeTime(data)
	case MYSQL_TYPE_DATE, MYSQL_TYPE_DATETIME, MYSQL_TYPE_TIMESTAMP:
		return d.decodeDateTime(data)
	default:
		return hack.String(data)
	}
}
// decodeDecimal decodes an opaque DECIMAL payload: one byte of precision,
// one byte of scale, then MySQL's packed decimal representation.
func (d *jsonBinaryDecoder) decodeDecimal(data []byte) interface{} {
	precision, scale := int(data[0]), int(data[1])
	value, _, err := decodeDecimal(data[2:], precision, scale, d.useDecimal)
	d.err = err
	return value
}
// decodeTime decodes an opaque TIME payload packed into an int64:
// 24 bits of fractional microseconds, then 6-bit seconds, 6-bit minutes
// and 10-bit hours. Returns a "[-]HH:MM:SS.ffffff" string.
func (d *jsonBinaryDecoder) decodeTime(data []byte) interface{} {
	raw := d.decodeInt64(data)
	if raw == 0 {
		return "00:00:00"
	}
	sign := ""
	if raw < 0 {
		sign = "-"
		raw = -raw
	}
	whole := raw >> 24
	hour := (whole >> 12) % (1 << 10)
	min := (whole >> 6) % (1 << 6)
	sec := whole % (1 << 6)
	frac := raw % (1 << 24)
	return fmt.Sprintf("%s%02d:%02d:%02d.%06d", sign, hour, min, sec, frac)
}
// decodeDateTime decodes an opaque DATE/DATETIME/TIMESTAMP payload packed
// into an int64: 24 bits of fractional microseconds, then 17 bits of
// hour/minute/second and the rest year/month/day (months counted base 13).
// Returns a "YYYY-MM-DD HH:MM:SS.ffffff" string.
func (d *jsonBinaryDecoder) decodeDateTime(data []byte) interface{} {
	v := d.decodeInt64(data)
	if v == 0 {
		return "0000-00-00 00:00:00"
	}
	// handle negative?
	if v < 0 {
		v = -v
	}
	intPart := v >> 24
	ymd := intPart >> 17
	ym := ymd >> 5
	hms := intPart % (1 << 17)
	year := ym / 13
	month := ym % 13
	day := ymd % (1 << 5)
	hour := (hms >> 12)
	minute := (hms >> 6) % (1 << 6)
	second := hms % (1 << 6)
	frac := v % (1 << 24)
	return fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d.%06d", year, month, day, hour, minute, second, frac)
}
// decodeCount reads a count/size/offset field: 2 bytes in small documents,
// 4 bytes in large ones.
func (d *jsonBinaryDecoder) decodeCount(data []byte, isSmall bool) int {
	if isSmall {
		return int(d.decodeUint16(data))
	}
	return int(d.decodeUint32(data))
}
// decodeVariableLength reads a LEB128-style length: 7 value bits per byte,
// high bit set on every byte except the last. Returns the decoded length
// and the number of bytes consumed; on overflow or truncation it latches
// an error and returns (0, 0).
func (d *jsonBinaryDecoder) decodeVariableLength(data []byte) (int, int) {
	// The max size for variable length is math.MaxUint32, so
	// here we can use 5 bytes to save it.
	maxCount := 5
	if len(data) < maxCount {
		maxCount = len(data)
	}
	pos := 0
	length := uint64(0)
	for ; pos < maxCount; pos++ {
		v := data[pos]
		length |= uint64(v&0x7F) << uint(7*pos)
		// high bit clear marks the final byte
		if v&0x80 == 0 {
			if length > math.MaxUint32 {
				d.err = errors.Errorf("variable length %d must <= %d", length, math.MaxUint32)
				return 0, 0
			}
			pos += 1
			// TODO: should consider length overflow int here.
			return int(length), pos
		}
	}
	// ran out of bytes before seeing a terminating byte
	d.err = errors.New("decode variable length failed")
	return 0, 0
}

View File

@@ -0,0 +1,334 @@
package replication
import (
"bytes"
"fmt"
"io"
"os"
"sync/atomic"
"github.com/juju/errors"
)
// BinlogParser decodes a stream of binlog events. It caches the last
// FORMAT_DESCRIPTION_EVENT (needed to decode later events) and the
// TABLE_MAP_EVENTs required to decode row events.
type BinlogParser struct {
	format *FormatDescriptionEvent

	// tables maps table id -> table map event for pending row events
	tables map[uint64]*TableMapEvent

	// for rawMode, we only parse FormatDescriptionEvent and RotateEvent
	rawMode bool

	parseTime bool

	// used to start/stop processing; accessed atomically (1 = stop requested)
	stopProcessing uint32

	useDecimal bool
}
// NewBinlogParser returns a parser with an empty table-map cache.
func NewBinlogParser() *BinlogParser {
	return &BinlogParser{
		tables: make(map[uint64]*TableMapEvent),
	}
}
// Stop asks ParseReader to exit its loop before processing the next event.
func (p *BinlogParser) Stop() {
	atomic.StoreUint32(&p.stopProcessing, 1)
}

// Resume clears the stop flag so parsing loops run again.
func (p *BinlogParser) Resume() {
	atomic.StoreUint32(&p.stopProcessing, 0)
}

// Reset drops the cached format description; the next stream parsed must
// provide a fresh FORMAT_DESCRIPTION_EVENT.
func (p *BinlogParser) Reset() {
	p.format = nil
}
// OnEventFunc is the callback invoked for every successfully parsed event;
// returning a non-nil error aborts parsing.
type OnEventFunc func(*BinlogEvent) error
// ParseFile parses a binlog file starting at offset, invoking onEvent for
// every event. The 4-byte file magic is verified first; when offset is
// past the magic, the FORMAT_DESCRIPTION_EVENT at position 4 is parsed
// first so later events can be decoded.
func (p *BinlogParser) ParseFile(name string, offset int64, onEvent OnEventFunc) error {
	f, err := os.Open(name)
	if err != nil {
		return errors.Trace(err)
	}
	defer f.Close()

	b := make([]byte, 4)
	if _, err = f.Read(b); err != nil {
		return errors.Trace(err)
	} else if !bytes.Equal(b, BinLogFileHeader) {
		return errors.Errorf("%s is not a valid binlog file, head 4 bytes must fe'bin' ", name)
	}

	if offset < 4 {
		offset = 4
	} else if offset > 4 {
		// FORMAT_DESCRIPTION event should be read by default always (despite that fact passed offset may be higher than 4)
		if _, err = f.Seek(4, io.SeekStart); err != nil {
			return errors.Errorf("seek %s to %d error %v", name, offset, err)
		}
		// BUG FIX: this error was previously ignored; without a valid format
		// description the following events cannot be decoded reliably.
		if err = p.getFormatDescriptionEvent(f, onEvent); err != nil {
			return errors.Trace(err)
		}
	}

	// io.SeekStart replaces the deprecated os.SEEK_SET
	if _, err = f.Seek(offset, io.SeekStart); err != nil {
		return errors.Errorf("seek %s to %d error %v", name, offset, err)
	}

	return p.ParseReader(f, onEvent)
}
// getFormatDescriptionEvent reads and parses exactly one event from r,
// expected to be the FORMAT_DESCRIPTION_EVENT at the start of a binlog.
func (p *BinlogParser) getFormatDescriptionEvent(r io.Reader, onEvent OnEventFunc) error {
	if _, err := p.parseSingleEvent(&r, onEvent); err != nil {
		return err
	}
	return nil
}
// parseSingleEvent reads one event (header + body) from r, decodes it and
// invokes onEvent. The first return value is true on clean EOF. Events
// whose table map has not been seen yet are silently skipped.
func (p *BinlogParser) parseSingleEvent(r *io.Reader, onEvent OnEventFunc) (bool, error) {
	var err error
	var n int64

	headBuf := make([]byte, EventHeaderSize)
	// EOF exactly at an event boundary means the stream ended cleanly
	if _, err = io.ReadFull(*r, headBuf); err == io.EOF {
		return true, nil
	} else if err != nil {
		return false, errors.Trace(err)
	}

	var h *EventHeader
	h, err = p.parseHeader(headBuf)
	if err != nil {
		return false, errors.Trace(err)
	}
	if h.EventSize <= uint32(EventHeaderSize) {
		return false, errors.Errorf("invalid event header, event size is %d, too small", h.EventSize)
	}

	var buf bytes.Buffer
	if n, err = io.CopyN(&buf, *r, int64(h.EventSize)-int64(EventHeaderSize)); err != nil {
		return false, errors.Errorf("get event body err %v, need %d - %d, but got %d", err, h.EventSize, EventHeaderSize, n)
	}
	data := buf.Bytes()
	// NOTE(review): rawData here is the body only, while Parse() passes
	// header+body as RawData — confirm which callers rely on.
	rawData := data
	eventLen := int(h.EventSize) - EventHeaderSize
	if len(data) != eventLen {
		return false, errors.Errorf("invalid data size %d in event %s, less event length %d", len(data), h.EventType, eventLen)
	}
	var e Event
	e, err = p.parseEvent(h, data)
	if err != nil {
		// rows event without its table map: skip it rather than fail
		if _, ok := err.(errMissingTableMapEvent); ok {
			return false, nil
		}
		return false, errors.Trace(err)
	}
	if err = onEvent(&BinlogEvent{rawData, h, e}); err != nil {
		return false, errors.Trace(err)
	}
	return false, nil
}
// ParseReader parses events from r until EOF, an error, or Stop() is
// called; onEvent is invoked for each decoded event.
func (p *BinlogParser) ParseReader(r io.Reader, onEvent OnEventFunc) error {
	for atomic.LoadUint32(&p.stopProcessing) != 1 {
		eof, err := p.parseSingleEvent(&r, onEvent)
		if err != nil {
			if _, skip := err.(errMissingTableMapEvent); skip {
				continue
			}
			return errors.Trace(err)
		}
		if eof {
			break
		}
	}
	return nil
}
// SetRawMode controls raw mode: when enabled, only FORMAT_DESCRIPTION and
// ROTATE events are decoded and everything else is passed through as a
// GenericEvent (see parseEvent).
func (p *BinlogParser) SetRawMode(mode bool) {
	p.rawMode = mode
}

// SetParseTime controls whether temporal row values are returned as
// time.Time instead of formatted strings (see RowsEvent.parseFracTime).
func (p *BinlogParser) SetParseTime(parseTime bool) {
	p.parseTime = parseTime
}

// SetUseDecimal controls whether DECIMAL row values are returned as
// decimal values instead of float64 (see decodeDecimal).
func (p *BinlogParser) SetUseDecimal(useDecimal bool) {
	p.useDecimal = useDecimal
}
// parseHeader decodes the fixed-size event header from data.
func (p *BinlogParser) parseHeader(data []byte) (*EventHeader, error) {
	header := &EventHeader{}
	if err := header.Decode(data); err != nil {
		return nil, err
	}
	return header, nil
}
// parseEvent chooses a concrete Event implementation for the header's
// event type, decodes the body into it, and maintains parser state: the
// cached format description, the table-map cache, and table-map clearing
// at statement end.
func (p *BinlogParser) parseEvent(h *EventHeader, data []byte) (Event, error) {
	var e Event
	if h.EventType == FORMAT_DESCRIPTION_EVENT {
		p.format = &FormatDescriptionEvent{}
		e = p.format
	} else {
		// strip the trailing CRC32 checksum when the master declared one
		if p.format != nil && p.format.ChecksumAlgorithm == BINLOG_CHECKSUM_ALG_CRC32 {
			data = data[0 : len(data)-4]
		}
		if h.EventType == ROTATE_EVENT {
			e = &RotateEvent{}
		} else if !p.rawMode {
			switch h.EventType {
			case QUERY_EVENT:
				e = &QueryEvent{}
			case XID_EVENT:
				e = &XIDEvent{}
			case TABLE_MAP_EVENT:
				te := &TableMapEvent{}
				// header length 6 means the table id is stored in 4 bytes
				if p.format.EventTypeHeaderLengths[TABLE_MAP_EVENT-1] == 6 {
					te.tableIDSize = 4
				} else {
					te.tableIDSize = 6
				}
				e = te
			case WRITE_ROWS_EVENTv0,
				UPDATE_ROWS_EVENTv0,
				DELETE_ROWS_EVENTv0,
				WRITE_ROWS_EVENTv1,
				DELETE_ROWS_EVENTv1,
				UPDATE_ROWS_EVENTv1,
				WRITE_ROWS_EVENTv2,
				UPDATE_ROWS_EVENTv2,
				DELETE_ROWS_EVENTv2:
				e = p.newRowsEvent(h)
			case ROWS_QUERY_EVENT:
				e = &RowsQueryEvent{}
			case GTID_EVENT:
				e = &GTIDEvent{}
			case BEGIN_LOAD_QUERY_EVENT:
				e = &BeginLoadQueryEvent{}
			case EXECUTE_LOAD_QUERY_EVENT:
				e = &ExecuteLoadQueryEvent{}
			case MARIADB_ANNOTATE_ROWS_EVENT:
				e = &MariadbAnnotateRowsEvent{}
			case MARIADB_BINLOG_CHECKPOINT_EVENT:
				e = &MariadbBinlogCheckPointEvent{}
			case MARIADB_GTID_LIST_EVENT:
				e = &MariadbGTIDListEvent{}
			case MARIADB_GTID_EVENT:
				ee := &MariadbGTIDEvent{}
				// ServerID is not in the body; copy it from the header
				ee.GTID.ServerID = h.ServerID
				e = ee
			default:
				e = &GenericEvent{}
			}
		} else {
			e = &GenericEvent{}
		}
	}
	if err := e.Decode(data); err != nil {
		return nil, &EventError{h, err.Error(), data}
	}
	// remember table maps so following rows events can be decoded
	if te, ok := e.(*TableMapEvent); ok {
		p.tables[te.TableID] = te
	}
	if re, ok := e.(*RowsEvent); ok {
		if (re.Flags & RowsEventStmtEndFlag) > 0 {
			// Refer https://github.com/alibaba/canal/blob/38cc81b7dab29b51371096fb6763ca3a8432ffee/dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/RowsLogEvent.java#L176
			p.tables = make(map[uint64]*TableMapEvent)
		}
	}
	return e, nil
}
// Parse decodes a single raw binlog event (header + body) and returns it.
// With the exception of the FORMAT_DESCRIPTION_EVENT event type
// there must have previously been passed a FORMAT_DESCRIPTION_EVENT
// into the parser for this to work properly on any given event.
// Passing a new FORMAT_DESCRIPTION_EVENT into the parser will replace
// an existing one.
func (p *BinlogParser) Parse(data []byte) (*BinlogEvent, error) {
	// keep the full header+body for BinlogEvent.RawData
	rawData := data
	h, err := p.parseHeader(data)
	if err != nil {
		return nil, err
	}
	data = data[EventHeaderSize:]
	eventLen := int(h.EventSize) - EventHeaderSize
	if len(data) != eventLen {
		return nil, fmt.Errorf("invalid data size %d in event %s, less event length %d", len(data), h.EventType, eventLen)
	}
	e, err := p.parseEvent(h, data)
	if err != nil {
		return nil, err
	}
	return &BinlogEvent{rawData, h, e}, nil
}
// newRowsEvent builds a RowsEvent configured for the header's event type:
// table-id width, rows-event version, and whether a second column bitmap
// is present (UPDATE events v1/v2 carry before- and after-images).
func (p *BinlogParser) newRowsEvent(h *EventHeader) *RowsEvent {
	e := &RowsEvent{
		tables:     p.tables,
		parseTime:  p.parseTime,
		useDecimal: p.useDecimal,
	}
	// header length 6 means the table id is stored in 4 bytes
	if p.format.EventTypeHeaderLengths[h.EventType-1] == 6 {
		e.tableIDSize = 4
	} else {
		e.tableIDSize = 6
	}
	switch h.EventType {
	case WRITE_ROWS_EVENTv0, UPDATE_ROWS_EVENTv0, DELETE_ROWS_EVENTv0:
		e.Version = 0
	case WRITE_ROWS_EVENTv1, DELETE_ROWS_EVENTv1:
		e.Version = 1
	case UPDATE_ROWS_EVENTv1:
		e.Version = 1
		e.needBitmap2 = true
	case WRITE_ROWS_EVENTv2, DELETE_ROWS_EVENTv2:
		e.Version = 2
	case UPDATE_ROWS_EVENTv2:
		e.Version = 2
		e.needBitmap2 = true
	}
	return e
}

View File

@@ -0,0 +1,844 @@
package replication
import (
"bytes"
"encoding/binary"
"encoding/hex"
"fmt"
"io"
"strconv"
"time"
"github.com/juju/errors"
"github.com/shopspring/decimal"
. "github.com/siddontang/go-mysql/mysql"
"github.com/siddontang/go/hack"
log "github.com/sirupsen/logrus"
)
// errMissingTableMapEvent marks a rows event whose TABLE_MAP_EVENT has not
// been seen yet (e.g. when attaching mid-statement); the parser skips such
// events instead of failing (see parseSingleEvent / ParseReader).
type errMissingTableMapEvent error
// TableMapEvent describes the schema of the table referenced by the row
// events that follow it: column types, their per-type metadata and
// nullability.
type TableMapEvent struct {
	tableIDSize int // 4 or 6 bytes, per the format description

	TableID uint64

	Flags uint16

	Schema []byte
	Table  []byte

	ColumnCount uint64
	ColumnType  []byte
	ColumnMeta  []uint16

	//len = (ColumnCount + 7) / 8
	NullBitmap []byte
}
// Decode parses the table map body: table id, flags, nul-terminated
// schema and table names, column count/types, the per-column metadata
// block, and the trailing NULL bitmap.
func (e *TableMapEvent) Decode(data []byte) error {
	pos := 0
	e.TableID = FixedLengthInt(data[0:e.tableIDSize])
	pos += e.tableIDSize

	e.Flags = binary.LittleEndian.Uint16(data[pos:])
	pos += 2

	schemaLength := data[pos]
	pos++

	e.Schema = data[pos : pos+int(schemaLength)]
	pos += int(schemaLength)

	//skip 0x00
	pos++

	tableLength := data[pos]
	pos++

	e.Table = data[pos : pos+int(tableLength)]
	pos += int(tableLength)

	//skip 0x00
	pos++

	var n int
	e.ColumnCount, _, n = LengthEncodedInt(data[pos:])
	pos += n

	e.ColumnType = data[pos : pos+int(e.ColumnCount)]
	pos += int(e.ColumnCount)

	var err error
	var metaData []byte
	// metadata is a length-encoded string; its contents are per-type
	if metaData, _, n, err = LengthEnodedString(data[pos:]); err != nil {
		return errors.Trace(err)
	}

	if err = e.decodeMeta(metaData); err != nil {
		return errors.Trace(err)
	}

	pos += n

	// exactly the NULL bitmap must remain
	if len(data[pos:]) != bitmapByteSize(int(e.ColumnCount)) {
		return io.EOF
	}

	e.NullBitmap = data[pos:]

	return nil
}
// bitmapByteSize returns how many bytes a bitmap with one bit per column
// occupies, i.e. ceil(columnCount / 8).
func bitmapByteSize(columnCount int) int {
	return (columnCount + 7) / 8
}
// see mysql sql/log_event.h
/*
0 byte
MYSQL_TYPE_DECIMAL
MYSQL_TYPE_TINY
MYSQL_TYPE_SHORT
MYSQL_TYPE_LONG
MYSQL_TYPE_NULL
MYSQL_TYPE_TIMESTAMP
MYSQL_TYPE_LONGLONG
MYSQL_TYPE_INT24
MYSQL_TYPE_DATE
MYSQL_TYPE_TIME
MYSQL_TYPE_DATETIME
MYSQL_TYPE_YEAR
1 byte
MYSQL_TYPE_FLOAT
MYSQL_TYPE_DOUBLE
MYSQL_TYPE_BLOB
MYSQL_TYPE_GEOMETRY
//maybe
MYSQL_TYPE_TIME2
MYSQL_TYPE_DATETIME2
MYSQL_TYPE_TIMESTAMP2
2 byte
MYSQL_TYPE_VARCHAR
MYSQL_TYPE_BIT
MYSQL_TYPE_NEWDECIMAL
MYSQL_TYPE_VAR_STRING
MYSQL_TYPE_STRING
This enumeration value is only used internally and cannot exist in a binlog.
MYSQL_TYPE_NEWDATE
MYSQL_TYPE_ENUM
MYSQL_TYPE_SET
MYSQL_TYPE_TINY_BLOB
MYSQL_TYPE_MEDIUM_BLOB
MYSQL_TYPE_LONG_BLOB
*/
// decodeMeta parses the per-column metadata block into ColumnMeta; each
// column consumes 0, 1 or 2 bytes depending on its type (see the byte-size
// table in the comment above).
func (e *TableMapEvent) decodeMeta(data []byte) error {
	pos := 0
	e.ColumnMeta = make([]uint16, e.ColumnCount)
	for i, t := range e.ColumnType {
		switch t {
		case MYSQL_TYPE_STRING:
			// big-endian pair: real type in the high byte
			var x uint16 = uint16(data[pos]) << 8 //real type
			x += uint16(data[pos+1])              //pack or field length
			e.ColumnMeta[i] = x
			pos += 2
		case MYSQL_TYPE_NEWDECIMAL:
			var x uint16 = uint16(data[pos]) << 8 //precision
			x += uint16(data[pos+1])              //decimals
			e.ColumnMeta[i] = x
			pos += 2
		case MYSQL_TYPE_VAR_STRING,
			MYSQL_TYPE_VARCHAR,
			MYSQL_TYPE_BIT:
			e.ColumnMeta[i] = binary.LittleEndian.Uint16(data[pos:])
			pos += 2
		case MYSQL_TYPE_BLOB,
			MYSQL_TYPE_DOUBLE,
			MYSQL_TYPE_FLOAT,
			MYSQL_TYPE_GEOMETRY,
			MYSQL_TYPE_JSON:
			e.ColumnMeta[i] = uint16(data[pos])
			pos++
		case MYSQL_TYPE_TIME2,
			MYSQL_TYPE_DATETIME2,
			MYSQL_TYPE_TIMESTAMP2:
			e.ColumnMeta[i] = uint16(data[pos])
			pos++
		case MYSQL_TYPE_NEWDATE,
			MYSQL_TYPE_ENUM,
			MYSQL_TYPE_SET,
			MYSQL_TYPE_TINY_BLOB,
			MYSQL_TYPE_MEDIUM_BLOB,
			MYSQL_TYPE_LONG_BLOB:
			// these are internal-only types and never appear in a binlog
			return errors.Errorf("unsupport type in binlog %d", t)
		default:
			e.ColumnMeta[i] = 0
		}
	}
	return nil
}
// Dump writes a human-readable form of the table map to w.
func (e *TableMapEvent) Dump(w io.Writer) {
	fmt.Fprintf(w, "TableID: %d\n", e.TableID)
	fmt.Fprintf(w, "TableID size: %d\n", e.tableIDSize)
	fmt.Fprintf(w, "Flags: %d\n", e.Flags)
	fmt.Fprintf(w, "Schema: %s\n", e.Schema)
	fmt.Fprintf(w, "Table: %s\n", e.Table)
	fmt.Fprintf(w, "Column count: %d\n", e.ColumnCount)
	fmt.Fprintf(w, "Column type: \n%s", hex.Dump(e.ColumnType))
	fmt.Fprintf(w, "NULL bitmap: \n%s", hex.Dump(e.NullBitmap))
	fmt.Fprintln(w)
}
// RowsEventStmtEndFlag is set in the end of the statement.
const RowsEventStmtEndFlag = 0x01

// RowsEvent is the decoded form of WRITE/UPDATE/DELETE_ROWS_EVENT
// (versions 0, 1 and 2). Its row values are decoded using the matching
// TableMapEvent from the parser's cache.
type RowsEvent struct {
	//0, 1, 2
	Version int

	tableIDSize int
	// table-map cache shared with the parser, keyed by table id
	tables      map[uint64]*TableMapEvent
	needBitmap2 bool

	Table *TableMapEvent

	TableID uint64

	Flags uint16

	//if version == 2
	ExtraData []byte

	//lenenc_int
	ColumnCount uint64
	//len = (ColumnCount + 7) / 8
	ColumnBitmap1 []byte

	//if UPDATE_ROWS_EVENTv1 or v2
	//len = (ColumnCount + 7) / 8
	ColumnBitmap2 []byte

	//rows: invalid: int64, float64, bool, []byte, string
	Rows [][]interface{}

	parseTime  bool
	useDecimal bool
}
// Decode parses the rows event body: table id, flags, v2 extra data,
// column count and bitmaps, then repeated row images until the end of the
// event. UPDATE v1/v2 events carry before- and after-images per row.
func (e *RowsEvent) Decode(data []byte) error {
	pos := 0
	e.TableID = FixedLengthInt(data[0:e.tableIDSize])
	pos += e.tableIDSize

	e.Flags = binary.LittleEndian.Uint16(data[pos:])
	pos += 2

	if e.Version == 2 {
		// dataLen includes its own 2 bytes
		dataLen := binary.LittleEndian.Uint16(data[pos:])
		pos += 2

		e.ExtraData = data[pos : pos+int(dataLen-2)]
		pos += int(dataLen - 2)
	}

	var n int
	e.ColumnCount, _, n = LengthEncodedInt(data[pos:])
	pos += n

	bitCount := bitmapByteSize(int(e.ColumnCount))
	e.ColumnBitmap1 = data[pos : pos+bitCount]
	pos += bitCount

	if e.needBitmap2 {
		e.ColumnBitmap2 = data[pos : pos+bitCount]
		pos += bitCount
	}

	var ok bool
	e.Table, ok = e.tables[e.TableID]
	if !ok {
		// distinguish "unknown id among known tables" from "no maps seen yet"
		if len(e.tables) > 0 {
			return errors.Errorf("invalid table id %d, no corresponding table map event", e.TableID)
		} else {
			return errMissingTableMapEvent(errors.Errorf("invalid table id %d, no corresponding table map event", e.TableID))
		}
	}

	var err error

	// ... repeat rows until event-end
	defer func() {
		// NOTE(review): log.Fatalf terminates the whole process on a decode
		// panic; consider logging and returning an error instead.
		if r := recover(); r != nil {
			log.Fatalf("parse rows event panic %v, data %q, parsed rows %#v, table map %#v\n%s", r, data, e, e.Table, Pstack())
		}
	}()

	for pos < len(data) {
		if n, err = e.decodeRows(data[pos:], e.Table, e.ColumnBitmap1); err != nil {
			return errors.Trace(err)
		}
		pos += n

		if e.needBitmap2 {
			// after-image of an UPDATE row
			if n, err = e.decodeRows(data[pos:], e.Table, e.ColumnBitmap2); err != nil {
				return errors.Trace(err)
			}
			pos += n
		}
	}

	return nil
}
// isBitSet reports whether bit i is set in bitmap; bits are numbered
// least-significant first within each byte.
func isBitSet(bitmap []byte, i int) bool {
	return bitmap[i>>3]&(1<<(uint(i)&7)) != 0
}
// decodeRows decodes one row image from data and appends it to e.Rows.
// bitmap selects which columns are present in the image; a per-image NULL
// bitmap (one bit per present column) precedes the values. Returns the
// number of bytes consumed.
func (e *RowsEvent) decodeRows(data []byte, table *TableMapEvent, bitmap []byte) (int, error) {
	row := make([]interface{}, e.ColumnCount)

	pos := 0

	// refer: https://github.com/alibaba/canal/blob/c3e38e50e269adafdd38a48c63a1740cde304c67/dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/RowsLogBuffer.java#L63
	count := 0
	for i := 0; i < int(e.ColumnCount); i++ {
		if isBitSet(bitmap, i) {
			count++
		}
	}
	// the NULL bitmap covers only the columns present in this image
	count = (count + 7) / 8

	nullBitmap := data[pos : pos+count]
	pos += count

	nullbitIndex := 0

	var n int
	var err error
	for i := 0; i < int(e.ColumnCount); i++ {
		// columns absent from the image consume neither a NULL bit nor bytes
		if !isBitSet(bitmap, i) {
			continue
		}

		isNull := (uint32(nullBitmap[nullbitIndex/8]) >> uint32(nullbitIndex%8)) & 0x01
		nullbitIndex++

		if isNull > 0 {
			row[i] = nil
			continue
		}

		row[i], n, err = e.decodeValue(data[pos:], table.ColumnType[i], table.ColumnMeta[i])

		if err != nil {
			return 0, err
		}
		pos += n
	}

	e.Rows = append(e.Rows, row)
	return pos, nil
}
// parseFracTime converts a fracTime into either a time.Time (when
// parseTime is enabled) or its formatted string; non-fracTime values are
// passed through unchanged.
func (e *RowsEvent) parseFracTime(t interface{}) interface{} {
	ft, ok := t.(fracTime)
	if !ok {
		return t
	}
	if e.parseTime {
		// return Golang time directly
		return ft.Time
	}
	// Don't parse time, return string directly
	return ft.String()
}
// decodeValue decodes one column value of type tp (with per-type metadata
// meta, from the table map) from data, returning the value and the number
// of bytes consumed.
// see mysql sql/log_event.cc log_event_print_value
func (e *RowsEvent) decodeValue(data []byte, tp byte, meta uint16) (v interface{}, n int, err error) {
	var length int = 0

	if tp == MYSQL_TYPE_STRING {
		if meta >= 256 {
			// the real type and length are packed into the two meta bytes
			b0 := uint8(meta >> 8)
			b1 := uint8(meta & 0xFF)

			if b0&0x30 != 0x30 {
				length = int(uint16(b1) | (uint16((b0&0x30)^0x30) << 4))
				tp = byte(b0 | 0x30)
			} else {
				length = int(meta & 0xFF)
				tp = b0
			}
		} else {
			length = int(meta)
		}
	}

	switch tp {
	case MYSQL_TYPE_NULL:
		return nil, 0, nil
	case MYSQL_TYPE_LONG:
		n = 4
		v = ParseBinaryInt32(data)
	case MYSQL_TYPE_TINY:
		n = 1
		v = ParseBinaryInt8(data)
	case MYSQL_TYPE_SHORT:
		n = 2
		v = ParseBinaryInt16(data)
	case MYSQL_TYPE_INT24:
		n = 3
		v = ParseBinaryInt24(data)
	case MYSQL_TYPE_LONGLONG:
		n = 8
		v = ParseBinaryInt64(data)
	case MYSQL_TYPE_NEWDECIMAL:
		// meta packs precision (high byte) and scale (low byte)
		prec := uint8(meta >> 8)
		scale := uint8(meta & 0xFF)
		v, n, err = decodeDecimal(data, int(prec), int(scale), e.useDecimal)
	case MYSQL_TYPE_FLOAT:
		n = 4
		v = ParseBinaryFloat32(data)
	case MYSQL_TYPE_DOUBLE:
		n = 8
		v = ParseBinaryFloat64(data)
	case MYSQL_TYPE_BIT:
		nbits := ((meta >> 8) * 8) + (meta & 0xFF)
		n = int(nbits+7) / 8

		//use int64 for bit
		v, err = decodeBit(data, int(nbits), int(n))
	case MYSQL_TYPE_TIMESTAMP:
		n = 4
		t := binary.LittleEndian.Uint32(data)
		v = e.parseFracTime(fracTime{time.Unix(int64(t), 0), 0})
	case MYSQL_TYPE_TIMESTAMP2:
		v, n, err = decodeTimestamp2(data, meta)
		v = e.parseFracTime(v)
	case MYSQL_TYPE_DATETIME:
		n = 8
		i64 := binary.LittleEndian.Uint64(data)
		// packed as YYYYMMDD * 1e6 + HHMMSS
		d := i64 / 1000000
		t := i64 % 1000000
		v = e.parseFracTime(fracTime{time.Date(int(d/10000),
			time.Month((d%10000)/100),
			int(d%100),
			int(t/10000),
			int((t%10000)/100),
			int(t%100),
			0,
			time.UTC), 0})
	case MYSQL_TYPE_DATETIME2:
		v, n, err = decodeDatetime2(data, meta)
		v = e.parseFracTime(v)
	case MYSQL_TYPE_TIME:
		n = 3
		i32 := uint32(FixedLengthInt(data[0:3]))
		if i32 == 0 {
			v = "00:00:00"
		} else {
			sign := ""
			// NOTE(review): i32 is unsigned, so this branch can never fire;
			// negative TIME values are not handled here — confirm intent.
			if i32 < 0 {
				sign = "-"
			}
			v = fmt.Sprintf("%s%02d:%02d:%02d", sign, i32/10000, (i32%10000)/100, i32%100)
		}
	case MYSQL_TYPE_TIME2:
		v, n, err = decodeTime2(data, meta)
	case MYSQL_TYPE_DATE:
		n = 3
		i32 := uint32(FixedLengthInt(data[0:3]))
		if i32 == 0 {
			v = "0000-00-00"
		} else {
			v = fmt.Sprintf("%04d-%02d-%02d", i32/(16*32), i32/32%16, i32%32)
		}

	case MYSQL_TYPE_YEAR:
		n = 1
		v = int(data[0]) + 1900
	case MYSQL_TYPE_ENUM:
		l := meta & 0xFF
		switch l {
		case 1:
			v = int64(data[0])
			n = 1
		case 2:
			v = int64(binary.BigEndian.Uint16(data))
			n = 2
		default:
			err = fmt.Errorf("Unknown ENUM packlen=%d", l)
		}
	case MYSQL_TYPE_SET:
		n = int(meta & 0xFF)
		nbits := n * 8

		v, err = decodeBit(data, nbits, n)
	case MYSQL_TYPE_BLOB:
		v, n, err = decodeBlob(data, meta)
	case MYSQL_TYPE_VARCHAR,
		MYSQL_TYPE_VAR_STRING:
		length = int(meta)
		v, n = decodeString(data, length)
	case MYSQL_TYPE_STRING:
		v, n = decodeString(data, length)
	case MYSQL_TYPE_JSON:
		// Refer: https://github.com/shyiko/mysql-binlog-connector-java/blob/master/src/main/java/com/github/shyiko/mysql/binlog/event/deserialization/AbstractRowsEventDataDeserializer.java#L404
		length = int(FixedLengthInt(data[0:meta]))
		n = length + int(meta)
		v, err = e.decodeJsonBinary(data[meta:n])
	case MYSQL_TYPE_GEOMETRY:
		// MySQL saves Geometry as Blob in binlog
		// Seem that the binary format is SRID (4 bytes) + WKB, outer can use
		// MySQL GeoFromWKB or others to create the geometry data.
		// Refer https://dev.mysql.com/doc/refman/5.7/en/gis-wkb-functions.html
		// I also find some go libs to handle WKB if possible
		// see https://github.com/twpayne/go-geom or https://github.com/paulmach/go.geo
		v, n, err = decodeBlob(data, meta)
	default:
		err = fmt.Errorf("unsupport type %d in binlog and don't know how to handle", tp)
	}
	return
}
// decodeString decodes a length-prefixed string column. Columns whose
// declared maximum length fits in one byte use a 1-byte prefix; longer
// columns use a 2-byte little-endian prefix.
func decodeString(data []byte, length int) (v string, n int) {
	if length >= 256 {
		length = int(binary.LittleEndian.Uint16(data[0:]))
		n = length + 2
		v = hack.String(data[2:n])
		return
	}
	length = int(data[0])
	n = length + 1
	v = hack.String(data[1:n])
	return
}
// digitsPerInteger is how many decimal digits each full 4-byte group
// stores in MySQL's packed DECIMAL format.
const digitsPerInteger int = 9

// compressedBytes[n] is the number of bytes used to store n leftover
// decimal digits (0 <= n <= 9).
var compressedBytes = []int{0, 1, 1, 2, 2, 3, 3, 4, 4, 4}
// decodeDecimalDecompressValue reads the compressed group of compIndx
// leftover decimal digits, XOR-ing each byte with mask (which undoes the
// sign encoding for negative decimals). Returns the number of bytes read
// and the big-endian value.
func decodeDecimalDecompressValue(compIndx int, data []byte, mask uint8) (size int, value uint32) {
	size = compressedBytes[compIndx]
	buf := make([]byte, size)
	for i, b := range data[:size] {
		buf[i] = b ^ mask
	}
	value = uint32(BFixedLengthInt(buf))
	return
}
// decodeDecimal decodes MySQL's packed DECIMAL(precision, decimals)
// representation: full 9-digit groups take 4 big-endian bytes, leftover
// digits a compressed group; the sign lives in the high bit of the first
// byte and negative values are stored bit-inverted. Returns a
// decimal value (useDecimal) or float64, plus the bytes consumed.
func decodeDecimal(data []byte, precision int, decimals int, useDecimal bool) (interface{}, int, error) {
	//see python mysql replication and https://github.com/jeremycole/mysql_binlog
	integral := (precision - decimals)
	uncompIntegral := int(integral / digitsPerInteger)
	uncompFractional := int(decimals / digitsPerInteger)
	compIntegral := integral - (uncompIntegral * digitsPerInteger)
	compFractional := decimals - (uncompFractional * digitsPerInteger)

	binSize := uncompIntegral*4 + compressedBytes[compIntegral] +
		uncompFractional*4 + compressedBytes[compFractional]

	buf := make([]byte, binSize)
	copy(buf, data[:binSize])

	//must copy the data for later change
	data = buf

	// Support negative
	// The sign is encoded in the high bit of the the byte
	// But this bit can also be used in the value
	value := uint32(data[0])
	var res bytes.Buffer
	var mask uint32 = 0
	if value&0x80 == 0 {
		// negative numbers are stored inverted; mask undoes that below
		mask = uint32((1 << 32) - 1)
		res.WriteString("-")
	}

	//clear sign
	data[0] ^= 0x80

	pos, value := decodeDecimalDecompressValue(compIntegral, data, uint8(mask))
	res.WriteString(fmt.Sprintf("%d", value))

	for i := 0; i < uncompIntegral; i++ {
		value = binary.BigEndian.Uint32(data[pos:]) ^ mask
		pos += 4
		res.WriteString(fmt.Sprintf("%09d", value))
	}

	res.WriteString(".")

	for i := 0; i < uncompFractional; i++ {
		value = binary.BigEndian.Uint32(data[pos:]) ^ mask
		pos += 4
		res.WriteString(fmt.Sprintf("%09d", value))
	}

	if size, value := decodeDecimalDecompressValue(compFractional, data[pos:], uint8(mask)); size > 0 {
		res.WriteString(fmt.Sprintf("%0*d", compFractional, value))
		pos += size
	}

	if useDecimal {
		f, err := decimal.NewFromString(hack.String(res.Bytes()))
		return f, pos, err
	}

	f, err := strconv.ParseFloat(hack.String(res.Bytes()), 64)
	return f, pos, err
}
// decodeBit decodes a BIT column value occupying length bytes into an int64.
// nbits is the declared bit width; multi-byte values are big-endian.
// An unsupported length yields an error with value left at zero.
func decodeBit(data []byte, nbits int, length int) (value int64, err error) {
	if nbits <= 1 {
		// a 1-bit column must occupy exactly one byte
		if length != 1 {
			err = fmt.Errorf("invalid bit length %d", length)
			return
		}
		value = int64(data[0])
		return
	}

	switch length {
	case 1:
		value = int64(data[0])
	case 2:
		value = int64(binary.BigEndian.Uint16(data))
	case 3:
		value = int64(BFixedLengthInt(data[0:3]))
	case 4:
		value = int64(binary.BigEndian.Uint32(data))
	case 5:
		value = int64(BFixedLengthInt(data[0:5]))
	case 6:
		value = int64(BFixedLengthInt(data[0:6]))
	case 7:
		value = int64(BFixedLengthInt(data[0:7]))
	case 8:
		value = int64(binary.BigEndian.Uint64(data))
	default:
		err = fmt.Errorf("invalid bit length %d", length)
	}
	return
}
// decodeTimestamp2 decodes a MySQL TIMESTAMP2 value (fractional-second
// capable TIMESTAMP): 4 bytes of big-endian seconds since the Unix epoch
// followed by (dec+1)/2 bytes of fraction, where dec is the column's
// fractional-second precision (0..6). It returns either a zero-time string
// (for the all-zero timestamp) or a fracTime, plus the bytes consumed.
func decodeTimestamp2(data []byte, dec uint16) (interface{}, int, error) {
	//get timestamp binary length
	n := int(4 + (dec+1)/2)
	sec := int64(binary.BigEndian.Uint32(data[0:4]))
	usec := int64(0)
	switch dec {
	case 1, 2:
		// one byte of 1/100 s -> microseconds
		usec = int64(data[4]) * 10000
	case 3, 4:
		// two bytes of 1/10000 s -> microseconds
		usec = int64(binary.BigEndian.Uint16(data[4:])) * 100
	case 5, 6:
		// three bytes, already microseconds
		usec = int64(BFixedLengthInt(data[4:7]))
	}

	if sec == 0 {
		// the zero timestamp has no time.Time representation; format as text
		return formatZeroTime(int(usec), int(dec)), n, nil
	}

	return fracTime{time.Unix(sec, usec*1000), int(dec)}, n, nil
}
// DATETIMEF_INT_OFS is the bias added to the 5-byte packed integer part of a
// DATETIME2 value so stored values sort as unsigned; subtracted on decode
// (mirrors the same-named constant in MySQL's my_time.h).
const DATETIMEF_INT_OFS int64 = 0x8000000000
// decodeDatetime2 decodes a MySQL DATETIME2 value: 5 bytes of big-endian
// packed date+time followed by (dec+1)/2 bytes of fractional seconds, where
// dec is the column's fractional-second precision (0..6). It returns either
// a zero-time string (for the all-zero datetime) or a fracTime, plus the
// number of bytes consumed.
func decodeDatetime2(data []byte, dec uint16) (interface{}, int, error) {
	//get datetime binary length
	n := int(5 + (dec+1)/2)

	intPart := int64(BFixedLengthInt(data[0:5])) - DATETIMEF_INT_OFS
	var frac int64 = 0

	switch dec {
	case 1, 2:
		// one byte of 1/100 s -> microseconds
		frac = int64(data[5]) * 10000
	case 3, 4:
		// two bytes of 1/10000 s -> microseconds
		frac = int64(binary.BigEndian.Uint16(data[5:7])) * 100
	case 5, 6:
		// three bytes, already microseconds
		frac = int64(BFixedLengthInt(data[5:8]))
	}

	if intPart == 0 {
		return formatZeroTime(int(frac), int(dec)), n, nil
	}

	tmp := intPart<<24 + frac
	//handle sign???
	if tmp < 0 {
		tmp = -tmp
	}

	// var secPart int64 = tmp % (1 << 24)

	// Unpack the bit fields: the top 17 bits of ymdhms are the date part
	// (year*13+month in the high bits, 5-bit day below), the low 17 bits
	// are the time part (hour, 6-bit minute, 6-bit second).
	ymdhms := tmp >> 24

	ymd := ymdhms >> 17
	ym := ymd >> 5
	hms := ymdhms % (1 << 17)

	day := int(ymd % (1 << 5))
	month := int(ym % 13)
	year := int(ym / 13)

	second := int(hms % (1 << 6))
	minute := int((hms >> 6) % (1 << 6))
	hour := int((hms >> 12))

	return fracTime{time.Date(year, time.Month(month), day, hour, minute, second, int(frac*1000), time.UTC), int(dec)}, n, nil
}
// TIMEF_OFS and TIMEF_INT_OFS are the sort-order biases applied to packed
// TIME2 values (6-byte full value and 3-byte integer part respectively);
// subtracted on decode (mirror the same-named constants in MySQL's my_time.h).
const TIMEF_OFS int64 = 0x800000000000
const TIMEF_INT_OFS int64 = 0x800000
// decodeTime2 decodes a MySQL TIME2 value (the fractional-second-capable
// TIME encoding) into a "[-]HH:MM:SS[.ffffff]" string.
//
// data holds 3 bytes of big-endian packed integer part followed by
// (dec+1)/2 bytes of fractional part, where dec is the column's
// fractional-second precision (FSP, 0..6). It returns the formatted value
// and the number of bytes consumed (the error is always nil today; kept
// for signature symmetry with the other decoders).
func decodeTime2(data []byte, dec uint16) (string, int, error) {
	//time binary length
	n := int(3 + (dec+1)/2)

	tmp := int64(0)
	intPart := int64(0)
	frac := int64(0)
	switch dec {
	case 1, 2:
		// BUG FIX: FSP 1 and 2 share one fractional byte and must be
		// decoded by the same branch. They were previously written as two
		// separate cases ("case 1:" with an empty body, then "case 2:");
		// Go's switch does not fall through, so dec == 1 (and likewise
		// 3 and 5 below) silently decoded to zero. decodeTimestamp2 and
		// decodeDatetime2 already use the merged "case 1, 2:" form.
		intPart = int64(BFixedLengthInt(data[0:3])) - TIMEF_INT_OFS
		frac = int64(data[3])
		if intPart < 0 && frac > 0 {
			/*
			   Negative values are stored with reverse fractional part order,
			   for binary sort compatibility.

			     Disk value  intpart frac   Time value   Memory value
			     800000.00    0      0      00:00:00.00  0000000000.000000
			     7FFFFF.FF   -1      255   -00:00:00.01  FFFFFFFFFF.FFD8F0
			     7FFFFF.9D   -1      99    -00:00:00.99  FFFFFFFFFF.F0E4D0
			     7FFFFF.00   -1      0     -00:00:01.00  FFFFFFFFFF.000000
			     7FFFFE.FF   -1      255   -00:00:01.01  FFFFFFFFFE.FFD8F0
			     7FFFFE.F6   -2      246   -00:00:01.10  FFFFFFFFFE.FE7960

			   Formula to convert fractional part from disk format
			   (now stored in "frac" variable) to absolute value: "0x100 - frac".
			   To reconstruct in-memory value, we shift
			   to the next integer value and then substruct fractional part.
			*/
			intPart++     /* Shift to the next integer value */
			frac -= 0x100 /* -(0x100 - frac) */
		}
		tmp = intPart<<24 + frac*10000
	case 3, 4:
		intPart = int64(BFixedLengthInt(data[0:3])) - TIMEF_INT_OFS
		frac = int64(binary.BigEndian.Uint16(data[3:5]))
		if intPart < 0 && frac > 0 {
			/*
			   Fix reverse fractional part order: "0x10000 - frac".
			   See comments for FSP=1 and FSP=2 above.
			*/
			intPart++       /* Shift to the next integer value */
			frac -= 0x10000 /* -(0x10000-frac) */
		}
		tmp = intPart<<24 + frac*100
	case 5, 6:
		// FSP 5/6: the whole value, fraction included, is one packed
		// 6-byte big-endian integer.
		tmp = int64(BFixedLengthInt(data[0:6])) - TIMEF_OFS
	default:
		// dec == 0: integer seconds only, no fractional bytes.
		intPart = int64(BFixedLengthInt(data[0:3])) - TIMEF_INT_OFS
		tmp = intPart << 24
	}

	if intPart == 0 {
		// NOTE(review): for dec 5/6 intPart is never assigned, so any value
		// with a zero integer part returns "00:00:00" and drops the
		// fraction; this mirrors the original behavior — confirm upstream
		// before changing.
		return "00:00:00", n, nil
	}

	hms := int64(0)
	sign := ""
	if tmp < 0 {
		tmp = -tmp
		sign = "-"
	}

	hms = tmp >> 24

	hour := (hms >> 12) % (1 << 10) /* 10 bits starting at 12th */
	minute := (hms >> 6) % (1 << 6) /* 6 bits starting at 6th */
	second := hms % (1 << 6)        /* 6 bits starting at 0th */
	secPart := tmp % (1 << 24)

	if secPart != 0 {
		return fmt.Sprintf("%s%02d:%02d:%02d.%06d", sign, hour, minute, second, secPart), n, nil
	}

	return fmt.Sprintf("%s%02d:%02d:%02d", sign, hour, minute, second), n, nil
}
// decodeBlob decodes a length-prefixed BLOB/TEXT-style value. meta is the
// width of the little-endian length prefix (1, 2, 3 or 4 bytes). It returns
// the payload (aliased from data, not copied) and the total bytes consumed;
// any other meta yields an error.
func decodeBlob(data []byte, meta uint16) (v []byte, n int, err error) {
	var length int
	switch meta {
	case 1:
		length = int(data[0])
	case 2:
		length = int(binary.LittleEndian.Uint16(data))
	case 3:
		length = int(FixedLengthInt(data[0:3]))
	case 4:
		length = int(binary.LittleEndian.Uint32(data))
	default:
		err = fmt.Errorf("invalid blob packlen = %d", meta)
		return
	}

	// the prefix width doubles as the payload offset
	prefix := int(meta)
	n = prefix + length
	v = data[prefix:n]
	return
}
// Dump writes a human-readable description of the rows event to w: the
// header fields, then one "index:value" line per column of each row.
func (e *RowsEvent) Dump(w io.Writer) {
	fmt.Fprintf(w, "TableID: %d\n", e.TableID)
	fmt.Fprintf(w, "Flags: %d\n", e.Flags)
	fmt.Fprintf(w, "Column count: %d\n", e.ColumnCount)

	fmt.Fprintf(w, "Values:\n")
	for _, row := range e.Rows {
		fmt.Fprintf(w, "--\n")
		for j, val := range row {
			// byte slices print quoted; everything else in Go syntax
			switch val.(type) {
			case []byte:
				fmt.Fprintf(w, "%d:%q\n", j, val)
			default:
				fmt.Fprintf(w, "%d:%#v\n", j, val)
			}
		}
	}
	fmt.Fprintln(w)
}
// RowsQueryEvent carries the text of the statement that produced the
// following rows events (a ROWS_QUERY binlog event).
type RowsQueryEvent struct {
	Query []byte
}

// Decode parses the event body: one length byte (skipped) followed by the
// query text. Query aliases data rather than copying it.
func (e *RowsQueryEvent) Decode(data []byte) error {
	//ignore length byte 1
	e.Query = data[1:]
	return nil
}

// Dump writes a human-readable representation of the event to w.
func (e *RowsQueryEvent) Dump(w io.Writer) {
	fmt.Fprintf(w, "Query: %s\n", e.Query)
	fmt.Fprintln(w)
}

View File

@@ -0,0 +1,43 @@
package replication
import (
"fmt"
"strings"
"time"
)
var (
	// fracTimeFormat[d] is the time layout string with d fractional digits
	// (d in 0..6); populated once by init below.
	fracTimeFormat []string
)
// fracTime is a help structure wrapping Golang Time. It carries the
// column's fractional-second precision so String renders exactly the number
// of digits the column declared.
type fracTime struct {
	time.Time

	// Dec must in [0, 6]
	Dec int
}

// String formats the wrapped time using the layout with Dec fractional digits.
func (t fracTime) String() string {
	return t.Format(fracTimeFormat[t.Dec])
}
// formatZeroTime renders the "zero" datetime 0000-00-00 00:00:00 with dec
// fractional digits taken from frac (microseconds). With dec == 0 the bare
// datetime is returned; otherwise the six-digit fraction is truncated to
// dec digits.
func formatZeroTime(frac int, dec int) string {
	if dec == 0 {
		return "0000-00-00 00:00:00"
	}

	full := fmt.Sprintf("0000-00-00 00:00:00.%06d", frac)

	// dec is at most 6; e.g. frac=924000 with dec=3 must print ".924"
	return full[:len(full)-(6-dec)]
}
// init precomputes the seven layout strings used by fracTime.String:
// index 0 has no fractional part, index i (1..6) carries i zero digits.
func init() {
	fracTimeFormat = make([]string, 7)
	fracTimeFormat[0] = "2006-01-02 15:04:05"

	for i := 1; i <= 6; i++ {
		fracTimeFormat[i] = "2006-01-02 15:04:05." + strings.Repeat("0", i)
	}
}

View File

@@ -0,0 +1,27 @@
# Bazel build definition for the vendored github.com/siddontang/go-mysql/schema
# package. The "automanaged" tags indicate these targets are maintained by the
# repo's build tooling; srcs/deps mirror the package's Go files and imports.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["schema.go"],
    # importmap namespaces the vendored copy so it cannot collide with any
    # non-vendored package sharing the same import path.
    importmap = "go-common/vendor/github.com/siddontang/go-mysql/schema",
    importpath = "github.com/siddontang/go-mysql/schema",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/juju/errors:go_default_library",
        "//vendor/github.com/siddontang/go-mysql/mysql:go_default_library",
    ],
)

# Source-file groups consumed by the repo-wide build aggregation targets.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

363
vendor/github.com/siddontang/go-mysql/schema/schema.go generated vendored Normal file
View File

@@ -0,0 +1,363 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package schema
import (
"database/sql"
"fmt"
"strings"
"github.com/juju/errors"
"github.com/siddontang/go-mysql/mysql"
)
// ErrTableNotExist is returned when a queried table cannot be found.
var ErrTableNotExist = errors.New("table is not exist")

// ErrMissingTableMeta is returned when metadata for a table is unavailable.
var ErrMissingTableMeta = errors.New("missing table meta")

// HAHealthCheckSchema names the mysql.ha_health_check table used by HA
// health-check probes.
var HAHealthCheckSchema = "mysql.ha_health_check"

// Column type classification stored in TableColumn.Type.
const (
	TYPE_NUMBER    = iota + 1 // tinyint, smallint, mediumint, int, bigint, year
	TYPE_FLOAT                // float, double (decimal is also mapped here by AddColumn)
	TYPE_ENUM                 // enum
	TYPE_SET                  // set
	TYPE_STRING               // other
	TYPE_DATETIME             // datetime
	TYPE_TIMESTAMP            // timestamp
	TYPE_DATE                 // date
	TYPE_TIME                 // time
	TYPE_BIT                  // bit
	TYPE_JSON                 // json
)
// TableColumn describes a single column of a MySQL table.
type TableColumn struct {
	Name       string
	Type       int      // one of the TYPE_* constants above
	Collation  string
	RawType    string   // raw type text as reported by SHOW FULL COLUMNS
	IsAuto     bool     // set when the column's Extra is auto_increment
	IsUnsigned bool     // set for unsigned or zerofill numeric columns
	EnumValues []string // allowed values when Type == TYPE_ENUM
	SetValues  []string // allowed values when Type == TYPE_SET
}
// Index describes one table index: its name, the ordered column names it
// covers, and a per-column cardinality value (estimated when the server
// reports zero — see AddColumn).
type Index struct {
	Name        string
	Columns     []string
	Cardinality []uint64
}
// Table holds the schema of one MySQL table: its columns, its indexes, and
// the positions of the primary-key columns.
type Table struct {
	Schema  string
	Name    string

	Columns []TableColumn
	Indexes []*Index
	// PKColumns are indexes into Columns for the PRIMARY key's columns
	// (empty when the table has no primary key).
	PKColumns []int
}
// String returns the fully qualified "schema.name" form of the table.
func (ta *Table) String() string {
	return ta.Schema + "." + ta.Name
}
// AddColumn appends a column to the table and classifies it into one of the
// TYPE_* constants from its raw SHOW FULL COLUMNS type text. It also records
// enum/set member lists, unsigned/zerofill, and auto_increment flags.
func (ta *Table) AddColumn(name string, columnType string, collation string, extra string) {
	ta.Columns = append(ta.Columns, TableColumn{Name: name, Collation: collation})
	col := &ta.Columns[len(ta.Columns)-1]
	col.RawType = columnType

	// The cases below are ordered to mirror the original prefix checks;
	// notably "datetime"/"timestamp" must be tested before "time".
	switch {
	case strings.HasPrefix(columnType, "float"),
		strings.HasPrefix(columnType, "double"),
		strings.HasPrefix(columnType, "decimal"):
		col.Type = TYPE_FLOAT
	case strings.HasPrefix(columnType, "enum"):
		col.Type = TYPE_ENUM
		// "enum('a','b')" -> ["a", "b"]
		col.EnumValues = strings.Split(strings.Replace(
			strings.TrimSuffix(
				strings.TrimPrefix(
					columnType, "enum("),
				")"),
			"'", "", -1),
			",")
	case strings.HasPrefix(columnType, "set"):
		col.Type = TYPE_SET
		// "set('a','b')" -> ["a", "b"]
		col.SetValues = strings.Split(strings.Replace(
			strings.TrimSuffix(
				strings.TrimPrefix(
					columnType, "set("),
				")"),
			"'", "", -1),
			",")
	case strings.HasPrefix(columnType, "datetime"):
		col.Type = TYPE_DATETIME
	case strings.HasPrefix(columnType, "timestamp"):
		col.Type = TYPE_TIMESTAMP
	case strings.HasPrefix(columnType, "time"):
		col.Type = TYPE_TIME
	case columnType == "date":
		col.Type = TYPE_DATE
	case strings.HasPrefix(columnType, "bit"):
		col.Type = TYPE_BIT
	case strings.HasPrefix(columnType, "json"):
		col.Type = TYPE_JSON
	case strings.Contains(columnType, "int"),
		strings.HasPrefix(columnType, "year"):
		col.Type = TYPE_NUMBER
	default:
		col.Type = TYPE_STRING
	}

	if strings.Contains(columnType, "unsigned") || strings.Contains(columnType, "zerofill") {
		col.IsUnsigned = true
	}

	if extra == "auto_increment" {
		col.IsAuto = true
	}
}
// FindColumn returns the position of the named column in Columns,
// or -1 when no column has that name.
func (ta *Table) FindColumn(name string) int {
	for i := range ta.Columns {
		if ta.Columns[i].Name == name {
			return i
		}
	}
	return -1
}
// GetPKColumn returns the index-th primary-key column of the table.
// NOTE(review): index is not bounds-checked and PKColumns may be empty for
// tables without a PRIMARY key — callers must guard against both.
func (ta *Table) GetPKColumn(index int) *TableColumn {
	return &ta.Columns[ta.PKColumns[index]]
}
// AddIndex appends a new, empty index with the given name and returns it so
// the caller can populate its columns.
func (ta *Table) AddIndex(name string) (index *Index) {
	index = NewIndex(name)
	ta.Indexes = append(ta.Indexes, index)
	return index
}
// NewIndex returns an empty Index with the given name and small preallocated
// column/cardinality slices.
func NewIndex(name string) *Index {
	return &Index{
		Name:        name,
		Columns:     make([]string, 0, 8),
		Cardinality: make([]uint64, 0, 8),
	}
}
// AddColumn appends one column to the index. A zero (unknown) cardinality is
// replaced with the column's 1-based position within the index.
func (idx *Index) AddColumn(name string, cardinality uint64) {
	if cardinality == 0 {
		cardinality = uint64(len(idx.Cardinality) + 1)
	}
	idx.Columns = append(idx.Columns, name)
	idx.Cardinality = append(idx.Cardinality, cardinality)
}
// FindColumn returns the position of the named column within the index,
// or -1 when the index does not cover it.
func (idx *Index) FindColumn(name string) int {
	for pos, col := range idx.Columns {
		if col == name {
			return pos
		}
	}
	return -1
}
// IsTableExist reports whether schema.name exists by probing
// INFORMATION_SCHEMA.TABLES.
// SECURITY NOTE(review): schema and name are interpolated directly into the
// SQL text; if either can ever come from untrusted input this is injectable.
// They appear to be internal identifiers here — confirm at call sites.
func IsTableExist(conn mysql.Executer, schema string, name string) (bool, error) {
	query := fmt.Sprintf("SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '%s' and TABLE_NAME = '%s' LIMIT 1", schema, name)
	r, err := conn.Execute(query)
	if err != nil {
		return false, errors.Trace(err)
	}

	return r.RowNumber() == 1, nil
}
// NewTableFromSqlDB builds the full metadata (columns, indexes, primary-key
// positions) of schema.name using a standard database/sql connection.
func NewTableFromSqlDB(conn *sql.DB, schema string, name string) (*Table, error) {
	ta := &Table{
		Schema:  schema,
		Name:    name,
		Columns: make([]TableColumn, 0, 16),
		Indexes: make([]*Index, 0, 8),
	}

	// Columns load first: index loading resolves PK positions via FindColumn.
	for _, step := range []func(*sql.DB) error{ta.fetchColumnsViaSqlDB, ta.fetchIndexesViaSqlDB} {
		if err := step(conn); err != nil {
			return nil, errors.Trace(err)
		}
	}

	return ta, nil
}
// NewTable builds the full metadata (columns, indexes, primary-key
// positions) of schema.name using a go-mysql Executer connection.
func NewTable(conn mysql.Executer, schema string, name string) (*Table, error) {
	ta := &Table{
		Schema:  schema,
		Name:    name,
		Columns: make([]TableColumn, 0, 16),
		Indexes: make([]*Index, 0, 8),
	}

	// Columns load first: index loading resolves PK positions via FindColumn.
	for _, step := range []func(mysql.Executer) error{ta.fetchColumns, ta.fetchIndexes} {
		if err := step(conn); err != nil {
			return nil, errors.Trace(err)
		}
	}

	return ta, nil
}
// fetchColumns loads column metadata by running SHOW FULL COLUMNS and
// reading the Field, Type, Collation and Extra result columns
// (positions 0, 1, 2 and 6).
func (ta *Table) fetchColumns(conn mysql.Executer) error {
	res, err := conn.Execute(fmt.Sprintf("show full columns from `%s`.`%s`", ta.Schema, ta.Name))
	if err != nil {
		return errors.Trace(err)
	}

	for row := 0; row < res.RowNumber(); row++ {
		name, _ := res.GetString(row, 0)
		colType, _ := res.GetString(row, 1)
		collation, _ := res.GetString(row, 2)
		extra, _ := res.GetString(row, 6)
		ta.AddColumn(name, colType, collation, extra)
	}

	return nil
}
// fetchColumnsViaSqlDB loads column metadata through database/sql by running
// SHOW FULL COLUMNS and scanning Field, Type, Collation and Extra
// (positions 0, 1, 2 and 6 of the 9-column result).
func (ta *Table) fetchColumnsViaSqlDB(conn *sql.DB) error {
	r, err := conn.Query(fmt.Sprintf("show full columns from `%s`.`%s`", ta.Schema, ta.Name))
	if err != nil {
		return errors.Trace(err)
	}

	defer r.Close()

	// unneeded result columns are scanned into one shared throwaway target
	var unusedVal interface{}
	unused := &unusedVal

	for r.Next() {
		var name, colType, extra string
		var collation sql.NullString // Collation is NULL for non-text columns
		err := r.Scan(&name, &colType, &collation, &unused, &unused, &unused, &extra, &unused, &unused)
		if err != nil {
			return errors.Trace(err)
		}
		ta.AddColumn(name, colType, collation.String, extra)
	}

	return r.Err()
}
// fetchIndexes loads index metadata via SHOW INDEX. Rows belonging to one
// index share a Key_name (position 2) and arrive consecutively, so a new
// Index is started whenever the name changes; each row contributes one
// column name (position 4) and its cardinality (position 6). Finally the
// primary-key column positions are resolved.
func (ta *Table) fetchIndexes(conn mysql.Executer) error {
	r, err := conn.Execute(fmt.Sprintf("show index from `%s`.`%s`", ta.Schema, ta.Name))
	if err != nil {
		return errors.Trace(err)
	}
	var currentIndex *Index
	currentName := ""

	for i := 0; i < r.RowNumber(); i++ {
		indexName, _ := r.GetString(i, 2)
		if currentName != indexName {
			currentIndex = ta.AddIndex(indexName)
			currentName = indexName
		}
		cardinality, _ := r.GetUint(i, 6)
		colName, _ := r.GetString(i, 4)
		currentIndex.AddColumn(colName, cardinality)
	}

	return ta.fetchPrimaryKeyColumns()
}
// fetchIndexesViaSqlDB loads index metadata through database/sql by running
// SHOW INDEX and scanning 13 result columns positionally: Key_name
// (position 2), Column_name (position 4) and Cardinality (position 6) are
// kept, the rest go to a throwaway target.
// NOTE(review): the 13-destination Scan must match the server's SHOW INDEX
// column count — this varies across MySQL versions; confirm against the
// supported server range.
func (ta *Table) fetchIndexesViaSqlDB(conn *sql.DB) error {
	r, err := conn.Query(fmt.Sprintf("show index from `%s`.`%s`", ta.Schema, ta.Name))
	if err != nil {
		return errors.Trace(err)
	}

	defer r.Close()

	var currentIndex *Index
	currentName := ""

	var unusedVal interface{}
	unused := &unusedVal

	for r.Next() {
		var indexName, colName string
		// cardinality is scanned untyped because its driver type varies;
		// toUint64 normalizes it below
		var cardinality interface{}
		err := r.Scan(
			&unused,
			&unused,
			&indexName,
			&unused,
			&colName,
			&unused,
			&cardinality,
			&unused,
			&unused,
			&unused,
			&unused,
			&unused,
			&unused,
		)
		if err != nil {
			return errors.Trace(err)
		}

		// consecutive rows with the same Key_name belong to one index
		if currentName != indexName {
			currentIndex = ta.AddIndex(indexName)
			currentName = indexName
		}

		c := toUint64(cardinality)
		currentIndex.AddColumn(colName, c)
	}

	return ta.fetchPrimaryKeyColumns()
}
// toUint64 converts any integer-typed value to uint64. Signed values are
// converted with Go's usual integer conversion, so negative inputs wrap to
// large unsigned values. Any non-integer input yields 0.
func toUint64(i interface{}) uint64 {
	switch v := i.(type) {
	case int:
		return uint64(v)
	case int8:
		return uint64(v)
	case int16:
		return uint64(v)
	case int32:
		return uint64(v)
	case int64:
		return uint64(v)
	case uint:
		return uint64(v)
	case uint8:
		return uint64(v)
	case uint16:
		return uint64(v)
	case uint32:
		return uint64(v)
	case uint64:
		return v
	}

	return 0
}
// fetchPrimaryKeyColumns fills PKColumns with the positions (within Columns)
// of the PRIMARY key's columns. SHOW INDEX lists PRIMARY first, so only
// Indexes[0] is checked; tables without a primary key are left untouched.
func (ta *Table) fetchPrimaryKeyColumns() error {
	if len(ta.Indexes) == 0 || ta.Indexes[0].Name != "PRIMARY" {
		return nil
	}

	pk := ta.Indexes[0]
	positions := make([]int, len(pk.Columns))
	for i, colName := range pk.Columns {
		positions[i] = ta.FindColumn(colName)
	}
	ta.PKColumns = positions

	return nil
}