github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/dm/syncer/ddl.go

     1  // Copyright 2022 PingCAP, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package syncer
    15  
    16  import (
    17  	"fmt"
    18  	"strings"
    19  	"time"
    20  
    21  	"github.com/go-mysql-org/go-mysql/replication"
    22  	"github.com/pingcap/failpoint"
    23  	tidbddl "github.com/pingcap/tidb/pkg/ddl"
    24  	"github.com/pingcap/tidb/pkg/parser"
    25  	"github.com/pingcap/tidb/pkg/parser/ast"
    26  	"github.com/pingcap/tidb/pkg/parser/model"
    27  	"github.com/pingcap/tidb/pkg/parser/mysql"
    28  	"github.com/pingcap/tidb/pkg/table"
    29  	"github.com/pingcap/tidb/pkg/table/tables"
    30  	"github.com/pingcap/tidb/pkg/types"
    31  	"github.com/pingcap/tidb/pkg/util/filter"
    32  	tidbmock "github.com/pingcap/tidb/pkg/util/mock"
    33  	regexprrouter "github.com/pingcap/tidb/pkg/util/regexpr-router"
    34  	"github.com/pingcap/tiflow/dm/config"
    35  	"github.com/pingcap/tiflow/dm/pkg/binlog"
    36  	"github.com/pingcap/tiflow/dm/pkg/binlog/event"
    37  	"github.com/pingcap/tiflow/dm/pkg/conn"
    38  	tcontext "github.com/pingcap/tiflow/dm/pkg/context"
    39  	"github.com/pingcap/tiflow/dm/pkg/log"
    40  	parserpkg "github.com/pingcap/tiflow/dm/pkg/parser"
    41  	"github.com/pingcap/tiflow/dm/pkg/schema"
    42  	"github.com/pingcap/tiflow/dm/pkg/shardddl/optimism"
    43  	"github.com/pingcap/tiflow/dm/pkg/terror"
    44  	"github.com/pingcap/tiflow/dm/pkg/utils"
    45  	"github.com/pingcap/tiflow/dm/syncer/metrics"
    46  	onlineddl "github.com/pingcap/tiflow/dm/syncer/online-ddl-tools"
    47  	sm "github.com/pingcap/tiflow/dm/syncer/safe-mode"
    48  	"github.com/pingcap/tiflow/dm/syncer/shardddl"
    49  	bf "github.com/pingcap/tiflow/pkg/binlog-filter"
    50  	"go.uber.org/atomic"
    51  	"go.uber.org/zap"
    52  )
    53  
    54  type shardDDLStrategy interface {
    55  	// when preFilter returns true, it means we should skip this DDL
    56  	preFilter(ddlInfo *ddlInfo, qec *queryEventContext, sourceTable *filter.Table, targetTable *filter.Table) (bool, error)
    57  	// handleDDL handles the query event
    58  	handleDDL(qec *queryEventContext) error
    59  }
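// The three shard-mode strategies below (Normal, Pessimist, Optimist) each
// implement shardDDLStrategy. As a minimal illustrative sketch only (the type
// name is hypothetical and not part of this package), a strategy that never
// filters and accepts every query event would look like:
//
//	type noopStrategy struct{}
//
//	func (noopStrategy) preFilter(*ddlInfo, *queryEventContext, *filter.Table, *filter.Table) (bool, error) {
//		return false, nil // never skip the DDL
//	}
//
//	func (noopStrategy) handleDDL(*queryEventContext) error {
//		return nil // accept the query event without doing anything
//	}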
    60  
    61  type DDLWorker struct {
    62  	logger log.Logger
    63  
    64  	strategy shardDDLStrategy
    65  
    66  	binlogFilter               *bf.BinlogEvent
    67  	metricsProxies             *metrics.Proxies
    68  	name                       string
    69  	workerName                 string
    70  	sourceID                   string
    71  	enableGTID                 bool
    72  	shardMode                  string
    73  	upstreamTZStr              string
    74  	onlineDDL                  onlineddl.OnlinePlugin
    75  	checkpoint                 CheckPoint
    76  	tableRouter                *regexprrouter.RouteTable
    77  	sourceTableNamesFlavor     conn.LowerCaseTableNamesFlavor
    78  	collationCompatible        string
    79  	charsetAndDefaultCollation map[string]string
    80  	idAndCollationMap          map[int]string
    81  	baList                     *filter.Filter
    82  
    83  	getTableInfo            func(tctx *tcontext.Context, sourceTable, targetTable *filter.Table) (*model.TableInfo, error)
    84  	getDBInfoFromDownstream func(tctx *tcontext.Context, sourceTable, targetTable *filter.Table) (*model.DBInfo, error)
    85  	recordSkipSQLsLocation  func(ec *eventContext) error
    86  	trackDDL                func(usedSchema string, trackInfo *ddlInfo, ec *eventContext) error
    87  	saveTablePoint          func(table *filter.Table, location binlog.Location)
    88  	flushJobs               func() error
    89  }
    90  
    91  // NewDDLWorker creates a new DDLWorker instance.
    92  func NewDDLWorker(pLogger *log.Logger, syncer *Syncer) *DDLWorker {
    93  	ddlWorker := &DDLWorker{
    94  		logger:                     pLogger.WithFields(zap.String("component", "ddl")),
    95  		binlogFilter:               syncer.binlogFilter,
    96  		metricsProxies:             syncer.metricsProxies,
    97  		name:                       syncer.cfg.Name,
    98  		workerName:                 syncer.cfg.WorkerName,
    99  		sourceID:                   syncer.cfg.SourceID,
   100  		enableGTID:                 syncer.cfg.EnableGTID,
   101  		shardMode:                  syncer.cfg.ShardMode,
   102  		upstreamTZStr:              syncer.upstreamTZStr,
   103  		onlineDDL:                  syncer.onlineDDL,
   104  		checkpoint:                 syncer.checkpoint,
   105  		tableRouter:                syncer.tableRouter,
   106  		sourceTableNamesFlavor:     syncer.SourceTableNamesFlavor,
   107  		collationCompatible:        syncer.cfg.CollationCompatible,
   108  		charsetAndDefaultCollation: syncer.charsetAndDefaultCollation,
   109  		idAndCollationMap:          syncer.idAndCollationMap,
   110  		baList:                     syncer.baList,
   111  		recordSkipSQLsLocation:     syncer.recordSkipSQLsLocation,
   112  		trackDDL:                   syncer.trackDDL,
   113  		saveTablePoint:             syncer.saveTablePoint,
   114  		flushJobs:                  syncer.flushJobs,
   115  		getTableInfo:               syncer.getTableInfo,
   116  		getDBInfoFromDownstream:    syncer.getDBInfoFromDownstream,
   117  	}
   118  	switch syncer.cfg.ShardMode {
   119  	case config.ShardPessimistic:
   120  		ddlWorker.strategy = NewPessimistDDL(&ddlWorker.logger, syncer)
   121  	case config.ShardOptimistic:
   122  		ddlWorker.strategy = NewOptimistDDL(&ddlWorker.logger, syncer)
   123  	default:
   124  		ddlWorker.strategy = NewNormalDDL(&ddlWorker.logger, syncer)
   125  	}
   126  	return ddlWorker
   127  }
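// A brief usage sketch (illustrative only; the real wiring lives in the
// Syncer's binlog event loop, and the variable names below are assumptions):
//
//	// plogger is a *log.Logger and syncer is the *Syncer that owns this worker.
//	worker := NewDDLWorker(plogger, syncer)
//	// later, for each binlog query event received in the event loop:
//	if err := worker.HandleQueryEvent(queryEv, ec, originSQL); err != nil {
//		return err
//	}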
   128  
   129  type Normal struct {
   130  	logger log.Logger
   131  
   132  	trackDDL      func(usedSchema string, trackInfo *ddlInfo, ec *eventContext) error
   133  	onlineDDL     onlineddl.OnlinePlugin
   134  	handleJobFunc func(*job) (bool, error)
   135  	execError     *atomic.Error
   136  }
   137  
   138  func NewNormalDDL(pLogger *log.Logger, syncer *Syncer) *Normal {
   139  	return &Normal{
   140  		logger:        pLogger.WithFields(zap.String("mode", "normal")),
   141  		trackDDL:      syncer.trackDDL,
   142  		onlineDDL:     syncer.onlineDDL,
   143  		handleJobFunc: syncer.handleJobFunc,
   144  		execError:     &syncer.execError,
   145  	}
   146  }
   147  
   148  type Pessimist struct {
   149  	logger         log.Logger
   150  	sgk            *ShardingGroupKeeper
   151  	checkpoint     CheckPoint
   152  	onlineDDL      onlineddl.OnlinePlugin
   153  	metricsProxies *metrics.Proxies
   154  	name           string
   155  	sourceID       string
   156  
   157  	execError        *atomic.Error
   158  	safeMode         *sm.SafeMode
   159  	trackDDL         func(usedSchema string, trackInfo *ddlInfo, ec *eventContext) error
   160  	handleJobFunc    func(*job) (bool, error)
   161  	flushCheckPoints func() error
   162  	saveTablePoint   func(table *filter.Table, location binlog.Location)
   163  	pessimist        *shardddl.Pessimist // shard DDL pessimist
   164  }
   165  
   166  func NewPessimistDDL(pLogger *log.Logger, syncer *Syncer) *Pessimist {
   167  	return &Pessimist{
   168  		logger:           pLogger.WithFields(zap.String("mode", "pessimist")),
   169  		sgk:              syncer.sgk,
   170  		checkpoint:       syncer.checkpoint,
   171  		onlineDDL:        syncer.onlineDDL,
   172  		metricsProxies:   syncer.metricsProxies,
   173  		name:             syncer.cfg.Name,
   174  		sourceID:         syncer.cfg.SourceID,
   175  		execError:        &syncer.execError,
   176  		safeMode:         syncer.safeMode,
   177  		trackDDL:         syncer.trackDDL,
   178  		handleJobFunc:    syncer.handleJobFunc,
   179  		flushCheckPoints: syncer.flushCheckPoints,
   180  		saveTablePoint:   syncer.saveTablePoint,
   181  		pessimist:        syncer.pessimist,
   182  	}
   183  }
   184  
   185  type Optimist struct {
   186  	logger log.Logger
   187  
   188  	schemaTracker *schema.Tracker
   189  	flavor        string
   190  	enableGTID    bool
   191  	onlineDDL     onlineddl.OnlinePlugin
   192  	checkpoint    CheckPoint
   193  	trackDDL      func(usedSchema string, trackInfo *ddlInfo, ec *eventContext) error
   194  	handleJobFunc func(*job) (bool, error)
   195  	osgk          *OptShardingGroupKeeper // optimistic ddl's keeper to keep all sharding (sub) group in this syncer
   196  	getTableInfo  func(tctx *tcontext.Context, sourceTable, targetTable *filter.Table) (*model.TableInfo, error)
   197  	execError     *atomic.Error
   198  	optimist      *shardddl.Optimist // shard DDL optimist
   199  	strict        bool
   200  }
   201  
   202  func NewOptimistDDL(pLogger *log.Logger, syncer *Syncer) *Optimist {
   203  	return &Optimist{
   204  		logger:        pLogger.WithFields(zap.String("mode", "optimist")),
   205  		schemaTracker: syncer.schemaTracker,
   206  		flavor:        syncer.cfg.Flavor,
   207  		enableGTID:    syncer.cfg.EnableGTID,
   208  		onlineDDL:     syncer.onlineDDL,
   209  		checkpoint:    syncer.checkpoint,
   210  		trackDDL:      syncer.trackDDL,
   211  		handleJobFunc: syncer.handleJobFunc,
   212  		osgk:          syncer.osgk,
   213  		getTableInfo:  syncer.getTableInfo,
   214  		execError:     &syncer.execError,
   215  		optimist:      syncer.optimist,
   216  		strict:        syncer.cfg.StrictOptimisticShardMode,
   217  	}
   218  }
   219  
   220  func (ddl *DDLWorker) HandleQueryEvent(ev *replication.QueryEvent, ec eventContext, originSQL string) (err error) {
   221  	if originSQL == "BEGIN" {
   222  		return nil
   223  	}
   224  
   225  	codec, err := event.GetCharsetCodecByStatusVars(ev.StatusVars)
   226  	if err != nil {
   227  		ddl.logger.Error("get charset codec failed, will treat query as utf8", zap.Error(err))
   228  	} else if codec != nil {
   229  		converted, err2 := codec.NewDecoder().String(originSQL)
   230  		if err2 != nil {
   231  			ddl.logger.Error("convert query string failed, will treat query as utf8", zap.Error(err2))
   232  		} else {
   233  			originSQL = converted
   234  		}
   235  	}
   236  
   237  	qec := &queryEventContext{
   238  		eventContext:    &ec,
   239  		ddlSchema:       string(ev.Schema),
   240  		originSQL:       utils.TrimCtrlChars(originSQL),
   241  		splitDDLs:       make([]string, 0),
   242  		appliedDDLs:     make([]string, 0),
   243  		sourceTbls:      make(map[string]map[string]struct{}),
   244  		eventStatusVars: ev.StatusVars,
   245  	}
   246  
   247  	defer func() {
   248  		if err == nil {
   249  			return
   250  		}
   251  		// why is `skipSQLByPattern` checked in this defer instead of at the beginning?
   252  		// it is so that we track every ddl except for the one that causes the error.
   253  		// if `skipSQLByPattern` ran at the beginning, some ddls that should be tracked might be skipped.
   254  		needSkip, err2 := skipSQLByPattern(ddl.binlogFilter, qec.originSQL)
   255  		if err2 != nil {
   256  			err = err2
   257  			return
   258  		}
   259  		if !needSkip {
   260  			return
   261  		}
   262  		// don't return an error if the filter succeeds
   263  		ddl.metricsProxies.SkipBinlogDurationHistogram.WithLabelValues("query", ddl.name, ddl.sourceID).Observe(time.Since(ec.startTime).Seconds())
   264  		ddl.logger.Warn("skip event", zap.String("event", "query"), zap.Stringer("query event context", qec))
   265  		err = ddl.recordSkipSQLsLocation(&ec)
   266  	}()
   267  
   268  	qec.p, err = event.GetParserForStatusVars(ev.StatusVars)
   269  	if err != nil {
   270  		ddl.logger.Warn("found error when getting sql_mode from binlog status_vars", zap.Error(err))
   271  	}
   272  
   273  	qec.timezone, err = event.GetTimezoneByStatusVars(ev.StatusVars, ddl.upstreamTZStr)
   274  	// no timezone information retrieved and upstream timezone not previously set
   275  	if err != nil && ddl.upstreamTZStr == "" {
   276  		ddl.logger.Warn("found error when getting timezone from binlog status_vars", zap.Error(err))
   277  	}
   278  
   279  	qec.timestamp = ec.header.Timestamp
   280  
   281  	stmt, err := parseOneStmt(qec)
   282  	if err != nil {
   283  		return err
   284  	}
   285  
   286  	if _, ok := stmt.(ast.DDLNode); !ok {
   287  		ddl.logger.Info("ddl that dm doesn't handle, skip it", zap.String("event", "query"),
   288  			zap.Stringer("queryEventContext", qec))
   289  		return ddl.recordSkipSQLsLocation(qec.eventContext)
   290  	}
   291  
   292  	if qec.shardingReSync != nil {
   293  		qec.shardingReSync.currLocation = qec.endLocation
   294  		// TODO: refactor this, see https://github.com/pingcap/tiflow/issues/6691
   295  		// for optimistic ddl, we can resync idempotent ddl.
   296  		cmp := binlog.CompareLocation(qec.shardingReSync.currLocation, qec.shardingReSync.latestLocation, ddl.enableGTID)
   297  		if cmp > 0 || (cmp == 0 && ddl.shardMode != config.ShardOptimistic) {
   298  			ddl.logger.Info("re-replicate shard group was completed", zap.String("event", "query"), zap.Stringer("queryEventContext", qec))
   299  			return qec.closeShardingResync()
   300  		} else if ddl.shardMode != config.ShardOptimistic {
   301  			ddl.logger.Debug("skip event in re-replicating sharding group", zap.String("event", "query"), zap.Stringer("queryEventContext", qec))
   302  			return nil
   303  		}
   304  		// in optimistic shard mode, this situation will be handled through the table point after
   305  		// we split the ddls and handle the appliedDDLs
   306  	}
   307  
   308  	ddl.logger.Info("ready to split ddl", zap.String("event", "query"), zap.Stringer("queryEventContext", qec))
   309  
   310  	// TiDB can't handle multi-schema-change DDL, so we split it here.
   311  	qec.splitDDLs, err = parserpkg.SplitDDL(stmt, qec.ddlSchema)
   312  	if err != nil {
   313  		return err
   314  	}
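	// For example (illustrative; the exact output format is decided by parserpkg.SplitDDL),
	// a multi-schema-change statement such as
	//
	//	ALTER TABLE db.tbl ADD COLUMN c1 INT, ADD INDEX idx_c1(c1)
	//
	// is split into single-change statements roughly like
	//
	//	ALTER TABLE `db`.`tbl` ADD COLUMN `c1` INT
	//	ALTER TABLE `db`.`tbl` ADD INDEX `idx_c1`(`c1`)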
   315  
   316  	// for DDL, we don't apply the operator until we try to execute it, so sharding cases can be handled
   317  	// we use the default parser because the sqls that the inner functions need to parse come from parserpkg.SplitDDL, which outputs StringSingleQuotes, KeyWordUppercase and NameBackQuotes
   318  	// TODO: save stmt, tableName to avoid parsing the sql to get them again
   319  	qec.p = parser.New()
   320  	for _, sql := range qec.splitDDLs {
   321  		sqls, err2 := ddl.processOneDDL(qec, sql)
   322  		if err2 != nil {
   323  			ddl.logger.Error("fail to process ddl", zap.String("event", "query"), zap.Stringer("queryEventContext", qec), log.ShortError(err2))
   324  			return err2
   325  		}
   326  		qec.appliedDDLs = append(qec.appliedDDLs, sqls...)
   327  	}
   328  	ddl.logger.Info("resolve sql", zap.String("event", "query"), zap.Strings("appliedDDLs", qec.appliedDDLs), zap.Stringer("queryEventContext", qec))
   329  
   330  	ddl.metricsProxies.BinlogEventCost.WithLabelValues(metrics.BinlogEventCostStageGenQuery, ddl.name, ddl.workerName, ddl.sourceID).Observe(time.Since(qec.startTime).Seconds())
   331  
   332  	/*
   333  		we construct an application transaction for the ddl. we save the checkpoint after we execute all ddls
   334  		Here's a brief discussion of the implementation:
   335  		* non-sharding table: makes no difference
   336  		* sharding table - we limit one ddl event to only contain operations for the same table
   337  		  * drop database / drop table / truncate table: we ignore these operations
   338  		  * create database / create table / create index / drop index / alter table:
   339  			the operation is only for the same table, so it makes no difference
   340  		  * rename table
   341  			* online ddl: we ignore renaming the ghost table, so it makes no difference
   342  			* other rename: we don't allow the user to execute more than one rename operation in one ddl event, so it makes no difference
   343  	*/
   344  
   345  	qec.needHandleDDLs = make([]string, 0, len(qec.appliedDDLs))
   346  	qec.trackInfos = make([]*ddlInfo, 0, len(qec.appliedDDLs))
   347  
   348  	// handle each single-schema-change DDL
   349  	for _, sql := range qec.appliedDDLs {
   350  		if len(sql) == 0 {
   351  			continue
   352  		}
   353  		// we use the default parser because the sqls come from the splitting above (parserpkg.SplitDDL), which outputs StringSingleQuotes, KeyWordUppercase and NameBackQuotes
   354  		ddlInfo, err2 := ddl.genDDLInfo(qec, sql)
   355  		if err2 != nil {
   356  			return err2
   357  		}
   358  		sourceTable := ddlInfo.sourceTables[0]
   359  		targetTable := ddlInfo.targetTables[0]
   360  		if len(ddlInfo.routedDDL) == 0 {
   361  			ddl.metricsProxies.SkipBinlogDurationHistogram.WithLabelValues("query", ddl.name, ddl.sourceID).Observe(time.Since(qec.startTime).Seconds())
   362  			ddl.logger.Warn("skip event", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", qec.ddlSchema))
   363  			continue
   364  		}
   365  
   366  		// DDL is sequentially synchronized in this syncer's main process goroutine
   367  		// filter DDLs that are older than or the same as the table checkpoint, to avoid syncing already synced DDLs again
   368  		if ddl.checkpoint.IsOlderThanTablePoint(sourceTable, qec.endLocation) {
   369  			ddl.logger.Info("filter obsolete DDL", zap.String("event", "query"), zap.String("statement", sql), log.WrapStringerField("location", qec.endLocation))
   370  			continue
   371  		}
   372  
   373  		// pre-filter of sharding
   374  		if filter, err2 := ddl.strategy.preFilter(ddlInfo, qec, sourceTable, targetTable); err2 != nil {
   375  			return err2
   376  		} else if filter {
   377  			continue
   378  		}
   379  
   380  		qec.needHandleDDLs = append(qec.needHandleDDLs, ddlInfo.routedDDL)
   381  		qec.trackInfos = append(qec.trackInfos, ddlInfo)
   382  		// TODO: current table checkpoints will be deleted in track ddls, but created and updated in flush checkpoints,
   383  		//       we should use a better mechanism to combine these operations
   384  		if ddl.shardMode == "" {
   385  			recordSourceTbls(qec.sourceTbls, ddlInfo.stmtCache, sourceTable)
   386  		}
   387  	}
   388  
   389  	ddl.logger.Info("prepare to handle ddls", zap.String("event", "query"), zap.Stringer("queryEventContext", qec))
   390  	if len(qec.needHandleDDLs) == 0 {
   391  		ddl.logger.Info("skip event, need handled ddls is empty", zap.String("event", "query"), zap.Stringer("queryEventContext", qec))
   392  		return ddl.recordSkipSQLsLocation(qec.eventContext)
   393  	}
   394  
   395  	// interrupted before flush old checkpoint.
   396  	failpoint.Inject("FlushCheckpointStage", func(val failpoint.Value) {
   397  		err = handleFlushCheckpointStage(0, val.(int), "before flush old checkpoint")
   398  		if err != nil {
   399  			failpoint.Return(err)
   400  		}
   401  	})
   402  
   403  	// flush previous DMLs and the checkpoint if the DDL needs to be handled.
   404  	// NOTE: do this flush before operations on shard groups, which may otherwise lead to skipping a table because of `UnresolvedTables`.
   405  	if err = ddl.flushJobs(); err != nil {
   406  		return err
   407  	}
   408  
   409  	return ddl.strategy.handleDDL(qec)
   410  }
   411  
   412  func (ddl *Normal) preFilter(*ddlInfo, *queryEventContext, *filter.Table, *filter.Table) (bool, error) {
   413  	return false, nil
   414  }
   415  
   416  func (ddl *Normal) handleDDL(qec *queryEventContext) error {
   417  	ddl.logger.Info("start to handle ddls in normal mode", zap.String("event", "query"), zap.Stringer("queryEventContext", qec))
   418  
   419  	// interrupted after flush old checkpoint and before track DDL.
   420  	failpoint.Inject("FlushCheckpointStage", func(val failpoint.Value) {
   421  		err := handleFlushCheckpointStage(1, val.(int), "before track DDL")
   422  		if err != nil {
   423  			failpoint.Return(err)
   424  		}
   425  	})
   426  
   427  	// run trackDDL before adding the ddl job to make sure the checkpoint can be flushed
   428  	for _, trackInfo := range qec.trackInfos {
   429  		if err := ddl.trackDDL(qec.ddlSchema, trackInfo, qec.eventContext); err != nil {
   430  			return err
   431  		}
   432  	}
   433  
   434  	// interrupted after track DDL and before execute DDL.
   435  	failpoint.Inject("FlushCheckpointStage", func(val failpoint.Value) {
   436  		err := handleFlushCheckpointStage(2, val.(int), "before execute DDL")
   437  		if err != nil {
   438  			failpoint.Return(err)
   439  		}
   440  	})
   441  
   442  	job := newDDLJob(qec)
   443  	_, err := ddl.handleJobFunc(job)
   444  	if err != nil {
   445  		return err
   446  	}
   447  
   448  	// when the ddl job is added, the ddl will be executed and then the checkpoint flushed.
   449  	// if executing the ddl failed, execError will be set to that error.
   450  	// return nil here to avoid a duplicate error message
   451  	err = ddl.execError.Load()
   452  	if err != nil {
   453  		ddl.logger.Error("error detected when executing SQL job", log.ShortError(err))
   454  		// nolint:nilerr
   455  		return nil
   456  	}
   457  
   458  	ddl.logger.Info("finish to handle ddls in normal mode", zap.String("event", "query"), zap.Stringer("queryEventContext", qec))
   459  
   460  	if qec.onlineDDLTable != nil {
   461  		ddl.logger.Info("finish online ddl and clear online ddl metadata in normal mode",
   462  			zap.String("event", "query"),
   463  			zap.Strings("ddls", qec.needHandleDDLs),
   464  			zap.String("raw statement", qec.originSQL),
   465  			zap.Stringer("table", qec.onlineDDLTable))
   466  		err2 := ddl.onlineDDL.Finish(qec.tctx, qec.onlineDDLTable)
   467  		if err2 != nil {
   468  			return terror.Annotatef(err2, "finish online ddl on %v", qec.onlineDDLTable)
   469  		}
   470  	}
   471  
   472  	return nil
   473  }
   474  
   475  func (ddl *Pessimist) preFilter(ddlInfo *ddlInfo, qec *queryEventContext, sourceTable *filter.Table, targetTable *filter.Table) (bool, error) {
   476  	switch ddlInfo.stmtCache.(type) {
   477  	case *ast.DropDatabaseStmt:
   478  		err := ddl.dropSchemaInSharding(qec.tctx, sourceTable.Schema)
   479  		if err != nil {
   480  			return false, err
   481  		}
   482  		return true, nil
   483  	case *ast.DropTableStmt:
   484  		sourceTableID := utils.GenTableID(sourceTable)
   485  		err := ddl.sgk.LeaveGroup(targetTable, []string{sourceTableID})
   486  		if err != nil {
   487  			return false, err
   488  		}
   489  		err = ddl.checkpoint.DeleteTablePoint(qec.tctx, sourceTable)
   490  		if err != nil {
   491  			return false, err
   492  		}
   493  		return true, nil
   494  	case *ast.TruncateTableStmt:
   495  		ddl.logger.Info("filter truncate table statement in shard group", zap.String("event", "query"), zap.String("statement", ddlInfo.routedDDL))
   496  		return true, nil
   497  	}
   498  
   499  	// in sharding mode, we only support one ddl per event
   500  	if qec.shardingDDLInfo == nil {
   501  		qec.shardingDDLInfo = ddlInfo
   502  	} else if qec.shardingDDLInfo.sourceTables[0].String() != sourceTable.String() {
   503  		return false, terror.ErrSyncerUnitDDLOnMultipleTable.Generate(qec.originSQL)
   504  	}
   505  
   506  	return false, nil
   507  }
   508  
   509  func (ddl *Pessimist) handleDDL(qec *queryEventContext) error {
   510  	var (
   511  		err                error
   512  		needShardingHandle bool
   513  		group              *ShardingGroup
   514  		synced             bool
   515  		active             bool
   516  		remain             int
   517  
   518  		ddlInfo        = qec.shardingDDLInfo
   519  		sourceTableID  = utils.GenTableID(ddlInfo.sourceTables[0])
   520  		needHandleDDLs = qec.needHandleDDLs
   521  		// for sharding DDL, the firstPos should be the `Pos` of the binlog, not the `End_log_pos`
   522  		// so when restarting before the sharding DDLs are synced, this binlog can be re-synced again to trigger TrySync
   523  		startLocation = qec.startLocation
   524  		endLocation   = qec.endLocation
   525  	)
   526  
   527  	var annotate string
   528  	switch ddlInfo.stmtCache.(type) {
   529  	case *ast.CreateDatabaseStmt:
   530  		// for CREATE DATABASE, we do nothing. when a CREATE TABLE happens under this DATABASE, sharding groups will be added
   531  	case *ast.CreateTableStmt:
   532  		// for CREATE TABLE, we add it to the group
   533  		needShardingHandle, group, synced, remain, err = ddl.sgk.AddGroup(ddlInfo.targetTables[0], []string{sourceTableID}, nil, true)
   534  		if err != nil {
   535  			return err
   536  		}
   537  		annotate = "add table to shard group"
   538  	default:
   539  		needShardingHandle, group, synced, active, remain, err = ddl.sgk.TrySync(ddlInfo.sourceTables[0], ddlInfo.targetTables[0], startLocation, qec.endLocation, needHandleDDLs)
   540  		if err != nil {
   541  			return err
   542  		}
   543  		annotate = "try to sync table in shard group"
   544  		// met a DDL that will not be processed in sequence sharding
   545  		if !active {
   546  			ddl.logger.Info("skip in-activeDDL",
   547  				zap.String("event", "query"),
   548  				zap.Stringer("queryEventContext", qec),
   549  				zap.String("sourceTableID", sourceTableID),
   550  				zap.Bool("in-sharding", needShardingHandle),
   551  				zap.Bool("is-synced", synced),
   552  				zap.Int("unsynced", remain))
   553  			return nil
   554  		}
   555  	}
   556  
   557  	ddl.logger.Info(annotate,
   558  		zap.String("event", "query"),
   559  		zap.Stringer("queryEventContext", qec),
   560  		zap.String("sourceTableID", sourceTableID),
   561  		zap.Bool("in-sharding", needShardingHandle),
   562  		zap.Bool("is-synced", synced),
   563  		zap.Int("unsynced", remain))
   564  
   565  	// interrupted after flush old checkpoint and before track DDL.
   566  	failpoint.Inject("FlushCheckpointStage", func(val failpoint.Value) {
   567  		err = handleFlushCheckpointStage(1, val.(int), "before track DDL")
   568  		if err != nil {
   569  			failpoint.Return(err)
   570  		}
   571  	})
   572  
   573  	for _, trackInfo := range qec.trackInfos {
   574  		if err = ddl.trackDDL(qec.ddlSchema, trackInfo, qec.eventContext); err != nil {
   575  			return err
   576  		}
   577  	}
   578  
   579  	if needShardingHandle {
   580  		ddl.metricsProxies.UnsyncedTableGauge.WithLabelValues(ddl.name, ddlInfo.targetTables[0].String(), ddl.sourceID).Set(float64(remain))
   581  		err = ddl.safeMode.IncrForTable(qec.tctx, ddlInfo.targetTables[0]) // try enable safe-mode when starting syncing for sharding group
   582  		if err != nil {
   583  			return err
   584  		}
   585  
   586  		// save the checkpoint in memory; don't worry, if an error occurs, we can roll it back
   587  		// for a non-last sharding DDL's table, this checkpoint will be used to skip binlog events when re-syncing
   588  		// NOTE: when the last sharding DDL is executed, all these checkpoints will be flushed in the same txn
   589  		ddl.logger.Info("save table checkpoint for source",
   590  			zap.String("event", "query"),
   591  			zap.String("sourceTableID", sourceTableID),
   592  			zap.Stringer("start location", startLocation),
   593  			log.WrapStringerField("end location", endLocation))
   594  		ddl.saveTablePoint(ddlInfo.sourceTables[0], endLocation)
   595  		if !synced {
   596  			ddl.logger.Info("source shard group is not synced",
   597  				zap.String("event", "query"),
   598  				zap.String("sourceTableID", sourceTableID),
   599  				zap.Stringer("start location", startLocation),
   600  				log.WrapStringerField("end location", endLocation))
   601  			return nil
   602  		}
   603  
   604  		ddl.logger.Info("source shard group is synced",
   605  			zap.String("event", "query"),
   606  			zap.String("sourceTableID", sourceTableID),
   607  			zap.Stringer("start location", startLocation),
   608  			log.WrapStringerField("end location", endLocation))
   609  		err = ddl.safeMode.DescForTable(qec.tctx, ddlInfo.targetTables[0]) // try disable safe-mode after sharding group synced
   610  		if err != nil {
   611  			return err
   612  		}
   613  		// multiple groups' sharding DDLs may be synced in this for-loop (one query-event, multiple tables)
   614  		if cap(*qec.shardingReSyncCh) < len(needHandleDDLs) {
   615  			*qec.shardingReSyncCh = make(chan *ShardingReSync, len(needHandleDDLs))
   616  		}
   617  		firstEndLocation := group.FirstEndPosUnresolved()
   618  		if firstEndLocation == nil {
   619  			return terror.ErrSyncerUnitFirstEndPosNotFound.Generate(sourceTableID)
   620  		}
   621  
   622  		allResolved, err2 := ddl.sgk.ResolveShardingDDL(ddlInfo.targetTables[0])
   623  		if err2 != nil {
   624  			return err2
   625  		}
   626  		*qec.shardingReSyncCh <- &ShardingReSync{
   627  			currLocation:   *firstEndLocation,
   628  			latestLocation: endLocation,
   629  			targetTable:    ddlInfo.targetTables[0],
   630  			allResolved:    allResolved,
   631  		}
   632  
   633  		// Don't send a new DDLInfo to dm-master until all local sql jobs have finished
   634  		// since jobWg was already flushed by flushJobs before, we don't wait here any more
   635  
   636  		// NOTE: if we need a singleton Syncer (without dm-master) to support sharding DDL sync,
   637  		// we should add another config item to differentiate, not save the DDLInfo, and not wait for ddlExecInfo
   638  
   639  		// construct & send shard DDL info into etcd, DM-master will handle it.
   640  		shardInfo := ddl.pessimist.ConstructInfo(ddlInfo.targetTables[0].Schema, ddlInfo.targetTables[0].Name, needHandleDDLs)
   641  		rev, err2 := ddl.pessimist.PutInfo(qec.tctx.Ctx, shardInfo)
   642  		if err2 != nil {
   643  			return err2
   644  		}
   645  		ddl.metricsProxies.Metrics.ShardLockResolving.Set(1) // block and wait DDL lock to be synced
   646  		ddl.logger.Info("putted shard DDL info", zap.Stringer("info", shardInfo), zap.Int64("revision", rev))
   647  
   648  		shardOp, err2 := ddl.pessimist.GetOperation(qec.tctx.Ctx, shardInfo, rev+1)
   649  		ddl.metricsProxies.Metrics.ShardLockResolving.Set(0)
   650  		if err2 != nil {
   651  			return err2
   652  		}
   653  
   654  		if shardOp.Exec {
   655  			failpoint.Inject("ShardSyncedExecutionExit", func() {
   656  				ddl.logger.Warn("exit triggered", zap.String("failpoint", "ShardSyncedExecutionExit"))
   657  				//nolint:errcheck
   658  				ddl.flushCheckPoints()
   659  				utils.OsExit(1)
   660  			})
   661  			failpoint.Inject("SequenceShardSyncedExecutionExit", func() {
   662  				group := ddl.sgk.Group(ddlInfo.targetTables[0])
   663  				if group != nil {
   664  					// exit only in the first round of sequence sharding DDL
   665  					if group.meta.ActiveIdx() == 1 {
   666  						ddl.logger.Warn("exit triggered", zap.String("failpoint", "SequenceShardSyncedExecutionExit"))
   667  						//nolint:errcheck
   668  						ddl.flushCheckPoints()
   669  						utils.OsExit(1)
   670  					}
   671  				}
   672  			})
   673  
   674  			ddl.logger.Info("execute DDL job",
   675  				zap.String("event", "query"),
   676  				zap.Stringer("queryEventContext", qec),
   677  				zap.String("sourceTableID", sourceTableID),
   678  				zap.Stringer("operation", shardOp))
   679  		} else {
   680  			ddl.logger.Info("ignore DDL job",
   681  				zap.String("event", "query"),
   682  				zap.Stringer("queryEventContext", qec),
   683  				zap.String("sourceTableID", sourceTableID),
   684  				zap.Stringer("operation", shardOp))
   685  		}
   686  	}
   687  
   688  	ddl.logger.Info("start to handle ddls in shard mode", zap.String("event", "query"), zap.Stringer("queryEventContext", qec))
   689  
   690  	// interrupted after track DDL and before execute DDL.
   691  	failpoint.Inject("FlushCheckpointStage", func(val failpoint.Value) {
   692  		err = handleFlushCheckpointStage(2, val.(int), "before execute DDL")
   693  		if err != nil {
   694  			failpoint.Return(err)
   695  		}
   696  	})
   697  
   698  	job := newDDLJob(qec)
   699  	_, err = ddl.handleJobFunc(job)
   700  	if err != nil {
   701  		return err
   702  	}
   703  
   704  	err = ddl.execError.Load()
   705  	if err != nil {
   706  		ddl.logger.Error("error detected when executing SQL job", log.ShortError(err))
   707  		// nolint:nilerr
   708  		return nil
   709  	}
   710  
   711  	if qec.onlineDDLTable != nil {
   712  		err = ddl.clearOnlineDDL(qec.tctx, ddlInfo.targetTables[0])
   713  		if err != nil {
   714  			return err
   715  		}
   716  	}
   717  
   718  	ddl.logger.Info("finish to handle ddls in shard mode", zap.String("event", "query"), zap.Stringer("queryEventContext", qec))
   719  	return nil
   720  }
   721  
   722  func (ddl *Optimist) preFilter(ddlInfo *ddlInfo, qec *queryEventContext, sourceTable *filter.Table, targetTable *filter.Table) (bool, error) {
   723  	if ddl.osgk.inConflictStage(sourceTable, targetTable) {
   724  		// if in unsync stage and not before active DDL, filter it
   725  		// if in sharding re-sync stage and not before active DDL (the next DDL to be synced), filter it
   726  		ddl.logger.Info("replicate sharding DDL, filter Conflicted table's ddl events",
   727  			zap.String("event", "query"),
   728  			zap.Stringer("source", sourceTable),
   729  			log.WrapStringerField("location", qec.endLocation))
   730  		return true, nil
   731  	} else if qec.shardingReSync != nil && qec.shardingReSync.targetTable.String() != targetTable.String() {
   732  		// in re-syncing, ignore events that don't belong to the current sharding group
   733  		ddl.logger.Info("skip event in re-replicating shard group", zap.String("event", "query"), zap.Stringer("re-shard", qec.shardingReSync))
   734  		return true, nil
   735  	}
   736  	switch ddlInfo.stmtCache.(type) {
   737  	case *ast.TruncateTableStmt:
   738  		ddl.logger.Info("filter truncate table statement in shard group", zap.String("event", "query"), zap.String("statement", ddlInfo.routedDDL))
   739  		return true, nil
   740  	case *ast.RenameTableStmt:
   741  		return false, terror.ErrSyncerUnsupportedStmt.Generate("RENAME TABLE", config.ShardOptimistic)
   742  	}
   743  	return false, nil
   744  }
   745  
   746  func (ddl *Optimist) handleDDL(qec *queryEventContext) error {
   747  	// interrupted after flush old checkpoint and before track DDL.
   748  	failpoint.Inject("FlushCheckpointStage", func(val failpoint.Value) {
   749  		err := handleFlushCheckpointStage(1, val.(int), "before track DDL")
   750  		if err != nil {
   751  			failpoint.Return(err)
   752  		}
   753  	})
   754  
   755  	var (
   756  		upTable   *filter.Table
   757  		downTable *filter.Table
   758  
   759  		isDBDDL  bool
   760  		tiBefore *model.TableInfo
   761  		tiAfter  *model.TableInfo
   762  		tisAfter []*model.TableInfo
   763  		err      error
   764  
   765  		trackInfos = qec.trackInfos
   766  	)
   767  
   768  	err = ddl.execError.Load()
   769  	if err != nil {
   770  		ddl.logger.Error("error detected when executing SQL job", log.ShortError(err))
   771  		// nolint:nilerr
   772  		return nil
   773  	}
   774  
   775  	switch trackInfos[0].stmtCache.(type) {
   776  	case *ast.CreateDatabaseStmt, *ast.DropDatabaseStmt, *ast.AlterDatabaseStmt:
   777  		isDBDDL = true
   778  	}
   779  
   780  	for _, trackInfo := range trackInfos {
   781  		// check whether the shard DDL covers multiple upstream tables.
   782  		if upTable != nil && upTable.String() != "``" && upTable.String() != trackInfo.sourceTables[0].String() {
   783  			return terror.ErrSyncerUnitDDLOnMultipleTable.Generate(qec.originSQL)
   784  		}
   785  		upTable = trackInfo.sourceTables[0]
   786  		downTable = trackInfo.targetTables[0]
   787  	}
   788  
   789  	if !isDBDDL {
   790  		if _, ok := trackInfos[0].stmtCache.(*ast.CreateTableStmt); !ok {
   791  			tiBefore, err = ddl.getTableInfo(qec.tctx, upTable, downTable)
   792  			if err != nil {
   793  				return err
   794  			}
   795  		}
   796  	}
   797  
   798  	for _, trackInfo := range trackInfos {
   799  		if err = ddl.trackDDL(qec.ddlSchema, trackInfo, qec.eventContext); err != nil {
   800  			return err
   801  		}
   802  		if !isDBDDL {
   803  			tiAfter, err = ddl.getTableInfo(qec.tctx, upTable, downTable)
   804  			if err != nil {
   805  				return err
   806  			}
   807  			tisAfter = append(tisAfter, tiAfter)
   808  		}
   809  	}
   810  
   811  	// in optimistic mode, don't `saveTablePoint` before executing the DDL,
   812  	// because there are no `UnresolvedTables` to prevent the flush of this checkpoint.
   813  
   814  	info := ddl.optimist.ConstructInfo(upTable.Schema, upTable.Name, downTable.Schema, downTable.Name, qec.needHandleDDLs, tiBefore, tisAfter)
   815  
   816  	var (
   817  		rev    int64
   818  		skipOp bool
   819  		op     optimism.Operation
   820  	)
   821  	switch trackInfos[0].stmtCache.(type) {
   822  	case *ast.CreateDatabaseStmt, *ast.AlterDatabaseStmt:
   823  		// need to execute the DDL to the downstream, but do not do the coordination with DM-master.
   824  		op.DDLs = qec.needHandleDDLs
   825  		skipOp = true
   826  	case *ast.DropDatabaseStmt:
   827  		skipOp = true
   828  		ddl.osgk.RemoveSchema(upTable.Schema)
   829  	case *ast.CreateTableStmt:
   830  		// need to execute the DDL to the downstream, but do not do the coordination with DM-master.
   831  		op.DDLs = qec.needHandleDDLs
   832  		skipOp = true
   833  		if err = ddl.checkpoint.FlushPointsWithTableInfos(qec.tctx, []*filter.Table{upTable}, []*model.TableInfo{tiAfter}); err != nil {
   834  			ddl.logger.Error("failed to flush create table info", zap.Stringer("table", upTable), zap.Strings("ddls", qec.needHandleDDLs), log.ShortError(err))
   835  		}
   836  		if _, err = ddl.optimist.AddTable(info); err != nil {
   837  			return err
   838  		}
   839  	case *ast.DropTableStmt:
   840  		skipOp = true
   841  		if _, err = ddl.optimist.RemoveTable(info); err != nil {
   842  			return err
   843  		}
   844  		ddl.osgk.RemoveGroup(downTable, []string{utils.GenTableID(upTable)})
   845  	default:
   846  		rev, err = ddl.optimist.PutInfo(info)
   847  		if err != nil {
   848  			return err
   849  		}
   850  	}
   851  
   852  	ddl.logger.Info("putted a shard DDL info into etcd", zap.Stringer("info", info))
   853  	if !skipOp {
   854  		for {
   855  			op, err = ddl.optimist.GetOperation(qec.tctx.Ctx, info, rev+1)
   856  			if err != nil {
   857  				return err
   858  			}
   859  			ddl.logger.Info("got a shard DDL lock operation", zap.Stringer("operation", op))
   860  			if op.ConflictStage != optimism.ConflictDetected {
   861  				break
   862  			}
   863  			if ddl.strict {
   864  				return terror.ErrSyncerShardDDLConflict.Generate(qec.needHandleDDLs, op.ConflictMsg)
   865  			}
   866  			rev = op.Revision
   867  			ddl.logger.Info("operation conflict detected, waiting for resolve", zap.Stringer("info", info))
   868  		}
   869  	}
   870  
   871  	switch op.ConflictStage {
   872  	case optimism.ConflictError:
   873  		return terror.ErrSyncerShardDDLConflict.Generate(qec.needHandleDDLs, op.ConflictMsg)
   874  	// if this ddl is a ConflictSkipWaitRedirect ddl, we should skip all of this worker's following ddls/dmls until the lock is resolved.
   875  	// To do this, we append this table to osgk to prevent the following ddls/dmls from being executed.
   876  	// the conflict location must be the start location of the currently received ddl event.
   877  	case optimism.ConflictSkipWaitRedirect:
   878  		if ddl.strict {
   879  			return terror.ErrSyncerShardDDLConflict.Generate(qec.needHandleDDLs, "")
   880  		}
   881  		// TODO: check if we don't need Clone for startLocation
   882  		first := ddl.osgk.appendConflictTable(upTable, downTable, qec.startLocation.Clone(), ddl.flavor, ddl.enableGTID)
   883  		if first {
   884  			ddl.optimist.GetRedirectOperation(qec.tctx.Ctx, info, op.Revision+1)
   885  		}
   886  		// This conflicted ddl is not executed in the downstream, so we need to revert the tableInfo in schemaTracker to `tiBefore`.
   887  		err = ddl.schemaTracker.DropTable(upTable)
   888  		if err != nil {
   889  			ddl.logger.Error("fail to drop table to rollback table in schema tracker", zap.Stringer("table", upTable))
   890  		} else {
   891  			err = ddl.schemaTracker.CreateTableIfNotExists(upTable, tiBefore)
   892  			if err != nil {
   893  				ddl.logger.Error("fail to recreate table to rollback table in schema tracker", zap.Stringer("table", upTable))
   894  			} else {
   895  				ddl.logger.Info("skip conflict ddls in optimistic shard mode", zap.String("event", "query"), zap.Stringer("queryEventContext", qec))
   896  			}
   897  		}
   898  		return err
   899  	}
   900  
   901  	// update needHandleDDLs to the DDLs received from DM-master.
   902  	qec.needHandleDDLs = op.DDLs
   903  
   904  	ddl.logger.Info("start to handle ddls in optimistic shard mode", zap.String("event", "query"), zap.Stringer("queryEventContext", qec))
   905  
   906  	// interrupted after track DDL and before execute DDL.
   907  	failpoint.Inject("FlushCheckpointStage", func(val failpoint.Value) {
   908  		err = handleFlushCheckpointStage(2, val.(int), "before execute DDL")
   909  		if err != nil {
   910  			failpoint.Return(err)
   911  		}
   912  	})
   913  
   914  	qec.shardingDDLInfo = trackInfos[0]
   915  	job := newDDLJob(qec)
   916  	_, err = ddl.handleJobFunc(job)
   917  	if err != nil {
   918  		return err
   919  	}
   920  
   921  	err = ddl.execError.Load()
   922  	if err != nil {
   923  		ddl.logger.Error("error detected when executing SQL job", log.ShortError(err))
   924  		// nolint:nilerr
   925  		return nil
   926  	}
   927  
   928  	if qec.onlineDDLTable != nil {
   929  		ddl.logger.Info("finish online ddl and clear online ddl metadata in optimistic shard mode",
   930  			zap.String("event", "query"),
   931  			zap.Strings("ddls", qec.needHandleDDLs),
   932  			zap.String("raw statement", qec.originSQL),
   933  			zap.Stringer("table", qec.onlineDDLTable))
   934  		err = ddl.onlineDDL.Finish(qec.tctx, qec.onlineDDLTable)
   935  		if err != nil {
   936  			return terror.Annotatef(err, "finish online ddl on %v", qec.onlineDDLTable)
   937  		}
   938  	}
   939  
   940  	// we don't resolveOptimisticDDL here because it may cause a correctness problem.
   941  	// There are two cases if we receive ConflictNone here:
   942  	// 1. This shard table is the only shard table on this worker. We don't need to redirect in this case.
   943  	// 2. This shard table isn't the only shard table. The previously conflicted table will receive a redirection event.
   944  	// If we resolveOptimisticDDL here and this ddl event is idempotent, it may falsely resolve a conflict that
   945  	// has a totally different ddl.
   946  
   947  	ddl.logger.Info("finish to handle ddls in optimistic shard mode", zap.String("event", "query"), zap.Stringer("queryEventContext", qec))
   948  	return nil
   949  }
   950  
   951  func parseOneStmt(qec *queryEventContext) (stmt ast.StmtNode, err error) {
   952  	// We use Parse instead of ParseOneStmt here, because sometimes we get a commented-out ddl which can't be parsed
   953  	// by ParseOneStmt (a limitation of the tidb parser).
   954  	qec.tctx.L().Info("parse ddl", zap.String("event", "query"), zap.Stringer("query event context", qec))
   955  	stmts, err := parserpkg.Parse(qec.p, qec.originSQL, "", "")
   956  	if err != nil {
   957  		// log the error rather than fatal, so other defers can be executed
   958  		qec.tctx.L().Error("parse ddl", zap.String("event", "query"), zap.Stringer("query event context", qec))
   959  		return nil, terror.ErrSyncerParseDDL.Delegate(err, qec.originSQL)
   960  	}
   961  	if len(stmts) == 0 {
   962  		return nil, nil
   963  	}
   964  	return stmts[0], nil
   965  }
   966  
   967  // copy from https://github.com/pingcap/tidb/blob/fc4f8a1d8f5342cd01f78eb460e47d78d177ed20/ddl/column.go#L366
   968  func (ddl *DDLWorker) needChangeColumnData(oldCol, newCol *table.Column) bf.EventType {
   969  	toUnsigned := mysql.HasUnsignedFlag(newCol.GetFlag())
   970  	originUnsigned := mysql.HasUnsignedFlag(oldCol.GetFlag())
   971  	needTruncationOrToggleSign := func() bool {
   972  		return (newCol.GetFlen() > 0 && (newCol.GetFlen() < oldCol.GetFlen() || newCol.GetDecimal() < oldCol.GetDecimal())) ||
   973  			(toUnsigned != originUnsigned)
   974  	}
   975  	// Ignore the potential max display length represented by integer's flen, use default flen instead.
   976  	defaultOldColFlen, _ := mysql.GetDefaultFieldLengthAndDecimal(oldCol.GetType())
   977  	defaultNewColFlen, _ := mysql.GetDefaultFieldLengthAndDecimal(newCol.GetType())
   978  	needTruncationOrToggleSignForInteger := func() bool {
   979  		return (defaultNewColFlen > 0 && defaultNewColFlen < defaultOldColFlen) || (toUnsigned != originUnsigned)
   980  	}
   981  
   982  	// Deal with the same type.
   983  	if oldCol.GetType() == newCol.GetType() {
   984  		switch oldCol.GetType() {
   985  		case mysql.TypeNewDecimal:
   986  			// Since type decimal will encode the precision, frac, negative(signed) and wordBuf into storage together, there is no short
   987  			// cut to eliminate data reorg change for column type change between decimal.
   988  			if oldCol.GetFlen() != newCol.GetFlen() || oldCol.GetDecimal() != newCol.GetDecimal() || toUnsigned != originUnsigned {
   989  				return bf.PrecisionDecrease
   990  			}
   991  			return bf.AlterTable
   992  		case mysql.TypeEnum, mysql.TypeSet:
   993  			if tidbddl.IsElemsChangedToModifyColumn(oldCol.GetElems(), newCol.GetElems()) {
   994  				return bf.ValueRangeDecrease
   995  			}
   996  			return bf.AlterTable
   997  		case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong:
   998  			if toUnsigned != originUnsigned {
   999  				return bf.PrecisionDecrease
  1000  			}
  1001  			return bf.AlterTable
  1002  		case mysql.TypeString:
  1003  			// Due to the behavior of padding \x00 at binary type, always change column data when binary length changed
  1004  			if types.IsBinaryStr(&oldCol.FieldType) {
  1005  				if newCol.GetFlen() != oldCol.GetFlen() {
  1006  					return bf.PrecisionDecrease
  1007  				}
  1008  			}
  1009  			return bf.AlterTable
  1010  		}
  1011  
  1012  		if needTruncationOrToggleSign() {
  1013  			return bf.ValueRangeDecrease
  1014  		}
  1015  		return bf.AlterTable
  1016  	}
  1017  
  1018  	if tidbddl.ConvertBetweenCharAndVarchar(oldCol.GetType(), newCol.GetType()) {
  1019  		return bf.ModifyColumn
  1020  	}
  1021  
  1022  	// Deal with the different type.
  1023  	switch oldCol.GetType() {
  1024  	case mysql.TypeVarchar, mysql.TypeString, mysql.TypeVarString, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
  1025  		switch newCol.GetType() {
  1026  		case mysql.TypeVarchar, mysql.TypeString, mysql.TypeVarString, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
  1027  			if needTruncationOrToggleSign() {
  1028  				return bf.ModifyColumn
  1029  			}
  1030  			return bf.AlterTable
  1031  		}
  1032  	case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong:
  1033  		switch newCol.GetType() {
  1034  		case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong:
  1035  			if needTruncationOrToggleSignForInteger() {
  1036  				return bf.ValueRangeDecrease
  1037  			}
  1038  			return bf.AlterTable
  1039  		}
  1040  	}
  1041  
  1042  	//  The rest is considered as ModifyColumn.
  1043  	return bf.ModifyColumn
  1044  }
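// A few worked examples of the classification above (illustrative; each result
// follows directly from the checks in needChangeColumnData):
//
//	DECIMAL(10,2) -> DECIMAL(8,2)    flen changed                  -> bf.PrecisionDecrease
//	INT           -> INT UNSIGNED    sign toggled                  -> bf.PrecisionDecrease
//	VARCHAR(255)  -> VARCHAR(100)    same type, flen shrinks       -> bf.ValueRangeDecrease
//	BIGINT        -> INT             default integer flen shrinks  -> bf.ValueRangeDecrease
//	INT           -> BIGINT          no truncation, sign unchanged -> bf.AlterTable
//	VARCHAR(100)  -> DATETIME        unrelated type families       -> bf.ModifyColumn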
  1045  
  1046  func (ddl *DDLWorker) handleModifyColumn(qec *queryEventContext, info *ddlInfo, spec *ast.AlterTableSpec) (bf.EventType, error) {
  1047  	// return AlterTable if any error happens
  1048  	// guard against panic; this shouldn't happen
  1049  	if len(info.sourceTables) == 0 || len(info.targetTables) == 0 {
  1050  		return bf.AlterTable, nil
  1051  	}
  1052  	if len(spec.NewColumns) == 0 || spec.NewColumns[0].Tp == nil {
  1053  		return bf.AlterTable, nil
  1054  	}
  1055  
  1056  	// get table info and db info
  1057  	ti, err := ddl.getTableInfo(qec.tctx, info.sourceTables[0], info.targetTables[0])
  1058  	if err != nil || ti == nil {
  1059  		return bf.AlterTable, err
  1060  	}
  1061  	tbl := tables.MockTableFromMeta(ti)
  1062  	di, err := ddl.getDBInfoFromDownstream(qec.tctx, info.sourceTables[0], info.targetTables[0])
  1063  	if err != nil || di == nil {
  1064  		return bf.AlterTable, err
  1065  	}
  1066  
  1067  	// get old and new column
  1068  	oldColumnName := spec.OldColumnName
  1069  	if spec.Tp == ast.AlterTableModifyColumn {
  1070  		oldColumnName = spec.NewColumns[0].Name
  1071  	}
  1072  	oldCol := table.FindCol(tbl.Cols(), oldColumnName.Name.L)
  1073  	if oldCol == nil {
  1074  		return bf.AlterTable, nil
  1075  	}
  1076  	newCol := table.ToColumn(&model.ColumnInfo{
  1077  		ID:                    oldCol.ID,
  1078  		Offset:                oldCol.Offset,
  1079  		State:                 oldCol.State,
  1080  		OriginDefaultValue:    oldCol.OriginDefaultValue,
  1081  		OriginDefaultValueBit: oldCol.OriginDefaultValueBit,
  1082  		FieldType:             *spec.NewColumns[0].Tp,
  1083  		Name:                  spec.NewColumns[0].Name.Name,
  1084  		Version:               oldCol.Version,
  1085  	})
  1086  
  1087  	// handle charset and collation
  1088  	if err := tidbddl.ProcessColumnCharsetAndCollation(tidbmock.NewContext(), oldCol, newCol, ti, spec.NewColumns[0], di); err != nil {
  1089  		ddl.logger.Warn("process column charset and collation failed", zap.Error(err))
  1090  		return bf.AlterTable, err
  1091  	}
  1092  	// handle column options
  1093  	if err := tidbddl.ProcessModifyColumnOptions(tidbmock.NewContext(), newCol, spec.NewColumns[0].Options); err != nil {
  1094  		ddl.logger.Warn("process column options failed", zap.Error(err))
  1095  		return bf.AlterTable, err
  1096  	}
  1097  
  1098  	if et := ddl.needChangeColumnData(oldCol, newCol); et != bf.AlterTable {
  1099  		return et, nil
  1100  	}
  1101  	switch {
  1102  	case mysql.HasAutoIncrementFlag(oldCol.GetFlag()) && !mysql.HasAutoIncrementFlag(newCol.GetFlag()):
  1103  		return bf.RemoveAutoIncrement, nil
  1104  	case mysql.HasPriKeyFlag(oldCol.GetFlag()) && !mysql.HasPriKeyFlag(newCol.GetFlag()):
  1105  		return bf.DropPrimaryKey, nil
  1106  	case mysql.HasUniKeyFlag(oldCol.GetFlag()) && !mysql.HasUniKeyFlag(newCol.GetFlag()):
  1107  		return bf.DropUniqueKey, nil
  1108  	case oldCol.GetDefaultValue() != newCol.GetDefaultValue():
  1109  		return bf.ModifyDefaultValue, nil
  1110  	case oldCol.GetCharset() != newCol.GetCharset():
  1111  		return bf.ModifyCharset, nil
  1112  	case oldCol.GetCollate() != newCol.GetCollate():
  1113  		return bf.ModifyCollation, nil
  1114  	case spec.Position != nil && spec.Position.Tp != ast.ColumnPositionNone:
  1115  		return bf.ModifyColumnsOrder, nil
  1116  	case oldCol.Name.L != newCol.Name.L:
  1117  		return bf.RenameColumn, nil
  1118  	default:
  1119  		return bf.AlterTable, nil
  1120  	}
  1121  }
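// Two illustrative outcomes of handleModifyColumn (assuming a plain column with
// no default value and matching upstream/downstream schemas):
//
//	old column `c` VARCHAR(100), ALTER TABLE t MODIFY COLUMN c VARCHAR(50)
//	  -> bf.ValueRangeDecrease (needChangeColumnData detects the flen shrink)
//
//	old column `id` BIGINT AUTO_INCREMENT, ALTER TABLE t MODIFY COLUMN id BIGINT
//	  -> bf.RemoveAutoIncrement (the AUTO_INCREMENT flag is dropped)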
  1122  
  1123  // AstToDDLEvent returns the bf.EventType for the given DDL.
  1124  func (ddl *DDLWorker) AstToDDLEvent(qec *queryEventContext, info *ddlInfo) (et bf.EventType) {
  1125  	defer func() {
  1126  		ddl.logger.Info("get ddl event type", zap.String("event_type", string(et)))
  1127  	}()
  1128  	node := info.stmtCache
  1129  	switch n := node.(type) {
  1130  	case *ast.AlterTableStmt:
  1131  		validSpecs, err := tidbddl.ResolveAlterTableSpec(tidbmock.NewContext(), n.Specs)
  1132  		if err != nil {
  1133  			break
  1134  		}
  1135  
  1136  		for _, spec := range validSpecs {
  1137  			switch spec.Tp {
  1138  			case ast.AlterTableModifyColumn, ast.AlterTableChangeColumn:
  1139  				et, err := ddl.handleModifyColumn(qec, info, spec)
  1140  				if err != nil {
  1141  					ddl.logger.Warn("handle modify column failed", zap.Error(err))
  1142  				}
  1143  				return et
  1144  			case ast.AlterTableRenameColumn:
  1145  				return bf.RenameColumn
  1146  			case ast.AlterTableRenameIndex:
  1147  				return bf.RenameIndex
  1148  			case ast.AlterTableRenameTable:
  1149  				return bf.RenameTable
  1150  			case ast.AlterTableDropColumn:
  1151  				return bf.DropColumn
  1152  			case ast.AlterTableDropIndex:
  1153  				return bf.DropIndex
  1154  			case ast.AlterTableDropPartition:
  1155  				return bf.DropTablePartition
  1156  			case ast.AlterTableDropPrimaryKey:
  1157  				return bf.DropPrimaryKey
  1158  			case ast.AlterTableTruncatePartition:
  1159  				return bf.TruncateTablePartition
  1160  			case ast.AlterTableAlterColumn:
  1161  				return bf.ModifyDefaultValue
  1162  			case ast.AlterTableAddConstraint:
  1163  				return bf.ModifyConstraint
  1164  			case ast.AlterTableOption:
  1165  				for _, opt := range spec.Options {
  1166  					switch opt.Tp {
  1167  					case ast.TableOptionCharset:
  1168  						return bf.ModifyCharset
  1169  					case ast.TableOptionCollate:
  1170  						return bf.ModifyCollation
  1171  					case ast.TableOptionEngine:
  1172  						return bf.ModifyStorageEngine
  1173  					}
  1174  				}
  1175  			case ast.AlterTableReorganizePartition:
  1176  				return bf.ReorganizePartition
  1177  			case ast.AlterTableRebuildPartition:
  1178  				return bf.RebuildPartition
  1179  			case ast.AlterTableCoalescePartitions:
  1180  				return bf.CoalescePartition
  1181  			case ast.AlterTableExchangePartition:
  1182  				return bf.ExchangePartition
  1183  			}
  1184  		}
  1185  	}
  1186  	return bf.AstToDDLEvent(node)
  1187  }
  1188  
  1189  // skipQueryEvent reports whether to skip the event. If it is skipped by binlog-filter, it:
  1190  // * tracks the ddlInfo;
  1191  // * changes ddlInfo.originDDL to an empty string.
  1192  func (ddl *DDLWorker) skipQueryEvent(qec *queryEventContext, ddlInfo *ddlInfo) (bool, error) {
  1193  	if utils.IsBuildInSkipDDL(qec.originSQL) {
  1194  		return true, nil
  1195  	}
  1196  	et := ddl.AstToDDLEvent(qec, ddlInfo)
  1197  	// get the real tables before applying the block-allow list
  1198  	realTables := make([]*filter.Table, 0, len(ddlInfo.sourceTables))
  1199  	for _, table := range ddlInfo.sourceTables {
  1200  		realTableName := table.Name
  1201  		if ddl.onlineDDL != nil {
  1202  			realTableName = ddl.onlineDDL.RealName(table.Name)
  1203  		}
  1204  		realTables = append(realTables, &filter.Table{
  1205  			Schema: table.Schema,
  1206  			Name:   realTableName,
  1207  		})
  1208  	}
  1209  	for _, table := range realTables {
  1210  		ddl.logger.Debug("query event info", zap.String("event", "query"), zap.String("origin sql", qec.originSQL), zap.Stringer("table", table), zap.Stringer("ddl info", ddlInfo))
  1211  		if skipByTable(ddl.baList, table) {
  1212  			ddl.logger.Debug("skip event by balist")
  1213  			return true, nil
  1214  		}
  1215  		needSkip, err := skipByFilter(ddl.binlogFilter, table, et, qec.originSQL)
  1216  		if err != nil {
  1217  			return needSkip, err
  1218  		}
  1219  
  1220  		if needSkip {
  1221  			ddl.logger.Debug("skip event by binlog filter")
  1222  			// In the case of online-ddl, if the generated table is skipped, tracking the ddl will fail.
  1223  			err := ddl.trackDDL(qec.ddlSchema, ddlInfo, qec.eventContext)
  1224  			if err != nil {
  1225  				ddl.logger.Warn("track ddl failed", zap.Stringer("ddl info", ddlInfo))
  1226  			}
  1227  			ddl.saveTablePoint(table, qec.lastLocation)
  1228  			ddl.logger.Warn("track skipped ddl and return empty string", zap.String("origin sql", qec.originSQL), zap.Stringer("ddl info", ddlInfo))
  1229  			ddlInfo.originDDL = ""
  1230  			return true, nil
  1231  		}
  1232  	}
  1233  	return false, nil
  1234  }
  1235  
  1236  // processOneDDL processes an already-split ddl with the following steps:
  1237  // 1. generate the ddl info;
  1238  // 2. skip the sql via skipQueryEvent;
  1239  // 3. apply online ddl if onlineDDL is not nil:
  1240  //   - specially, if skipped, apply an empty string;
  1241  func (ddl *DDLWorker) processOneDDL(qec *queryEventContext, sql string) ([]string, error) {
  1242  	ddlInfo, err := ddl.genDDLInfo(qec, sql)
  1243  	if err != nil {
  1244  		return nil, err
  1245  	}
  1246  
  1247  	if ddl.onlineDDL != nil {
  1248  		if err = ddl.onlineDDL.CheckRegex(ddlInfo.stmtCache, qec.ddlSchema, ddl.sourceTableNamesFlavor); err != nil {
  1249  			return nil, err
  1250  		}
  1251  	}
  1252  
  1253  	qec.tctx.L().Debug("will check skip query event", zap.String("event", "query"), zap.String("statement", sql), zap.Stringer("ddlInfo", ddlInfo))
  1254  	shouldSkip, err := ddl.skipQueryEvent(qec, ddlInfo)
  1255  	if err != nil {
  1256  		return nil, err
  1257  	}
  1258  	if shouldSkip {
  1259  		ddl.metricsProxies.SkipBinlogDurationHistogram.WithLabelValues("query", ddl.name, ddl.sourceID).Observe(time.Since(qec.startTime).Seconds())
  1260  		qec.tctx.L().Warn("skip event", zap.String("event", "query"), zap.String("statement", sql), zap.Stringer("query event context", qec))
  1261  		if ddl.onlineDDL == nil || len(ddlInfo.originDDL) != 0 {
  1262  			return nil, nil
  1263  		}
  1264  	}
  1265  
  1266  	if ddl.onlineDDL == nil {
  1267  		return []string{ddlInfo.originDDL}, nil
  1268  	}
  1269  	// filter and save ghost table ddl
  1270  	sqls, err := ddl.onlineDDL.Apply(qec.tctx, ddlInfo.sourceTables, ddlInfo.originDDL, ddlInfo.stmtCache, qec.p)
  1271  	if err != nil {
  1272  		return nil, err
  1273  	}
  1274  	// an empty result means the ddl has been saved in onlineDDL.Storage
  1275  	if len(sqls) == 0 {
  1276  		return nil, nil
  1277  	}
  1278  	// an unchanged sql means this sql is not an online DDL.
  1279  	if sqls[0] == sql {
  1280  		return sqls, nil
  1281  	}
  1282  
  1283  	if qec.onlineDDLTable == nil {
  1284  		qec.onlineDDLTable = ddlInfo.sourceTables[0]
  1285  	} else if qec.onlineDDLTable.String() != ddlInfo.sourceTables[0].String() {
  1286  		return nil, terror.ErrSyncerUnitOnlineDDLOnMultipleTable.Generate(qec.originSQL)
  1287  	}
  1288  	return sqls, nil
  1289  }
  1290  
  1291  // genDDLInfo generates the ddl info from the given sql.
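        //
        // A rough sketch of the routing effect (the table names and route rule are
        // hypothetical): with a rule mapping db1.* -> db2.*, the input
        //
        //	ALTER TABLE db1.t1 ADD COLUMN c INT
        //
        // keeps originDDL unchanged and produces a routedDDL along the lines of
        //
        //	ALTER TABLE `db2`.`t1` ADD COLUMN `c` INT
        //
        // where the exact restored form comes from parserpkg.RenameDDLTable.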
  1292  func (ddl *DDLWorker) genDDLInfo(qec *queryEventContext, sql string) (*ddlInfo, error) {
  1293  	stmt, err := qec.p.ParseOneStmt(sql, "", "")
  1294  	if err != nil {
  1295  		return nil, terror.Annotatef(terror.ErrSyncerUnitParseStmt.New(err.Error()), "ddl %s", sql)
  1296  	}
  1297  	// parse the sql again to get a second stmt: one represents the original ddl, the other may be modified by other functions.
  1298  	stmt2, _ := qec.p.ParseOneStmt(sql, "", "")
  1299  
  1300  	sourceTables, err := parserpkg.FetchDDLTables(qec.ddlSchema, stmt, ddl.sourceTableNamesFlavor)
  1301  	if err != nil {
  1302  		return nil, err
  1303  	}
  1304  
  1305  	targetTables := make([]*filter.Table, 0, len(sourceTables))
  1306  	for i := range sourceTables {
  1307  		renamedTable := route(ddl.tableRouter, sourceTables[i])
  1308  		targetTables = append(targetTables, renamedTable)
  1309  	}
  1310  
  1311  	ddlInfo := &ddlInfo{
  1312  		originDDL:    sql,
  1313  		originStmt:   stmt,
  1314  		stmtCache:    stmt2,
  1315  		sourceTables: sourceTables,
  1316  		targetTables: targetTables,
  1317  	}
  1318  
  1319  	// in "strict" collation-compatible mode, adjust the collation
  1320  	if ddl.collationCompatible == config.StrictCollationCompatible {
  1321  		ddl.adjustCollation(ddlInfo, qec.eventStatusVars, ddl.charsetAndDefaultCollation, ddl.idAndCollationMap)
  1322  	}
  1323  
  1324  	routedDDL, err := parserpkg.RenameDDLTable(ddlInfo.stmtCache, ddlInfo.targetTables)
  1325  	ddlInfo.routedDDL = routedDDL
  1326  	return ddlInfo, err
  1327  }
  1328  
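        // dropSchemaInSharding removes all sharding-group members and table checkpoints
        // whose upstream schema equals sourceSchema, e.g. when a schema is dropped while
        // running in pessimistic shard mode.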
  1329  func (ddl *Pessimist) dropSchemaInSharding(tctx *tcontext.Context, sourceSchema string) error {
  1330  	sources := make(map[string][]*filter.Table)
  1331  	sgs := ddl.sgk.Groups()
  1332  	for name, sg := range sgs {
  1333  		if sg.IsSchemaOnly {
  1334  			// in sharding-group leave handling we always process the schema group,
  1335  			// so we can ignore schema-only groups here
  1336  			continue
  1337  		}
  1338  		tables := sg.Tables()
  1339  		for _, table := range tables {
  1340  			if table.Schema != sourceSchema {
  1341  				continue
  1342  			}
  1343  			sources[name] = append(sources[name], table)
  1344  		}
  1345  	}
  1346  	// delete from the sharding groups first
  1347  	for name, tables := range sources {
  1348  		targetTable := utils.UnpackTableID(name)
  1349  		sourceTableIDs := make([]string, 0, len(tables))
  1350  		for _, table := range tables {
  1351  			sourceTableIDs = append(sourceTableIDs, utils.GenTableID(table))
  1352  		}
  1353  		err := ddl.sgk.LeaveGroup(targetTable, sourceTableIDs)
  1354  		if err != nil {
  1355  			return err
  1356  		}
  1357  	}
  1358  	// delete from checkpoint
  1359  	for _, tables := range sources {
  1360  		for _, table := range tables {
  1361  			// TODO: refine this to clean them up later if deletion failed;
  1362  			// for now it doesn't cause problems
  1363  			if err1 := ddl.checkpoint.DeleteTablePoint(tctx, table); err1 != nil {
  1364  				ddl.logger.Error("fail to delete checkpoint", zap.Stringer("table", table), zap.Error(err1))
  1365  			}
  1366  		}
  1367  	}
  1368  	return nil
  1369  }
  1370  
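        // clearOnlineDDL finishes the online DDL (cleans the saved online-ddl metadata)
        // for every source table in the sharding group of targetTable.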
  1371  func (ddl *Pessimist) clearOnlineDDL(tctx *tcontext.Context, targetTable *filter.Table) error {
  1372  	group := ddl.sgk.Group(targetTable)
  1373  	if group == nil {
  1374  		return nil
  1375  	}
  1376  
  1377  	// group.Tables returns [[schema, table]...]
  1378  	tables := group.Tables()
  1379  
  1380  	for _, table := range tables {
  1381  		ddl.logger.Info("finish online ddl", zap.Stringer("table", table))
  1382  		err := ddl.onlineDDL.Finish(tctx, table)
  1383  		if err != nil {
  1384  			return terror.Annotatef(err, "finish online ddl on %v", table)
  1385  		}
  1386  	}
  1387  
  1388  	return nil
  1389  }
  1390  
  1391  // adjustCollation adds an explicit collation for CREATE DATABASE and CREATE TABLE statements when the collation is missing.
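        //
        // For example (the concrete default collation is an assumption; at runtime it is
        // taken from the upstream's INFORMATION_SCHEMA.COLLATIONS):
        //
        //	CREATE TABLE t (c INT) CHARSET=utf8mb4
        //
        // is rewritten to carry an explicit collation such as
        //
        //	CREATE TABLE t (c INT) CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci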
  1392  func (ddl *DDLWorker) adjustCollation(ddlInfo *ddlInfo, statusVars []byte, charsetAndDefaultCollationMap map[string]string, idAndCollationMap map[int]string) {
  1393  	switch createStmt := ddlInfo.stmtCache.(type) {
  1394  	case *ast.CreateTableStmt:
  1395  		if createStmt.ReferTable != nil {
  1396  			return
  1397  		}
  1398  		ddl.adjustColumnsCollation(createStmt, charsetAndDefaultCollationMap)
  1399  		var justCharset string
  1400  		for _, tableOption := range createStmt.Options {
  1401  			// 'Collation' is already specified
  1402  			if tableOption.Tp == ast.TableOptionCollate {
  1403  				return
  1404  			}
  1405  			if tableOption.Tp == ast.TableOptionCharset {
  1406  				justCharset = tableOption.StrValue
  1407  			}
  1408  		}
  1409  		if justCharset == "" {
  1410  			ddl.logger.Warn("detect create table risk which use implicit charset and collation", zap.String("originSQL", ddlInfo.originDDL))
  1411  			return
  1412  		}
  1413  		// only the charset is specified, so add the collation from the charset-to-default-collation map
  1414  		collation, ok := charsetAndDefaultCollationMap[strings.ToLower(justCharset)]
  1415  		if !ok {
  1416  			ddl.logger.Warn("not found charset default collation.", zap.String("originSQL", ddlInfo.originDDL), zap.String("charset", strings.ToLower(justCharset)))
  1417  			return
  1418  		}
  1419  		ddl.logger.Info("detect create table risk which use explicit charset and implicit collation, we will add collation by INFORMATION_SCHEMA.COLLATIONS", zap.String("originSQL", ddlInfo.originDDL), zap.String("collation", collation))
  1420  		createStmt.Options = append(createStmt.Options, &ast.TableOption{Tp: ast.TableOptionCollate, StrValue: collation})
  1421  
  1422  	case *ast.CreateDatabaseStmt:
  1423  		var justCharset, collation string
  1424  		var ok bool
  1425  		var err error
  1426  		for _, createOption := range createStmt.Options {
  1427  			// 'Collation' is already specified
  1428  			if createOption.Tp == ast.DatabaseOptionCollate {
  1429  				return
  1430  			}
  1431  			if createOption.Tp == ast.DatabaseOptionCharset {
  1432  				justCharset = createOption.Value
  1433  			}
  1434  		}
  1435  
  1436  		// only the charset is specified, so add the collation from the charset-to-default-collation map
  1437  		if justCharset != "" {
  1438  			collation, ok = charsetAndDefaultCollationMap[strings.ToLower(justCharset)]
  1439  			if !ok {
  1440  				ddl.logger.Warn("not found charset default collation.", zap.String("originSQL", ddlInfo.originDDL), zap.String("charset", strings.ToLower(justCharset)))
  1441  				return
  1442  			}
  1443  			ddl.logger.Info("detect create database risk which use explicit charset and implicit collation, we will add collation by INFORMATION_SCHEMA.COLLATIONS", zap.String("originSQL", ddlInfo.originDDL), zap.String("collation", collation))
  1444  		} else {
  1445  			// neither charset nor collation is specified,
  1446  			// so add the collation from the server collation carried in the binlog statusVars
  1447  			collation, err = event.GetServerCollationByStatusVars(statusVars, idAndCollationMap)
  1448  			if err != nil {
  1449  				ddl.logger.Error("can not get charset server collation from binlog statusVars.", zap.Error(err), zap.String("originSQL", ddlInfo.originDDL))
  1450  			}
  1451  			if collation == "" {
  1452  				ddl.logger.Error("get server collation from binlog statusVars is nil.", zap.Error(err), zap.String("originSQL", ddlInfo.originDDL))
  1453  				return
  1454  			}
  1455  			// add collation
  1456  			ddl.logger.Info("detect create database risk which use implicit charset and collation, we will add collation by binlog status_vars", zap.String("originSQL", ddlInfo.originDDL), zap.String("collation", collation))
  1457  		}
  1458  		createStmt.Options = append(createStmt.Options, &ast.DatabaseOption{Tp: ast.DatabaseOptionCollate, Value: collation})
  1459  	}
  1460  }
  1461  
  1462  // adjustColumnsCollation adds an explicit collation for columns that specify only a charset.
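        //
        // For example (the concrete collation is an assumption taken from the map at
        // runtime):
        //
        //	c VARCHAR(10) CHARSET gbk          -> c VARCHAR(10) CHARSET gbk COLLATE gbk_chinese_ci
        //	c VARCHAR(10) COLLATE utf8mb4_bin  -> unchanged, collation is already explicit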
  1463  func (ddl *DDLWorker) adjustColumnsCollation(createStmt *ast.CreateTableStmt, charsetAndDefaultCollationMap map[string]string) {
  1464  ColumnLoop:
  1465  	for _, col := range createStmt.Cols {
  1466  		for _, options := range col.Options {
  1467  			// 'Collation' is already specified
  1468  			if options.Tp == ast.ColumnOptionCollate {
  1469  				continue ColumnLoop
  1470  			}
  1471  		}
  1472  		fieldType := col.Tp
  1473  		// 'Collation' is already specified
  1474  		if fieldType.GetCollate() != "" {
  1475  			continue
  1476  		}
  1477  		if fieldType.GetCharset() != "" {
  1478  			// only the charset is specified
  1479  			collation, ok := charsetAndDefaultCollationMap[strings.ToLower(fieldType.GetCharset())]
  1480  			if !ok {
  1481  				ddl.logger.Warn("not found charset default collation for column.", zap.String("table", createStmt.Table.Name.String()), zap.String("column", col.Name.String()), zap.String("charset", strings.ToLower(fieldType.GetCharset())))
  1482  				continue
  1483  			}
  1484  			col.Options = append(col.Options, &ast.ColumnOption{Tp: ast.ColumnOptionCollate, StrValue: collation})
  1485  		}
  1486  	}
  1487  }
  1488  
  1489  type ddlInfo struct {
  1490  	originDDL    string
  1491  	routedDDL    string
  1492  	originStmt   ast.StmtNode
  1493  	stmtCache    ast.StmtNode
  1494  	sourceTables []*filter.Table
  1495  	targetTables []*filter.Table
  1496  }
  1497  
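        // String implements fmt.Stringer. The output is shaped roughly like (values are
        // hypothetical):
        //
        //	{originDDL: ALTER TABLE db1.t1 ADD COLUMN c INT, routedDDL: ALTER TABLE `db2`.`t1` ADD COLUMN `c` INT, sourceTables: `db1`.`t1`, targetTables: `db2`.`t1`}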
  1498  func (d *ddlInfo) String() string {
  1499  	sourceTables := make([]string, 0, len(d.sourceTables))
  1500  	targetTables := make([]string, 0, len(d.targetTables))
  1501  	for i := range d.sourceTables {
  1502  		sourceTables = append(sourceTables, d.sourceTables[i].String())
  1503  		targetTables = append(targetTables, d.targetTables[i].String())
  1504  	}
  1505  	return fmt.Sprintf("{originDDL: %s, routedDDL: %s, sourceTables: %s, targetTables: %s}",
  1506  		d.originDDL, d.routedDDL, strings.Join(sourceTables, ","), strings.Join(targetTables, ","))
  1507  }