github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/dm/syncer/schema.go

// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package syncer

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"regexp"
	"strings"

	ddl2 "github.com/pingcap/tidb/pkg/ddl"
	"github.com/pingcap/tidb/pkg/executor"
	"github.com/pingcap/tidb/pkg/meta/autoid"
	"github.com/pingcap/tidb/pkg/parser/ast"
	"github.com/pingcap/tidb/pkg/parser/format"
	"github.com/pingcap/tidb/pkg/parser/model"
	"github.com/pingcap/tidb/pkg/util/filter"
	"github.com/pingcap/tiflow/dm/config"
	"github.com/pingcap/tiflow/dm/openapi"
	"github.com/pingcap/tiflow/dm/pb"
	"github.com/pingcap/tiflow/dm/pkg/conn"
	"github.com/pingcap/tiflow/dm/pkg/terror"
	"github.com/pingcap/tiflow/dm/syncer/dbconn"
	"github.com/pingcap/tiflow/pkg/quotes"
	"go.uber.org/zap"
)

// OperateSchema operates schema for an upstream table.
func (s *Syncer) OperateSchema(ctx context.Context, req *pb.OperateWorkerSchemaRequest) (msg string, err error) {
	sourceTable := &filter.Table{
		Schema: req.Database,
		Name:   req.Table,
	}
	switch req.Op {
	case pb.SchemaOp_ListMigrateTargets:
		return s.listMigrateTargets(req)
	case pb.SchemaOp_ListSchema:
		schemaList := s.schemaTracker.AllSchemas()
		schemaListJSON, err := json.Marshal(schemaList)
		if err != nil {
			return "", terror.ErrSchemaTrackerMarshalJSON.Delegate(err, schemaList)
		}
		return string(schemaListJSON), err
	case pb.SchemaOp_ListTable:
		tables, err := s.schemaTracker.ListSchemaTables(req.Database)
		if err != nil {
			return "", err
		}
		tableListJSON, err := json.Marshal(tables)
		if err != nil {
			return "", terror.ErrSchemaTrackerMarshalJSON.Delegate(err, tables)
		}
		return string(tableListJSON), err
	case pb.SchemaOp_GetSchema:
		// when the task is paused, schemaTracker is closed, so we get the table structure from the checkpoint.
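		// If the checkpoint has no table info either (e.g. the table has not been synced yet),
		// fall back to the downstream table's CREATE TABLE statement and rewrite its table
		// name back to the upstream (source) table name before returning it.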
		ti := s.checkpoint.GetTableInfo(req.Database, req.Table)
		if ti == nil {
			s.tctx.L().Info("table schema is not in checkpoint, fetch from downstream",
				zap.String("table", sourceTable.String()))
			targetTable := s.route(sourceTable)
			result, err2 := dbconn.GetTableCreateSQL(s.tctx.WithContext(ctx), s.downstreamTrackConn, targetTable.String())
			result = strings.Replace(result, fmt.Sprintf("CREATE TABLE %s", quotes.QuoteName(targetTable.Name)), fmt.Sprintf("CREATE TABLE %s", quotes.QuoteName(sourceTable.Name)), 1)
			return conn.CreateTableSQLToOneRow(result), err2
		}

		result := bytes.NewBuffer(make([]byte, 0, 512))
		err2 := executor.ConstructResultOfShowCreateTable(s.sessCtx, ti, autoid.Allocators{}, result)
		return conn.CreateTableSQLToOneRow(result.String()), err2

	case pb.SchemaOp_SetSchema:
		// when FromSource or FromTarget is set, fetch the schema from the corresponding database first.
		if req.FromSource {
			schema, err := dbconn.GetTableCreateSQL(s.tctx.WithContext(ctx), s.fromConn, sourceTable.String())
			if err != nil {
				return "", err
			}
			req.Schema = schema
		}

		if req.FromTarget {
			targetTable := s.route(sourceTable)
			schema, err := dbconn.GetTableCreateSQL(s.tctx.WithContext(ctx), s.downstreamTrackConn, targetTable.String())
			if err != nil {
				return "", err
			}
			req.Schema = schema
		}

		// for SetSchema, we must ensure the input is a valid `CREATE TABLE` statement.
		// if we want to update the one in the checkpoint, it should wait for the checkpoint flush.
		parser2, err := s.fromDB.GetParser(ctx)
		if err != nil {
			return "", err
		}
		node, err := parser2.ParseOneStmt(req.Schema, "", "")
		if err != nil {
			return "", terror.ErrSchemaTrackerInvalidCreateTableStmt.Delegate(err, req.Schema)
		}
		stmt, ok := node.(*ast.CreateTableStmt)
		if !ok {
			return "", terror.ErrSchemaTrackerInvalidCreateTableStmt.Generate(req.Schema)
		}
		// ensure the correct table name.
		stmt.Table.Schema = model.NewCIStr(req.Database)
		stmt.Table.Name = model.NewCIStr(req.Table)
		stmt.IfNotExists = false // we must ensure the previous one is dropped.
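
		// Re-serialize the normalized statement through the parser's Restore API so the
		// canonical CREATE TABLE text is available for the "flush table info" log below.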
		var newCreateSQLBuilder strings.Builder
		restoreCtx := format.NewRestoreCtx(format.DefaultRestoreFlags, &newCreateSQLBuilder)
		if err = stmt.Restore(restoreCtx); err != nil {
			return "", terror.ErrSchemaTrackerRestoreStmtFail.Delegate(err)
		}
		newSQL := newCreateSQLBuilder.String()

		s.exprFilterGroup.ResetExprs(sourceTable)

		if !req.Flush {
			s.tctx.L().Info("overwrite --flush to true for operate-schema")
		}

		ti, err2 := ddl2.BuildTableInfoFromAST(stmt)
		if err2 != nil {
			return "", terror.ErrSchemaTrackerRestoreStmtFail.Delegate(err2)
		}

		s.tctx.L().Info("flush table info", zap.String("table info", newSQL))
		err = s.checkpoint.FlushPointsWithTableInfos(s.tctx.WithContext(ctx), []*filter.Table{sourceTable}, []*model.TableInfo{ti})
		if err != nil {
			return "", err
		}

		if req.Sync {
			if s.cfg.ShardMode != config.ShardOptimistic {
				s.tctx.L().Info("ignore --sync flag", zap.String("shard mode", s.cfg.ShardMode))
				break
			}
			targetTable := s.route(sourceTable)
			// use the new table info as tableInfoBefore; the original table info from schemaTracker could also be used.
			info := s.optimist.ConstructInfo(req.Database, req.Table, targetTable.Schema, targetTable.Name, []string{""}, ti, []*model.TableInfo{ti})
			info.IgnoreConflict = true
			s.tctx.L().Info("sync info with operate-schema", zap.String("info", info.ShortString()))
			_, err = s.optimist.PutInfo(info)
			if err != nil {
				return "", err
			}
		}

	case pb.SchemaOp_RemoveSchema:
		// as the doc says, `operate-schema remove` lets the DM-worker use the table structure in the checkpoint,
		// which needs no further action.
		return "", nil
	}
	return "", nil
}

// listMigrateTargets lists all synced schema and table names in the tracker.
func (s *Syncer) listMigrateTargets(req *pb.OperateWorkerSchemaRequest) (string, error) {
	var schemaList []string
	if req.Schema != "" {
		schemaR, err := regexp.Compile(req.Schema)
		if err != nil {
			return "", err
		}
		for _, schema := range s.schemaTracker.AllSchemas() {
			if schemaR.MatchString(schema) {
				schemaList = append(schemaList, schema)
			}
		}
	} else {
		schemaList = s.schemaTracker.AllSchemas()
	}

	var targets []openapi.TaskMigrateTarget
	routeAndAppendTarget := func(schema, table string) {
		sourceTable := &filter.Table{Schema: schema, Name: table}
		targetTable := s.route(sourceTable)
		if targetTable != nil {
			targets = append(targets, openapi.TaskMigrateTarget{
				SourceSchema: schema,
				SourceTable:  table,
				TargetSchema: targetTable.Schema,
				TargetTable:  targetTable.Name,
			})
		}
	}
	for _, schemaName := range schemaList {
		tables, err := s.schemaTracker.ListSchemaTables(schemaName)
		if err != nil {
			return "", err
		}
		if req.Table != "" {
			tableR, err := regexp.Compile(req.Table)
			if err != nil {
				return "", err
			}
			for _, tableName := range tables {
				if tableR.MatchString(tableName) {
					routeAndAppendTarget(schemaName, tableName)
				}
			}
		} else {
			for _, tableName := range tables {
				routeAndAppendTarget(schemaName, tableName)
			}
		}
	}
	targetsJSON, err := json.Marshal(targets)
	if err != nil {
		return "", terror.ErrSchemaTrackerMarshalJSON.Delegate(err, targets)
	}
	return string(targetsJSON), err
}
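
// A minimal sketch of a SetSchema request as handled above. Only fields referenced
// in OperateSchema are set; the literal values are illustrative assumptions:
//
//	req := &pb.OperateWorkerSchemaRequest{
//		Op:       pb.SchemaOp_SetSchema,
//		Database: "db1",
//		Table:    "tbl1",
//		Schema:   "CREATE TABLE tbl1 (c1 INT PRIMARY KEY)",
//		Flush:    true,
//		Sync:     true,
//	}
//	msg, err := s.OperateSchema(ctx, req) // s is a *Syncer, ctx a context.Context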