github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/dm/pkg/shardddl/optimism/lock_test.go

// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package optimism

import (
	"encoding/json"
	"fmt"
	"testing"

	"github.com/DATA-DOG/go-sqlmock"
	. "github.com/pingcap/check"
	"github.com/pingcap/tidb/pkg/parser"
	"github.com/pingcap/tidb/pkg/parser/ast"
	"github.com/pingcap/tidb/pkg/parser/model"
	"github.com/pingcap/tidb/pkg/util/mock"
	"github.com/pingcap/tidb/pkg/util/schemacmp"
	"github.com/pingcap/tiflow/dm/config/dbconfig"
	"github.com/pingcap/tiflow/dm/pkg/conn"
	"github.com/pingcap/tiflow/dm/pkg/cputil"
	"github.com/pingcap/tiflow/dm/pkg/log"
	"github.com/pingcap/tiflow/dm/pkg/terror"
	"github.com/pingcap/tiflow/dm/pkg/utils"
	"go.etcd.io/etcd/tests/v3/integration"
)

type testLock struct{}

var _ = Suite(&testLock{})

func TestLock(t *testing.T) {
	integration.BeforeTestExternal(t)
	mockCluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer mockCluster.Terminate(t)

	etcdTestCli = mockCluster.RandClient()

	TestingT(t)
}

func (t *testLock) SetUpSuite(c *C) {
	c.Assert(log.InitLogger(&log.Config{}), IsNil)
}

func (t *testLock) TearDownSuite(c *C) {
	clearTestInfoOperation(c)
}

func (t *testLock) TestLockTrySyncNormal(c *C) {
	var (
		ID         = "test_lock_try_sync_normal-`foo`.`bar`"
		task       = "test_lock_try_sync_normal"
		sources    = []string{"mysql-replica-1", "mysql-replica-2"}
		downSchema = "db"
		downTable  = "bar"
		dbs        = []string{"db1", "db2"}
		tbls       = []string{"bar1", "bar2"}
		tableCount = len(sources) * len(dbs) * len(tbls)
		p          = parser.New()
		se         = mock.NewContext()
		tblID      int64 = 111
		DDLs1      = []string{"ALTER TABLE bar ADD COLUMN c1 INT"}
		DDLs2      = []string{"ALTER TABLE bar ADD COLUMN c2 BIGINT", "ALTER TABLE bar ADD COLUMN c3 TEXT"}
		DDLs3      = []string{"ALTER TABLE bar DROP COLUMN c3"}
		DDLs4      = []string{"ALTER TABLE bar DROP COLUMN c2", "ALTER TABLE bar DROP COLUMN c1"}
		ti0        = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY)`)
		ti1        = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT)`)
		ti2        = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT, c2 BIGINT, c3 TEXT)`)
		ti2_1      = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT, c2 BIGINT)`)
		ti3        = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT, c2 BIGINT)`)
		ti4        = ti0
		ti4_1      = ti1
		tables     = map[string]map[string]struct{}{
			dbs[0]: {tbls[0]: struct{}{}, tbls[1]: struct{}{}},
			dbs[1]: {tbls[0]: struct{}{}, tbls[1]: struct{}{}},
		}
		tts = []TargetTable{
			newTargetTable(task, sources[0], downSchema, downTable, tables),
			newTargetTable(task, sources[1], downSchema, downTable, tables),
		}

		l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil)

		vers =
map[string]map[string]map[string]int64{ 95 sources[0]: { 96 dbs[0]: {tbls[0]: 0, tbls[1]: 0}, 97 dbs[1]: {tbls[0]: 0, tbls[1]: 0}, 98 }, 99 sources[1]: { 100 dbs[0]: {tbls[0]: 0, tbls[1]: 0}, 101 dbs[1]: {tbls[0]: 0, tbls[1]: 0}, 102 }, 103 } 104 ) 105 106 // the initial status is synced. 107 t.checkLockSynced(c, l) 108 t.checkLockNoDone(c, l) 109 110 // CASE: all tables execute a single & same DDL (schema become larger). 111 syncedCount := 0 112 for _, source := range sources { 113 if source == sources[len(sources)-1] { 114 ready := l.Ready() 115 for _, source2 := range sources { 116 synced := source != source2 // tables before the last source have synced. 117 for _, db2 := range dbs { 118 for _, tbl2 := range tbls { 119 c.Assert(ready[source2][db2][tbl2], Equals, synced) 120 } 121 } 122 } 123 } 124 125 for _, db := range dbs { 126 for _, tbl := range tbls { 127 info := newInfoWithVersion(task, source, db, tbl, downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 128 DDLs, cols, err := l.TrySync(info, tts) 129 c.Assert(err, IsNil) 130 c.Assert(DDLs, DeepEquals, DDLs1) 131 c.Assert(cols, DeepEquals, []string{}) 132 c.Assert(l.versions, DeepEquals, vers) 133 134 syncedCount++ 135 synced, remain := l.IsSynced() 136 c.Assert(synced, Equals, syncedCount == tableCount) 137 c.Assert(remain, Equals, tableCount-syncedCount) 138 c.Assert(synced, Equals, l.synced) 139 } 140 } 141 } 142 // synced again after all tables applied the DDL. 143 t.checkLockSynced(c, l) 144 t.checkLockNoDone(c, l) 145 146 // CASE: TrySync again after synced is idempotent. 147 info := newInfoWithVersion(task, sources[0], dbs[0], tbls[0], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 148 DDLs, cols, err := l.TrySync(info, tts) 149 c.Assert(err, IsNil) 150 c.Assert(DDLs, DeepEquals, DDLs1) 151 c.Assert(cols, DeepEquals, []string{}) 152 c.Assert(l.versions, DeepEquals, vers) 153 t.checkLockSynced(c, l) 154 t.checkLockNoDone(c, l) 155 156 // CASE: need to add more than one DDL to reach the desired schema (schema become larger). 157 // add two columns for one table. 158 info = newInfoWithVersion(task, sources[0], dbs[0], tbls[0], downSchema, downTable, DDLs2, ti1, []*model.TableInfo{ti2_1, ti2}, vers) 159 DDLs, cols, err = l.TrySync(info, tts) 160 c.Assert(err, IsNil) 161 c.Assert(DDLs, DeepEquals, DDLs2) 162 c.Assert(cols, DeepEquals, []string{}) 163 c.Assert(l.versions, DeepEquals, vers) 164 ready := l.Ready() 165 c.Assert(ready[sources[0]][dbs[0]][tbls[0]], IsTrue) 166 c.Assert(ready[sources[0]][dbs[0]][tbls[1]], IsFalse) 167 168 // TrySync again is idempotent (more than one DDL). 169 info = newInfoWithVersion(task, sources[0], dbs[0], tbls[0], downSchema, downTable, DDLs2, ti1, []*model.TableInfo{ti2_1, ti2}, vers) 170 DDLs, cols, err = l.TrySync(info, tts) 171 c.Assert(err, IsNil) 172 c.Assert(DDLs, DeepEquals, DDLs2) 173 c.Assert(cols, DeepEquals, []string{}) 174 c.Assert(l.versions, DeepEquals, vers) 175 ready = l.Ready() 176 c.Assert(ready[sources[0]][dbs[0]][tbls[0]], IsTrue) 177 c.Assert(ready[sources[0]][dbs[0]][tbls[1]], IsFalse) 178 179 // add only the first column for another table. 
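	// (the first table already holds both new columns, so this table lags behind and the lock stays un-synced.)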
180 info = newInfoWithVersion(task, sources[0], dbs[0], tbls[1], downSchema, downTable, DDLs2[0:1], ti1, []*model.TableInfo{ti2_1}, vers) 181 DDLs, cols, err = l.TrySync(info, tts) 182 c.Assert(err, IsNil) 183 c.Assert(DDLs, DeepEquals, DDLs2[0:1]) 184 c.Assert(cols, DeepEquals, []string{}) 185 c.Assert(l.versions, DeepEquals, vers) 186 ready = l.Ready() 187 c.Assert(ready[sources[0]][dbs[0]][tbls[0]], IsTrue) 188 c.Assert(ready[sources[0]][dbs[0]][tbls[1]], IsFalse) 189 synced, remain := l.IsSynced() 190 c.Assert(synced, IsFalse) 191 c.Assert(remain, Equals, tableCount-1) 192 c.Assert(synced, Equals, l.synced) 193 cmp, err := l.tables[sources[0]][dbs[0]][tbls[0]].Compare(l.tables[sources[0]][dbs[0]][tbls[1]]) 194 c.Assert(err, IsNil) 195 c.Assert(cmp, Equals, 1) 196 197 // TrySync again is idempotent 198 info = newInfoWithVersion(task, sources[0], dbs[0], tbls[1], downSchema, downTable, DDLs2[0:1], ti1, []*model.TableInfo{ti2_1}, vers) 199 DDLs, cols, err = l.TrySync(info, tts) 200 c.Assert(err, IsNil) 201 c.Assert(DDLs, DeepEquals, DDLs2[0:1]) 202 c.Assert(cols, DeepEquals, []string{}) 203 c.Assert(l.versions, DeepEquals, vers) 204 ready = l.Ready() 205 c.Assert(ready[sources[0]][dbs[0]][tbls[0]], IsTrue) 206 c.Assert(ready[sources[0]][dbs[0]][tbls[1]], IsFalse) 207 208 // add the second column for another table. 209 info = newInfoWithVersion(task, sources[0], dbs[0], tbls[1], downSchema, downTable, DDLs2[1:2], ti2_1, []*model.TableInfo{ti2}, vers) 210 DDLs, cols, err = l.TrySync(info, tts) 211 c.Assert(err, IsNil) 212 c.Assert(DDLs, DeepEquals, DDLs2[1:2]) 213 c.Assert(cols, DeepEquals, []string{}) 214 c.Assert(l.versions, DeepEquals, vers) 215 ready = l.Ready() 216 c.Assert(ready[sources[0]][dbs[0]][tbls[1]], IsTrue) // ready now. 217 synced, remain = l.IsSynced() 218 c.Assert(synced, IsFalse) 219 c.Assert(remain, Equals, tableCount-2) 220 c.Assert(synced, Equals, l.synced) 221 cmp, err = l.tables[sources[0]][dbs[0]][tbls[0]].Compare(l.tables[sources[0]][dbs[0]][tbls[1]]) 222 c.Assert(err, IsNil) 223 c.Assert(cmp, Equals, 0) 224 225 // Try again (for the second DDL). 226 info = newInfoWithVersion(task, sources[0], dbs[0], tbls[1], downSchema, downTable, DDLs2[1:2], ti2_1, []*model.TableInfo{ti2}, vers) 227 DDLs, cols, err = l.TrySync(info, tts) 228 c.Assert(err, IsNil) 229 c.Assert(DDLs, DeepEquals, DDLs2[1:2]) 230 c.Assert(cols, DeepEquals, []string{}) 231 c.Assert(l.versions, DeepEquals, vers) 232 233 t.trySyncForAllTablesLarger(c, l, DDLs2, ti1, []*model.TableInfo{ti2_1, ti2}, tts, vers) 234 t.checkLockSynced(c, l) 235 t.checkLockNoDone(c, l) 236 237 // CASE: all tables execute a single & same DDL (schema become smaller). 238 syncedCount = 0 239 for _, source := range sources { 240 if source == sources[len(sources)-1] { 241 ready = l.Ready() 242 for _, source2 := range sources { 243 synced = source == source2 // tables before the last source have not synced. 
244 for _, db2 := range dbs { 245 for _, tbl2 := range tbls { 246 c.Assert(ready[source2][db2][tbl2], Equals, synced) 247 } 248 } 249 } 250 } 251 252 for _, db := range dbs { 253 for _, tbl := range tbls { 254 syncedCount++ 255 info = newInfoWithVersion(task, source, db, tbl, downSchema, downTable, DDLs3, ti2, []*model.TableInfo{ti3}, vers) 256 DDLs, cols, err = l.TrySync(info, tts) 257 c.Assert(err, IsNil) 258 c.Assert(l.versions, DeepEquals, vers) 259 c.Assert(cols, DeepEquals, []string{"c3"}) 260 synced, remain = l.IsSynced() 261 c.Assert(synced, Equals, l.synced) 262 if syncedCount == tableCount { 263 c.Assert(DDLs, DeepEquals, DDLs3) 264 c.Assert(synced, IsTrue) 265 c.Assert(remain, Equals, 0) 266 } else { 267 c.Assert(DDLs, DeepEquals, []string{}) 268 c.Assert(synced, IsFalse) 269 c.Assert(remain, Equals, syncedCount) 270 } 271 } 272 } 273 } 274 t.checkLockSynced(c, l) 275 t.checkLockNoDone(c, l) 276 277 // CASE: need to drop more than one DDL to reach the desired schema (schema become smaller). 278 // drop two columns for one table. 279 info = newInfoWithVersion(task, sources[0], dbs[0], tbls[0], downSchema, downTable, DDLs4, ti3, []*model.TableInfo{ti4_1, ti4}, vers) 280 DDLs, cols, err = l.TrySync(info, tts) 281 c.Assert(err, IsNil) 282 c.Assert(DDLs, DeepEquals, []string{}) 283 c.Assert(cols, DeepEquals, []string{"c2", "c1"}) 284 c.Assert(l.versions, DeepEquals, vers) 285 ready = l.Ready() 286 c.Assert(ready[sources[0]][dbs[0]][tbls[0]], IsFalse) 287 c.Assert(ready[sources[0]][dbs[0]][tbls[1]], IsTrue) 288 289 // TrySync again is idempotent. 290 info = newInfoWithVersion(task, sources[0], dbs[0], tbls[0], downSchema, downTable, DDLs4, ti3, []*model.TableInfo{ti4_1, ti4}, vers) 291 DDLs, cols, err = l.TrySync(info, tts) 292 c.Assert(err, IsNil) 293 c.Assert(DDLs, DeepEquals, []string{}) 294 c.Assert(cols, DeepEquals, []string{"c2", "c1"}) 295 c.Assert(l.versions, DeepEquals, vers) 296 ready = l.Ready() 297 c.Assert(ready[sources[0]][dbs[0]][tbls[0]], IsFalse) 298 c.Assert(ready[sources[0]][dbs[0]][tbls[1]], IsTrue) 299 300 // drop only the first column for another table. 301 info = newInfoWithVersion(task, sources[0], dbs[0], tbls[1], downSchema, downTable, DDLs4[0:1], ti3, []*model.TableInfo{ti4_1}, vers) 302 DDLs, cols, err = l.TrySync(info, tts) 303 c.Assert(err, IsNil) 304 c.Assert(DDLs, DeepEquals, []string{}) 305 c.Assert(cols, DeepEquals, []string{"c2"}) 306 c.Assert(l.versions, DeepEquals, vers) 307 ready = l.Ready() 308 c.Assert(ready[sources[0]][dbs[0]][tbls[0]], IsFalse) 309 c.Assert(ready[sources[0]][dbs[0]][tbls[1]], IsFalse) 310 cmp, err = l.tables[sources[0]][dbs[0]][tbls[0]].Compare(l.tables[sources[0]][dbs[0]][tbls[1]]) 311 c.Assert(err, IsNil) 312 c.Assert(cmp, Equals, -1) 313 314 // TrySync again (only the first DDL). 315 info = newInfoWithVersion(task, sources[0], dbs[0], tbls[1], downSchema, downTable, DDLs4[0:1], ti3, []*model.TableInfo{ti4_1}, vers) 316 DDLs, cols, err = l.TrySync(info, tts) 317 c.Assert(err, IsNil) 318 c.Assert(DDLs, DeepEquals, []string{}) 319 c.Assert(cols, DeepEquals, []string{"c2"}) 320 c.Assert(l.versions, DeepEquals, vers) 321 322 // drop the second column for another table. 
323 info = newInfoWithVersion(task, sources[0], dbs[0], tbls[1], downSchema, downTable, DDLs4[1:2], ti4_1, []*model.TableInfo{ti4}, vers) 324 DDLs, cols, err = l.TrySync(info, tts) 325 c.Assert(err, IsNil) 326 c.Assert(DDLs, DeepEquals, []string{}) 327 c.Assert(cols, DeepEquals, []string{"c1"}) 328 c.Assert(l.versions, DeepEquals, vers) 329 ready = l.Ready() 330 c.Assert(ready[sources[0]][dbs[0]][tbls[0]], IsFalse) 331 c.Assert(ready[sources[0]][dbs[0]][tbls[1]], IsFalse) 332 cmp, err = l.tables[sources[0]][dbs[0]][tbls[0]].Compare(l.tables[sources[0]][dbs[0]][tbls[1]]) 333 c.Assert(err, IsNil) 334 c.Assert(cmp, Equals, 0) 335 336 // TrySync again (for the second DDL). 337 info = newInfoWithVersion(task, sources[0], dbs[0], tbls[1], downSchema, downTable, DDLs4[1:2], ti4_1, []*model.TableInfo{ti4}, vers) 338 DDLs, cols, err = l.TrySync(info, tts) 339 c.Assert(err, IsNil) 340 c.Assert(DDLs, DeepEquals, []string{}) 341 c.Assert(cols, DeepEquals, []string{"c1"}) 342 c.Assert(l.versions, DeepEquals, vers) 343 344 // try drop columns for other tables to reach the same schema. 345 remain = tableCount - 2 346 for source, schemaTables := range l.Ready() { 347 for schema, tables := range schemaTables { 348 for table, synced2 := range tables { 349 if synced2 { // do not `TrySync` again for previous two (un-synced now). 350 info = newInfoWithVersion(task, source, schema, table, downSchema, downTable, DDLs4, ti3, []*model.TableInfo{ti4_1, ti4}, vers) 351 DDLs, cols, err = l.TrySync(info, tts) 352 c.Assert(err, IsNil) 353 c.Assert(cols, DeepEquals, []string{"c2", "c1"}) 354 c.Assert(l.versions, DeepEquals, vers) 355 remain-- 356 if remain == 0 { 357 c.Assert(DDLs, DeepEquals, DDLs4) 358 } else { 359 c.Assert(DDLs, DeepEquals, []string{}) 360 } 361 } 362 } 363 } 364 } 365 t.checkLockSynced(c, l) 366 t.checkLockNoDone(c, l) 367 } 368 369 func (t *testLock) TestLockTrySyncIndex(c *C) { 370 // nolint:dupl 371 var ( 372 ID = "test_lock_try_sync_index-`foo`.`bar`" 373 task = "test_lock_try_sync_index" 374 source = "mysql-replica-1" 375 downSchema = "db" 376 downTable = "bar" 377 db = "db" 378 tbls = []string{"bar1", "bar2"} 379 p = parser.New() 380 se = mock.NewContext() 381 tblID int64 = 111 382 DDLs1 = []string{"ALTER TABLE bar DROP INDEX idx_c1"} 383 DDLs2 = []string{"ALTER TABLE bar ADD UNIQUE INDEX idx_c1(c1)"} 384 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT, UNIQUE INDEX idx_c1(c1))`) 385 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT)`) 386 ti2 = ti0 387 tables = map[string]map[string]struct{}{ 388 db: {tbls[0]: struct{}{}, tbls[1]: struct{}{}}, 389 } 390 tts = []TargetTable{ 391 newTargetTable(task, source, downSchema, downTable, tables), 392 } 393 394 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 395 396 vers = map[string]map[string]map[string]int64{ 397 source: { 398 db: {tbls[0]: 0, tbls[1]: 0}, 399 }, 400 } 401 ) 402 403 // the initial status is synced. 404 t.checkLockSynced(c, l) 405 t.checkLockNoDone(c, l) 406 407 // try sync for one table, `DROP INDEX` returned directly (to make schema become more compatible). 408 // `DROP INDEX` is handled like `ADD COLUMN`. 
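	// i.e. dropping the unique index moves the table toward the more tolerant schema, so the joined schema can return the DDL immediately.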
409 info := newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 410 DDLs, cols, err := l.TrySync(info, tts) 411 c.Assert(err, IsNil) 412 c.Assert(DDLs, DeepEquals, DDLs1) 413 c.Assert(cols, DeepEquals, []string{}) 414 c.Assert(l.versions, DeepEquals, vers) 415 synced, remain := l.IsSynced() 416 c.Assert(synced, Equals, l.synced) 417 c.Assert(synced, IsFalse) 418 c.Assert(remain, Equals, 1) 419 420 // try sync for another table, also got `DROP INDEX` now. 421 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 422 DDLs, cols, err = l.TrySync(info, tts) 423 c.Assert(err, IsNil) 424 c.Assert(DDLs, DeepEquals, DDLs1) 425 c.Assert(cols, DeepEquals, []string{}) 426 c.Assert(l.versions, DeepEquals, vers) 427 t.checkLockSynced(c, l) 428 429 // try sync for one table, `ADD INDEX` not returned directly (to keep the schema more compatible). 430 // `ADD INDEX` is handled like `DROP COLUMN`. 431 info = newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs2, ti1, []*model.TableInfo{ti2}, vers) 432 DDLs, cols, err = l.TrySync(info, tts) 433 c.Assert(err, IsNil) 434 c.Assert(DDLs, DeepEquals, []string{}) // no DDLs returned 435 c.Assert(cols, DeepEquals, []string{}) 436 c.Assert(l.versions, DeepEquals, vers) 437 synced, remain = l.IsSynced() 438 c.Assert(synced, Equals, l.synced) 439 c.Assert(synced, IsFalse) 440 c.Assert(remain, Equals, 1) 441 442 // try sync for another table, got `ADD INDEX` now. 443 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs2, ti1, []*model.TableInfo{ti2}, vers) 444 DDLs, cols, err = l.TrySync(info, tts) 445 c.Assert(err, IsNil) 446 c.Assert(DDLs, DeepEquals, DDLs2) 447 c.Assert(cols, DeepEquals, []string{}) 448 c.Assert(l.versions, DeepEquals, vers) 449 t.checkLockSynced(c, l) 450 } 451 452 func (t *testLock) TestLockTrySyncNullNotNull(c *C) { 453 // nolint:dupl 454 var ( 455 ID = "test_lock_try_sync_null_not_null-`foo`.`bar`" 456 task = "test_lock_try_sync_null_not_null" 457 source = "mysql-replica-1" 458 downSchema = "db" 459 downTable = "bar" 460 db = "db" 461 tbls = []string{"bar1", "bar2"} 462 p = parser.New() 463 se = mock.NewContext() 464 tblID int64 = 111 465 DDLs1 = []string{"ALTER TABLE bar MODIFY COLUMN c1 INT NOT NULL DEFAULT 1234"} 466 DDLs2 = []string{"ALTER TABLE bar MODIFY COLUMN c1 INT NULL DEFAULT 1234"} 467 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT NULL DEFAULT 1234)`) 468 ti1 = createTableInfo(c, p, se, tblID, 469 `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT NOT NULL DEFAULT 1234)`) 470 ti2 = ti0 471 tables = map[string]map[string]struct{}{ 472 db: {tbls[0]: struct{}{}, tbls[1]: struct{}{}}, 473 } 474 tts = []TargetTable{ 475 newTargetTable(task, source, downSchema, downTable, tables), 476 } 477 478 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 479 480 vers = map[string]map[string]map[string]int64{ 481 source: { 482 db: {tbls[0]: 0, tbls[1]: 0}, 483 }, 484 } 485 ) 486 487 // the initial status is synced. 488 t.checkLockSynced(c, l) 489 t.checkLockNoDone(c, l) 490 491 for i := 0; i < 2; i++ { // two round 492 // try sync for one table, from `NULL` to `NOT NULL`, no DDLs returned. 
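		// (`NOT NULL` is the more restrictive side, so the DDL is held back until every table has applied it.)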
493 info := newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 494 DDLs, cols, err := l.TrySync(info, tts) 495 c.Assert(err, IsNil) 496 c.Assert(DDLs, DeepEquals, []string{}) 497 c.Assert(cols, DeepEquals, []string{}) 498 c.Assert(l.versions, DeepEquals, vers) 499 500 // try sync for another table, DDLs returned. 501 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 502 DDLs, cols, err = l.TrySync(info, tts) 503 c.Assert(err, IsNil) 504 c.Assert(DDLs, DeepEquals, DDLs1) 505 c.Assert(cols, DeepEquals, []string{}) 506 c.Assert(l.versions, DeepEquals, vers) 507 508 // try sync for one table, from `NOT NULL` to `NULL`, DDLs returned. 509 info = newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs2, ti1, []*model.TableInfo{ti2}, vers) 510 DDLs, cols, err = l.TrySync(info, tts) 511 c.Assert(err, IsNil) 512 c.Assert(DDLs, DeepEquals, DDLs2) 513 c.Assert(cols, DeepEquals, []string{}) 514 c.Assert(l.versions, DeepEquals, vers) 515 516 // try sync for another table, from `NOT NULL` to `NULL`, DDLs, returned. 517 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs2, ti1, []*model.TableInfo{ti2}, vers) 518 DDLs, cols, err = l.TrySync(info, tts) 519 c.Assert(err, IsNil) 520 c.Assert(DDLs, DeepEquals, DDLs2) 521 c.Assert(cols, DeepEquals, []string{}) 522 c.Assert(l.versions, DeepEquals, vers) 523 } 524 } 525 526 func (t *testLock) TestLockTrySyncIntBigint(c *C) { 527 var ( 528 ID = "test_lock_try_sync_int_bigint-`foo`.`bar`" 529 task = "test_lock_try_sync_int_bigint" 530 source = "mysql-replica-1" 531 downSchema = "db" 532 downTable = "bar" 533 db = "db" 534 tbls = []string{"bar1", "bar2"} 535 p = parser.New() 536 se = mock.NewContext() 537 tblID int64 = 111 538 DDLs1 = []string{"ALTER TABLE bar MODIFY COLUMN c1 BIGINT NOT NULL DEFAULT 1234"} 539 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT NOT NULL DEFAULT 1234)`) 540 ti1 = createTableInfo(c, p, se, tblID, 541 `CREATE TABLE bar (id INT PRIMARY KEY, c1 BIGINT NOT NULL DEFAULT 1234)`) 542 tables = map[string]map[string]struct{}{ 543 db: {tbls[0]: struct{}{}, tbls[1]: struct{}{}}, 544 } 545 tts = []TargetTable{ 546 newTargetTable(task, source, downSchema, downTable, tables), 547 } 548 549 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 550 551 vers = map[string]map[string]map[string]int64{ 552 source: { 553 db: {tbls[0]: 0, tbls[1]: 0}, 554 }, 555 } 556 ) 557 558 // the initial status is synced. 559 t.checkLockSynced(c, l) 560 t.checkLockNoDone(c, l) 561 562 // try sync for one table, from `INT` to `BIGINT`, DDLs returned. 563 info := newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 564 DDLs, cols, err := l.TrySync(info, tts) 565 c.Assert(err, IsNil) 566 c.Assert(DDLs, DeepEquals, DDLs1) 567 c.Assert(cols, DeepEquals, []string{}) 568 c.Assert(l.versions, DeepEquals, vers) 569 570 // try sync for another table, DDLs returned. 
571 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 572 DDLs, cols, err = l.TrySync(info, tts) 573 c.Assert(err, IsNil) 574 c.Assert(DDLs, DeepEquals, DDLs1) 575 c.Assert(cols, DeepEquals, []string{}) 576 c.Assert(l.versions, DeepEquals, vers) 577 } 578 579 func (t *testLock) TestLockTrySyncNoDiff(c *C) { 580 var ( 581 ID = "test_lock_try_sync_no_diff-`foo`.`bar`" 582 task = "test_lock_try_sync_no_diff" 583 source = "mysql-replica-1" 584 downSchema = "db" 585 downTable = "bar" 586 db = "db" 587 tbls = []string{"bar1", "bar2"} 588 p = parser.New() 589 se = mock.NewContext() 590 tblID int64 = 111 591 DDLs1 = []string{"ALTER TABLE bar RENAME c1 TO c2"} 592 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT)`) 593 ti1 = createTableInfo(c, p, se, tblID, 594 `CREATE TABLE bar (id INT PRIMARY KEY, c2 INT)`) // `c1` dropped, `c2` added 595 tables = map[string]map[string]struct{}{ 596 db: {tbls[0]: struct{}{}, tbls[1]: struct{}{}}, 597 } 598 tts = []TargetTable{ 599 newTargetTable(task, source, downSchema, downTable, tables), 600 } 601 602 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 603 604 vers = map[string]map[string]map[string]int64{ 605 source: { 606 db: {tbls[0]: 0, tbls[1]: 0}, 607 }, 608 } 609 ) 610 611 // the initial status is synced. 612 t.checkLockSynced(c, l) 613 t.checkLockNoDone(c, l) 614 615 // try sync for one table. 616 info := newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 617 DDLs, cols, err := l.TrySync(info, tts) 618 c.Assert(terror.ErrShardDDLOptimismNeedSkipAndRedirect.Equal(err), IsTrue) 619 c.Assert(DDLs, DeepEquals, []string{}) 620 c.Assert(cols, DeepEquals, []string{}) 621 c.Assert(l.versions, DeepEquals, vers) 622 } 623 624 func (t *testLock) TestLockTrySyncNewTable(c *C) { 625 var ( 626 ID = "test_lock_try_sync_new_table-`foo`.`bar`" 627 task = "test_lock_try_sync_new_table" 628 source1 = "mysql-replica-1" 629 source2 = "mysql-replica-2" 630 downSchema = "foo" 631 downTable = "bar" 632 db1 = "foo1" 633 db2 = "foo2" 634 tbl1 = "bar1" 635 tbl2 = "bar2" 636 p = parser.New() 637 se = mock.NewContext() 638 tblID int64 = 111 639 DDLs1 = []string{"ALTER TABLE bar ADD COLUMN c1 INT"} 640 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY)`) 641 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT)`) 642 643 tables = map[string]map[string]struct{}{db1: {tbl1: struct{}{}}} 644 tts = []TargetTable{newTargetTable(task, source1, downSchema, downTable, tables)} 645 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 646 vers = map[string]map[string]map[string]int64{ 647 source1: { 648 db1: {tbl1: 0}, 649 }, 650 source2: { 651 db2: {tbl2: 0}, 652 }, 653 } 654 ) 655 656 // only one table exists before TrySync. 657 t.checkLockSynced(c, l) 658 t.checkLockNoDone(c, l) 659 660 // TrySync for a new table as the caller. 
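	// source2's table is not in the initial target tables, so TrySync should register it in the lock on the fly.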
661 info := newInfoWithVersion(task, source2, db2, tbl2, downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 662 DDLs, cols, err := l.TrySync(info, tts) 663 c.Assert(err, IsNil) 664 c.Assert(DDLs, DeepEquals, DDLs1) 665 c.Assert(cols, DeepEquals, []string{}) 666 c.Assert(l.versions, DeepEquals, vers) 667 668 ready := l.Ready() 669 c.Assert(ready, HasLen, 2) 670 c.Assert(ready[source1], HasLen, 1) 671 c.Assert(ready[source1][db1], HasLen, 1) 672 c.Assert(ready[source1][db1][tbl1], IsFalse) 673 c.Assert(ready[source2], HasLen, 1) 674 c.Assert(ready[source2][db2], HasLen, 1) 675 c.Assert(ready[source2][db2][tbl2], IsTrue) 676 677 // TrySync for two new tables as extra sources. 678 // newly added work table use tableInfoBefore as table info 679 tts = append(tts, 680 newTargetTable(task, source1, downSchema, downTable, map[string]map[string]struct{}{db1: {tbl2: struct{}{}}}), 681 newTargetTable(task, source2, downTable, downTable, map[string]map[string]struct{}{db2: {tbl1: struct{}{}}}), 682 ) 683 vers[source1][db1][tbl2] = 0 684 vers[source2][db2][tbl1] = 0 685 686 info = newInfoWithVersion(task, source1, db1, tbl1, downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 687 DDLs, cols, err = l.TrySync(info, tts) 688 c.Assert(err, IsNil) 689 c.Assert(DDLs, DeepEquals, DDLs1) 690 c.Assert(cols, DeepEquals, []string{}) 691 c.Assert(l.versions, DeepEquals, vers) 692 693 ready = l.Ready() 694 c.Assert(ready, HasLen, 2) 695 c.Assert(ready[source1], HasLen, 1) 696 c.Assert(ready[source1][db1], HasLen, 2) 697 c.Assert(ready[source1][db1][tbl1], IsTrue) 698 c.Assert(ready[source1][db1][tbl2], IsFalse) // new table use ti0 as init table 699 c.Assert(ready[source2], HasLen, 1) 700 c.Assert(ready[source2][db2], HasLen, 2) 701 c.Assert(ready[source2][db2][tbl1], IsFalse) 702 c.Assert(ready[source2][db2][tbl2], IsTrue) 703 704 info = newInfoWithVersion(task, source1, db1, tbl2, downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 705 DDLs, cols, err = l.TrySync(info, tts) 706 c.Assert(err, IsNil) 707 c.Assert(DDLs, DeepEquals, DDLs1) 708 c.Assert(cols, DeepEquals, []string{}) 709 c.Assert(l.versions, DeepEquals, vers) 710 711 ready = l.Ready() 712 c.Assert(ready, HasLen, 2) 713 c.Assert(ready[source1], HasLen, 1) 714 c.Assert(ready[source1][db1], HasLen, 2) 715 c.Assert(ready[source1][db1][tbl1], IsTrue) 716 c.Assert(ready[source1][db1][tbl2], IsTrue) 717 c.Assert(ready[source2], HasLen, 1) 718 c.Assert(ready[source2][db2], HasLen, 2) 719 c.Assert(ready[source2][db2][tbl1], IsFalse) 720 c.Assert(ready[source2][db2][tbl2], IsTrue) 721 } 722 723 func (t *testLock) TestLockTrySyncRevert(c *C) { 724 var ( 725 ID = "test_lock_try_sync_revert-`foo`.`bar`" 726 task = "test_lock_try_sync_revert" 727 source = "mysql-replica-1" 728 downSchema = "foo" 729 downTable = "bar" 730 db = "foo" 731 tbls = []string{"bar1", "bar2"} 732 p = parser.New() 733 se = mock.NewContext() 734 tblID int64 = 111 735 736 DDLs1 = []string{"ALTER TABLE bar ADD COLUMN c1 TEXT"} 737 DDLs2 = []string{"ALTER TABLE bar DROP COLUMN c1"} 738 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY)`) 739 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 TEXT)`) 740 ti2 = ti0 741 742 DDLs3 = []string{"ALTER TABLE bar ADD COLUMN c1 TEXT", "ALTER TABLE bar ADD COLUMN c2 INT"} 743 DDLs4 = []string{"ALTER TABLE bar DROP COLUMN c2"} 744 DDLs5 = []string{"ALTER TABLE bar DROP COLUMN c1"} 745 ti3 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, 
c1 TEXT, c2 INT)`) 746 ti4 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 TEXT)`) 747 ti5 = ti0 748 749 DDLs6 = DDLs3 750 DDLs7 = DDLs4 751 DDLs8 = []string{"ALTER TABLE bar ADD COLUMN c1 TEXT"} 752 ti6 = ti3 753 ti7 = ti4 754 ti8 = ti4 755 756 tables = map[string]map[string]struct{}{db: {tbls[0]: struct{}{}, tbls[1]: struct{}{}}} 757 tts = []TargetTable{newTargetTable(task, source, downSchema, downTable, tables)} 758 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 759 760 vers = map[string]map[string]map[string]int64{ 761 source: { 762 db: {tbls[0]: 0, tbls[1]: 0}, 763 }, 764 } 765 ) 766 767 // the initial status is synced. 768 t.checkLockSynced(c, l) 769 t.checkLockNoDone(c, l) 770 771 // CASE: revert for single DDL. 772 // TrySync for one table. 773 info := newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 774 DDLs, cols, err := l.TrySync(info, tts) 775 c.Assert(err, IsNil) 776 c.Assert(DDLs, DeepEquals, DDLs1) 777 c.Assert(cols, DeepEquals, []string{}) 778 c.Assert(l.versions, DeepEquals, vers) 779 ready := l.Ready() 780 c.Assert(ready[source][db][tbls[0]], IsTrue) 781 c.Assert(ready[source][db][tbls[1]], IsFalse) 782 783 joined, err := l.Joined() 784 c.Assert(err, IsNil) 785 cmp, err := l.tables[source][db][tbls[0]].Compare(joined) 786 c.Assert(err, IsNil) 787 c.Assert(cmp, Equals, 0) 788 cmp, err = l.tables[source][db][tbls[1]].Compare(joined) 789 c.Assert(err, IsNil) 790 c.Assert(cmp, Equals, -1) 791 792 // revert for the table, become synced again. 793 info = newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs2, ti1, []*model.TableInfo{ti2}, vers) 794 DDLs, cols, err = l.TrySync(info, tts) 795 c.Assert(err, IsNil) 796 c.Assert(DDLs, DeepEquals, DDLs2) 797 c.Assert(cols, DeepEquals, []string{"c1"}) 798 c.Assert(l.versions, DeepEquals, vers) 799 t.checkLockSynced(c, l) 800 t.checkLockNoDone(c, l) 801 802 // Simulate watch done operation from dm-worker 803 op := NewOperation(utils.GenDDLLockID(task, downSchema, downTable), task, source, db, tbls[0], DDLs2, ConflictNone, "", true, []string{"c1"}) 804 c.Assert(l.DeleteColumnsByOp(op), IsNil) 805 806 // CASE: revert for multiple DDLs. 807 // TrySync for one table. 808 info = newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs3, ti0, []*model.TableInfo{ti4, ti3}, vers) 809 DDLs, cols, err = l.TrySync(info, tts) 810 c.Assert(err, IsNil) 811 c.Assert(DDLs, DeepEquals, DDLs3) 812 c.Assert(cols, DeepEquals, []string{}) 813 c.Assert(l.versions, DeepEquals, vers) 814 ready = l.Ready() 815 c.Assert(ready[source][db][tbls[0]], IsTrue) 816 c.Assert(ready[source][db][tbls[1]], IsFalse) 817 joined, err = l.Joined() 818 c.Assert(err, IsNil) 819 cmp, err = l.tables[source][db][tbls[0]].Compare(joined) 820 c.Assert(err, IsNil) 821 c.Assert(cmp, Equals, 0) 822 cmp, err = l.tables[source][db][tbls[1]].Compare(joined) 823 c.Assert(err, IsNil) 824 c.Assert(cmp, Equals, -1) 825 826 // revert part of the DDLs. 
	info = newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs4, ti3, []*model.TableInfo{ti4}, vers)
	DDLs, cols, err = l.TrySync(info, tts)
	c.Assert(err, IsNil)
	c.Assert(DDLs, DeepEquals, DDLs4)
	c.Assert(cols, DeepEquals, []string{"c2"})
	c.Assert(l.versions, DeepEquals, vers)
	ready = l.Ready()
	c.Assert(ready[source][db][tbls[1]], IsFalse)
	joined, err = l.Joined()
	c.Assert(err, IsNil)
	cmp, err = l.tables[source][db][tbls[0]].Compare(joined)
	c.Assert(err, IsNil)
	c.Assert(cmp, Equals, 0)
	cmp, err = l.tables[source][db][tbls[1]].Compare(joined)
	c.Assert(err, IsNil)
	c.Assert(cmp, Equals, -1)

	// revert the rest of the DDLs.
	info = newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs5, ti4, []*model.TableInfo{ti5}, vers)
	DDLs, cols, err = l.TrySync(info, tts)
	c.Assert(err, IsNil)
	c.Assert(DDLs, DeepEquals, DDLs5)
	c.Assert(cols, DeepEquals, []string{"c1"})
	c.Assert(l.versions, DeepEquals, vers)
	t.checkLockSynced(c, l)
	t.checkLockNoDone(c, l)

	// Simulate watch done operation from dm-worker
	op = NewOperation(utils.GenDDLLockID(task, downSchema, downTable), task, source, db, tbls[0], DDLs4, ConflictNone, "", true, []string{"c2"})
	c.Assert(l.DeleteColumnsByOp(op), IsNil)
	op = NewOperation(utils.GenDDLLockID(task, downSchema, downTable), task, source, db, tbls[0], DDLs5, ConflictNone, "", true, []string{"c1"})
	c.Assert(l.DeleteColumnsByOp(op), IsNil)

	// CASE: revert part of multiple DDLs.
	// TrySync for one table.
	info = newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs6, ti0, []*model.TableInfo{ti7, ti6}, vers)
	DDLs, cols, err = l.TrySync(info, tts)
	c.Assert(err, IsNil)
	c.Assert(DDLs, DeepEquals, DDLs6)
	c.Assert(cols, DeepEquals, []string{})
	c.Assert(l.versions, DeepEquals, vers)
	ready = l.Ready()
	c.Assert(ready[source][db][tbls[1]], IsFalse)

	joined, err = l.Joined()
	c.Assert(err, IsNil)
	cmp, err = l.tables[source][db][tbls[0]].Compare(joined)
	c.Assert(err, IsNil)
	c.Assert(cmp, Equals, 0)
	cmp, err = l.tables[source][db][tbls[1]].Compare(joined)
	c.Assert(err, IsNil)
	c.Assert(cmp, Equals, -1)

	// revert part of the DDLs.
	info = newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs7, ti3, []*model.TableInfo{ti7}, vers)
	DDLs, cols, err = l.TrySync(info, tts)
	c.Assert(err, IsNil)
	c.Assert(DDLs, DeepEquals, DDLs7)
	c.Assert(cols, DeepEquals, []string{"c2"})
	c.Assert(l.versions, DeepEquals, vers)
	ready = l.Ready()
	c.Assert(ready[source][db][tbls[1]], IsFalse)

	joined, err = l.Joined()
	c.Assert(err, IsNil)
	cmp, err = l.tables[source][db][tbls[0]].Compare(joined)
	c.Assert(err, IsNil)
	c.Assert(cmp, Equals, 0)
	cmp, err = l.tables[source][db][tbls[1]].Compare(joined)
	c.Assert(err, IsNil)
	c.Assert(cmp, Equals, -1)

	// TrySync for another table.
900 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs8, ti0, []*model.TableInfo{ti8}, vers) 901 DDLs, cols, err = l.TrySync(info, tts) 902 c.Assert(err, IsNil) 903 c.Assert(DDLs, DeepEquals, DDLs8) 904 c.Assert(cols, DeepEquals, []string{}) 905 c.Assert(l.versions, DeepEquals, vers) 906 t.checkLockSynced(c, l) 907 t.checkLockNoDone(c, l) 908 } 909 910 func (t *testLock) TestLockTrySyncConflictNonIntrusive(c *C) { 911 var ( 912 ID = "test_lock_try_sync_conflict_non_intrusive-`foo`.`bar`" 913 task = "test_lock_try_sync_conflict_non_intrusive" 914 source = "mysql-replica-1" 915 downSchema = "foo" 916 downTable = "bar" 917 db = "foo" 918 tbls = []string{"bar1", "bar2"} 919 p = parser.New() 920 se = mock.NewContext() 921 tblID int64 = 111 922 DDLs1 = []string{"ALTER TABLE bar ADD COLUMN c1 TEXT"} 923 DDLs2 = []string{"ALTER TABLE bar ADD COLUMN c1 DATETIME", "ALTER TABLE bar ADD COLUMN c2 INT"} 924 DDLs3 = []string{"ALTER TABLE bar DROP COLUMN c1"} 925 DDLs4 = DDLs2 926 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY)`) 927 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 TEXT)`) 928 ti2 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 DATETIME, c2 INT)`) 929 ti2_1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 DATETIME)`) 930 ti3 = ti0 931 ti4 = ti2 932 ti4_1 = ti2_1 933 934 tables = map[string]map[string]struct{}{db: {tbls[0]: struct{}{}, tbls[1]: struct{}{}}} 935 tts = []TargetTable{newTargetTable(task, source, downSchema, downTable, tables)} 936 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 937 938 vers = map[string]map[string]map[string]int64{ 939 source: { 940 db: {tbls[0]: 0, tbls[1]: 0}, 941 }, 942 } 943 ) 944 945 // the initial status is synced. 946 t.checkLockSynced(c, l) 947 t.checkLockNoDone(c, l) 948 949 // TrySync for the first table, construct the joined schema. 950 info := newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 951 DDLs, cols, err := l.TrySync(info, tts) 952 c.Assert(err, IsNil) 953 c.Assert(DDLs, DeepEquals, DDLs1) 954 c.Assert(cols, DeepEquals, []string{}) 955 c.Assert(l.versions, DeepEquals, vers) 956 ready := l.Ready() 957 c.Assert(ready[source][db][tbls[0]], IsTrue) 958 c.Assert(ready[source][db][tbls[1]], IsFalse) 959 joined, err := l.Joined() 960 c.Assert(err, IsNil) 961 cmp, err := l.tables[source][db][tbls[0]].Compare(joined) 962 c.Assert(err, IsNil) 963 c.Assert(cmp, Equals, 0) 964 cmp, err = l.tables[source][db][tbls[1]].Compare(joined) 965 c.Assert(err, IsNil) 966 c.Assert(cmp, Equals, -1) 967 968 // TrySync for the second table with another schema (add two columns, one of them will cause conflict). 969 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs2, ti0, []*model.TableInfo{ti2_1, ti2}, vers) 970 DDLs, cols, err = l.TrySync(info, tts) 971 c.Assert(terror.ErrShardDDLOptimismTrySyncFail.Equal(err), IsTrue) 972 c.Assert(DDLs, DeepEquals, []string{}) 973 c.Assert(cols, DeepEquals, []string{}) 974 c.Assert(l.versions, DeepEquals, vers) 975 joined, err = l.Joined() 976 c.Assert(err, IsNil) 977 cmp, err = l.tables[source][db][tbls[1]].Compare(joined) 978 // join table isn't updated 979 c.Assert(err, IsNil) 980 c.Assert(cmp, Equals, -1) 981 ready = l.Ready() 982 c.Assert(ready[source][db][tbls[1]], IsFalse) 983 984 // TrySync for the first table to resolve the conflict. 
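	// dropping `c1` (TEXT) from the first table removes the conflict with the second table's `c1 DATETIME`.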
985 info = newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs3, ti1, []*model.TableInfo{ti3}, vers) 986 DDLs, cols, err = l.TrySync(info, tts) 987 c.Assert(err, IsNil) 988 c.Assert(DDLs, DeepEquals, DDLs3) 989 c.Assert(cols, DeepEquals, []string{"c1"}) 990 c.Assert(l.versions, DeepEquals, vers) 991 ready = l.Ready() // all table ready 992 c.Assert(ready[source][db][tbls[0]], IsTrue) 993 c.Assert(ready[source][db][tbls[1]], IsTrue) 994 joined, err = l.Joined() 995 c.Assert(err, IsNil) 996 cmp, err = l.tables[source][db][tbls[0]].Compare(joined) 997 c.Assert(err, IsNil) 998 c.Assert(cmp, Equals, 0) 999 cmp, err = l.tables[source][db][tbls[1]].Compare(joined) 1000 c.Assert(err, IsNil) 1001 c.Assert(cmp, Equals, 0) 1002 1003 // TrySync for the second table, succeed now 1004 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs2, ti0, []*model.TableInfo{ti2_1, ti2}, vers) 1005 DDLs, cols, err = l.TrySync(info, tts) 1006 c.Assert(err, IsNil) 1007 c.Assert(DDLs, DeepEquals, DDLs2) 1008 c.Assert(cols, DeepEquals, []string{}) 1009 c.Assert(l.versions, DeepEquals, vers) 1010 joined, err = l.Joined() 1011 c.Assert(err, IsNil) 1012 cmp, err = l.tables[source][db][tbls[1]].Compare(joined) 1013 c.Assert(err, IsNil) 1014 c.Assert(cmp, Equals, 0) 1015 ready = l.Ready() 1016 c.Assert(ready[source][db][tbls[1]], IsTrue) 1017 1018 // Simulate watch done operation from dm-worker 1019 op := NewOperation(utils.GenDDLLockID(task, downSchema, downTable), task, source, db, tbls[0], DDLs3, ConflictNone, "", true, []string{"c1"}) 1020 c.Assert(l.DeleteColumnsByOp(op), IsNil) 1021 1022 // TrySync for the first table. 1023 info = newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs4, ti0, []*model.TableInfo{ti4_1, ti4}, vers) 1024 DDLs, cols, err = l.TrySync(info, tts) 1025 c.Assert(err, IsNil) 1026 c.Assert(DDLs, DeepEquals, DDLs4) 1027 c.Assert(cols, DeepEquals, []string{}) 1028 c.Assert(l.versions, DeepEquals, vers) 1029 t.checkLockSynced(c, l) 1030 t.checkLockNoDone(c, l) 1031 } 1032 1033 func (t *testLock) TestLockTrySyncConflictIntrusive(c *C) { 1034 var ( 1035 ID = "test_lock_try_sync_conflict_intrusive-`foo`.`bar`" 1036 task = "test_lock_try_sync_conflict_intrusive" 1037 source = "mysql-replica-1" 1038 downSchema = "foo" 1039 downTable = "bar" 1040 db = "foo" 1041 tbls = []string{"bar1", "bar2"} 1042 p = parser.New() 1043 se = mock.NewContext() 1044 tblID int64 = 111 1045 DDLs1 = []string{"ALTER TABLE bar ADD COLUMN c1 TEXT"} 1046 DDLs2 = []string{"ALTER TABLE bar ADD COLUMN c1 DATETIME", "ALTER TABLE bar ADD COLUMN c2 INT"} 1047 DDLs3 = []string{"ALTER TABLE bar ADD COLUMN c1 DATETIME"} 1048 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY)`) 1049 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 TEXT)`) 1050 ti2 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 DATETIME, c2 INT)`) 1051 ti3 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 DATETIME)`) 1052 1053 DDLs5 = []string{"ALTER TABLE bar ADD COLUMN c2 TEXT"} 1054 DDLs6 = []string{"ALTER TABLE bar ADD COLUMN c2 DATETIME", "ALTER TABLE bar ADD COLUMN c3 INT"} 1055 DDLs7 = []string{"ALTER TABLE bar ADD COLUMN c3 INT"} 1056 DDLs8_1 = DDLs7 1057 DDLs8_2 = DDLs5 1058 ti5 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 TEXT, c2 TEXT)`) 1059 ti6 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 TEXT, c2 DATETIME, c3 INT)`) 
1060 ti6_1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 TEXT, c2 DATETIME)`) 1061 ti7 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 TEXT, c3 INT)`) 1062 ti8 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 TEXT, c2 TEXT, c3 INT)`) 1063 1064 tables = map[string]map[string]struct{}{db: {tbls[0]: struct{}{}, tbls[1]: struct{}{}}} 1065 tts = []TargetTable{newTargetTable(task, source, downSchema, downTable, tables)} 1066 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 1067 1068 vers = map[string]map[string]map[string]int64{ 1069 source: { 1070 db: {tbls[0]: 0, tbls[1]: 0}, 1071 }, 1072 } 1073 ) 1074 1075 // the initial status is synced. 1076 t.checkLockSynced(c, l) 1077 t.checkLockNoDone(c, l) 1078 1079 // CASE: conflict happen, revert all changes to resolve the conflict. 1080 // TrySync for the first table, construct the joined schema. 1081 info := newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 1082 DDLs, cols, err := l.TrySync(info, tts) 1083 c.Assert(err, IsNil) 1084 c.Assert(DDLs, DeepEquals, DDLs1) 1085 c.Assert(cols, DeepEquals, []string{}) 1086 c.Assert(l.versions, DeepEquals, vers) 1087 ready := l.Ready() 1088 c.Assert(ready[source][db][tbls[0]], IsTrue) 1089 c.Assert(ready[source][db][tbls[1]], IsFalse) 1090 joined, err := l.Joined() 1091 c.Assert(err, IsNil) 1092 cmp, err := l.tables[source][db][tbls[0]].Compare(joined) 1093 c.Assert(err, IsNil) 1094 c.Assert(cmp, Equals, 0) 1095 cmp, err = l.tables[source][db][tbls[1]].Compare(joined) 1096 c.Assert(err, IsNil) 1097 c.Assert(cmp, Equals, -1) 1098 1099 // TrySync for the second table with another schema (add two columns, one of them will cause conflict). 1100 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs2, ti0, []*model.TableInfo{ti3, ti2}, vers) 1101 DDLs, cols, err = l.TrySync(info, tts) 1102 c.Assert(terror.ErrShardDDLOptimismTrySyncFail.Equal(err), IsTrue) 1103 c.Assert(DDLs, DeepEquals, []string{}) 1104 c.Assert(cols, DeepEquals, []string{}) 1105 c.Assert(l.versions, DeepEquals, vers) 1106 joined, err = l.Joined() 1107 c.Assert(err, IsNil) 1108 cmp, err = l.tables[source][db][tbls[1]].Compare(joined) 1109 // join table isn't updated 1110 c.Assert(err, IsNil) 1111 c.Assert(cmp, Equals, -1) 1112 ready = l.Ready() 1113 c.Assert(ready[source][db][tbls[1]], IsFalse) 1114 1115 // TrySync again. 1116 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs2, ti0, []*model.TableInfo{ti3, ti2}, vers) 1117 DDLs, cols, err = l.TrySync(info, tts) 1118 c.Assert(terror.ErrShardDDLOptimismTrySyncFail.Equal(err), IsTrue) 1119 c.Assert(DDLs, DeepEquals, []string{}) 1120 c.Assert(cols, DeepEquals, []string{}) 1121 c.Assert(l.versions, DeepEquals, vers) 1122 joined, err = l.Joined() 1123 c.Assert(err, IsNil) 1124 cmp, err = l.tables[source][db][tbls[1]].Compare(joined) 1125 c.Assert(err, IsNil) 1126 c.Assert(cmp, Equals, -1) 1127 1128 // TrySync for the second table to replace a new ddl without non-conflict column, the conflict should still exist. 
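	// (DDLs3 only adds `c1 DATETIME`, which still conflicts with the first table's `c1 TEXT`.)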
1129 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs3, ti0, []*model.TableInfo{ti3}, vers) 1130 DDLs, cols, err = l.TrySync(info, tts) 1131 c.Assert(terror.ErrShardDDLOptimismTrySyncFail.Equal(err), IsTrue) 1132 c.Assert(DDLs, DeepEquals, []string{}) 1133 c.Assert(cols, DeepEquals, []string{}) 1134 c.Assert(l.versions, DeepEquals, vers) 1135 joined, err = l.Joined() 1136 c.Assert(err, IsNil) 1137 cmp, err = l.tables[source][db][tbls[1]].Compare(joined) 1138 c.Assert(err, IsNil) 1139 c.Assert(cmp, Equals, -1) 1140 ready = l.Ready() 1141 c.Assert(ready[source][db][tbls[1]], IsFalse) 1142 1143 // TrySync for the second table as we did for the first table, the lock should be synced. 1144 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 1145 DDLs, cols, err = l.TrySync(info, tts) 1146 c.Assert(err, IsNil) 1147 c.Assert(DDLs, DeepEquals, DDLs) 1148 c.Assert(cols, DeepEquals, []string{}) 1149 c.Assert(l.versions, DeepEquals, vers) 1150 joined, err = l.Joined() 1151 c.Assert(err, IsNil) 1152 cmp, err = l.tables[source][db][tbls[1]].Compare(joined) 1153 c.Assert(err, IsNil) 1154 c.Assert(cmp, Equals, 0) 1155 t.checkLockSynced(c, l) 1156 t.checkLockNoDone(c, l) 1157 1158 // CASE: conflict happen, revert part of changes to resolve the conflict. 1159 // TrySync for the first table, construct the joined schema. 1160 info = newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs5, ti1, []*model.TableInfo{ti5}, vers) 1161 DDLs, cols, err = l.TrySync(info, tts) 1162 c.Assert(err, IsNil) 1163 c.Assert(DDLs, DeepEquals, DDLs5) 1164 c.Assert(cols, DeepEquals, []string{}) 1165 c.Assert(l.versions, DeepEquals, vers) 1166 ready = l.Ready() 1167 c.Assert(ready[source][db][tbls[0]], IsTrue) 1168 c.Assert(ready[source][db][tbls[1]], IsFalse) 1169 joined, err = l.Joined() 1170 c.Assert(err, IsNil) 1171 cmp, err = l.tables[source][db][tbls[0]].Compare(joined) 1172 c.Assert(err, IsNil) 1173 c.Assert(cmp, Equals, 0) 1174 cmp, err = l.tables[source][db][tbls[1]].Compare(joined) 1175 c.Assert(err, IsNil) 1176 c.Assert(cmp, Equals, -1) 1177 1178 // TrySync for the second table with another schema (add two columns, one of them will cause conflict). 1179 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs6, ti1, []*model.TableInfo{ti6_1, ti6}, vers) 1180 DDLs, cols, err = l.TrySync(info, tts) 1181 c.Assert(terror.ErrShardDDLOptimismTrySyncFail.Equal(err), IsTrue) 1182 c.Assert(DDLs, DeepEquals, []string{}) 1183 c.Assert(cols, DeepEquals, []string{}) 1184 joined, err = l.Joined() 1185 c.Assert(err, IsNil) 1186 cmp, err = l.tables[source][db][tbls[1]].Compare(joined) 1187 c.Assert(err, IsNil) 1188 c.Assert(cmp, Equals, -1) 1189 c.Assert(l.versions, DeepEquals, vers) 1190 ready = l.Ready() 1191 c.Assert(ready[source][db][tbls[1]], IsFalse) 1192 1193 // TrySync for the second table to replace a new ddl without conflict column, the conflict should be resolved. 1194 // but both of tables are not synced now. 
1195 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs7, ti1, []*model.TableInfo{ti7}, vers) 1196 DDLs, cols, err = l.TrySync(info, tts) 1197 c.Assert(err, IsNil) 1198 c.Assert(DDLs, DeepEquals, DDLs7) 1199 c.Assert(cols, DeepEquals, []string{}) 1200 c.Assert(l.versions, DeepEquals, vers) 1201 ready = l.Ready() 1202 c.Assert(ready[source][db][tbls[0]], IsFalse) 1203 c.Assert(ready[source][db][tbls[1]], IsFalse) 1204 joined, err = l.Joined() 1205 c.Assert(err, IsNil) 1206 cmp, err = l.tables[source][db][tbls[0]].Compare(joined) 1207 c.Assert(err, IsNil) 1208 c.Assert(cmp, Equals, -1) 1209 cmp, err = l.tables[source][db][tbls[1]].Compare(joined) 1210 c.Assert(err, IsNil) 1211 c.Assert(cmp, Equals, -1) 1212 1213 // TrySync for the first table to become synced. 1214 info = newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs8_1, ti5, []*model.TableInfo{ti8}, vers) 1215 DDLs, cols, err = l.TrySync(info, tts) 1216 c.Assert(err, IsNil) 1217 c.Assert(DDLs, DeepEquals, DDLs8_1) 1218 c.Assert(cols, DeepEquals, []string{}) 1219 ready = l.Ready() 1220 c.Assert(ready[source][db][tbls[0]], IsTrue) 1221 1222 // TrySync for the second table to become synced. 1223 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs8_2, ti7, []*model.TableInfo{ti8}, vers) 1224 DDLs, cols, err = l.TrySync(info, tts) 1225 c.Assert(err, IsNil) 1226 c.Assert(DDLs, DeepEquals, DDLs8_2) 1227 c.Assert(cols, DeepEquals, []string{}) 1228 ready = l.Ready() 1229 c.Assert(ready[source][db][tbls[1]], IsTrue) 1230 1231 // all tables synced now. 1232 t.checkLockSynced(c, l) 1233 t.checkLockNoDone(c, l) 1234 } 1235 1236 func (t *testLock) TestLockTrySyncMultipleChangeDDL(c *C) { 1237 var ( 1238 ID = "test_lock_try_sync_normal-`foo`.`bar`" 1239 task = "test_lock_try_sync_normal" 1240 sources = []string{"mysql-replica-1", "mysql-replica-2"} 1241 downSchema = "db" 1242 downTable = "bar" 1243 dbs = []string{"db1", "db2"} 1244 tbls = []string{"bar1", "bar2"} 1245 tableCount = len(sources) * len(dbs) * len(tbls) 1246 p = parser.New() 1247 se = mock.NewContext() 1248 tblID int64 = 111 1249 DDLs1 = []string{"ALTER TABLE bar ADD COLUMN c2 INT", "ALTER TABLE bar DROP COLUMN c1"} 1250 DDLs2 = []string{"ALTER TABLE bar DROP COLUMN c2", "ALTER TABLE bar ADD COLUMN c3 TEXT"} 1251 // DDLs3 = []string{"ALTER TABLE bar DROP COLUMN c3"} 1252 // DDLs4 = []string{"ALTER TABLE bar DROP COLUMN c2", "ALTER TABLE bar DROP COLUMN c1"} 1253 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT)`) 1254 ti1_1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT, c2 INT)`) 1255 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c2 INT)`) 1256 ti2 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c3 TEXT)`) 1257 ti2_1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY)`) 1258 // ti3 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT, c2 BIGINT)`) 1259 // ti4 = ti0 1260 // ti4_1 = ti1 1261 tables = map[string]map[string]struct{}{ 1262 dbs[0]: {tbls[0]: struct{}{}, tbls[1]: struct{}{}}, 1263 dbs[1]: {tbls[0]: struct{}{}, tbls[1]: struct{}{}}, 1264 } 1265 tts = []TargetTable{ 1266 newTargetTable(task, sources[0], downSchema, downTable, tables), 1267 newTargetTable(task, sources[1], downSchema, downTable, tables), 1268 } 1269 1270 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 1271 1272 vers 
= map[string]map[string]map[string]int64{ 1273 sources[0]: { 1274 dbs[0]: {tbls[0]: 0, tbls[1]: 0}, 1275 dbs[1]: {tbls[0]: 0, tbls[1]: 0}, 1276 }, 1277 sources[1]: { 1278 dbs[0]: {tbls[0]: 0, tbls[1]: 0}, 1279 dbs[1]: {tbls[0]: 0, tbls[1]: 0}, 1280 }, 1281 } 1282 ) 1283 1284 // the initial status is synced. 1285 t.checkLockSynced(c, l) 1286 t.checkLockNoDone(c, l) 1287 1288 // inconsistent ddls and table infos 1289 info := newInfoWithVersion(task, sources[0], dbs[0], tbls[0], downSchema, downTable, DDLs1[:1], ti0, []*model.TableInfo{ti1_1, ti1}, vers) 1290 DDLs, cols, err := l.TrySync(info, tts) 1291 c.Assert(DDLs, DeepEquals, []string{}) 1292 c.Assert(cols, DeepEquals, []string{}) 1293 c.Assert(terror.ErrMasterInconsistentOptimisticDDLsAndInfo.Equal(err), IsTrue) 1294 1295 info = newInfoWithVersion(task, sources[0], dbs[0], tbls[0], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 1296 DDLs, cols, err = l.TrySync(info, tts) 1297 c.Assert(DDLs, DeepEquals, []string{}) 1298 c.Assert(cols, DeepEquals, []string{}) 1299 c.Assert(terror.ErrMasterInconsistentOptimisticDDLsAndInfo.Equal(err), IsTrue) 1300 1301 t.checkLockSynced(c, l) 1302 t.checkLockNoDone(c, l) 1303 1304 // CASE: all tables execute a same multiple change DDLs1 1305 syncedCount := 0 1306 resultDDLs1 := map[string]map[string]map[string][]string{ 1307 sources[0]: { 1308 dbs[0]: {tbls[0]: DDLs1[:1], tbls[1]: DDLs1[:1]}, 1309 dbs[1]: {tbls[0]: DDLs1[:1], tbls[1]: DDLs1[:1]}, 1310 }, 1311 sources[1]: { 1312 dbs[0]: {tbls[0]: DDLs1[:1], tbls[1]: DDLs1[:1]}, 1313 dbs[1]: {tbls[0]: DDLs1[:1], tbls[1]: DDLs1}, // only last table sync DROP COLUMN 1314 }, 1315 } 1316 for _, source := range sources { 1317 for _, db := range dbs { 1318 for _, tbl := range tbls { 1319 info = newInfoWithVersion(task, source, db, tbl, downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1_1, ti1}, vers) 1320 DDLs, cols, err = l.TrySync(info, tts) 1321 c.Assert(err, IsNil) 1322 c.Assert(DDLs, DeepEquals, resultDDLs1[source][db][tbl]) 1323 c.Assert(cols, DeepEquals, []string{"c1"}) 1324 c.Assert(l.versions, DeepEquals, vers) 1325 1326 syncedCount++ 1327 synced, _ := l.IsSynced() 1328 c.Assert(synced, Equals, syncedCount == tableCount) 1329 c.Assert(synced, Equals, l.synced) 1330 } 1331 } 1332 } 1333 // synced again after all tables applied the DDL. 1334 t.checkLockSynced(c, l) 1335 t.checkLockNoDone(c, l) 1336 1337 // CASE: TrySync again after synced is idempotent. 
1338 // both ddl will sync again 1339 info = newInfoWithVersion(task, sources[0], dbs[0], tbls[0], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1_1, ti1}, vers) 1340 DDLs, cols, err = l.TrySync(info, tts) 1341 c.Assert(err, IsNil) 1342 c.Assert(DDLs, DeepEquals, DDLs1) 1343 c.Assert(cols, DeepEquals, []string{"c1"}) 1344 c.Assert(l.versions, DeepEquals, vers) 1345 t.checkLockSynced(c, l) 1346 t.checkLockNoDone(c, l) 1347 1348 // CASE: all tables execute a same multiple change DDLs2 1349 syncedCount = 0 1350 resultDDLs2 := map[string]map[string]map[string][]string{ 1351 sources[0]: { 1352 dbs[0]: {tbls[0]: DDLs2[1:], tbls[1]: DDLs2[1:]}, 1353 dbs[1]: {tbls[0]: DDLs2[1:], tbls[1]: DDLs2[1:]}, 1354 }, 1355 sources[1]: { 1356 dbs[0]: {tbls[0]: DDLs2[1:], tbls[1]: DDLs2[1:]}, 1357 dbs[1]: {tbls[0]: DDLs2[1:], tbls[1]: DDLs2}, // only last table sync DROP COLUMN 1358 }, 1359 } 1360 for _, source := range sources { 1361 for _, db := range dbs { 1362 for _, tbl := range tbls { 1363 info = newInfoWithVersion(task, source, db, tbl, downSchema, downTable, DDLs2, ti1, []*model.TableInfo{ti2_1, ti2}, vers) 1364 DDLs, cols, err = l.TrySync(info, tts) 1365 c.Assert(err, IsNil) 1366 c.Assert(DDLs, DeepEquals, resultDDLs2[source][db][tbl]) 1367 c.Assert(cols, DeepEquals, []string{"c2"}) 1368 c.Assert(l.versions, DeepEquals, vers) 1369 1370 syncedCount++ 1371 synced, _ := l.IsSynced() 1372 c.Assert(synced, Equals, syncedCount == tableCount) 1373 c.Assert(synced, Equals, l.synced) 1374 } 1375 } 1376 } 1377 // synced again after all tables applied the DDL. 1378 t.checkLockSynced(c, l) 1379 t.checkLockNoDone(c, l) 1380 1381 // CASE: TrySync again after synced is idempotent. 1382 info = newInfoWithVersion(task, sources[0], dbs[0], tbls[0], downSchema, downTable, DDLs2, ti1, []*model.TableInfo{ti2_1, ti2}, vers) 1383 DDLs, cols, err = l.TrySync(info, tts) 1384 c.Assert(err, IsNil) 1385 c.Assert(DDLs, DeepEquals, DDLs2) 1386 c.Assert(cols, DeepEquals, []string{"c2"}) 1387 c.Assert(l.versions, DeepEquals, vers) 1388 t.checkLockSynced(c, l) 1389 t.checkLockNoDone(c, l) 1390 } 1391 1392 func (t *testLock) TestTryRemoveTable(c *C) { 1393 var ( 1394 ID = "test_lock_try_remove_table-`foo`.`bar`" 1395 task = "test_lock_try_remove_table" 1396 source = "mysql-replica-1" 1397 downSchema = "foo" 1398 downTable = "bar" 1399 db = "foo" 1400 tbl1 = "bar1" 1401 tbl2 = "bar2" 1402 p = parser.New() 1403 se = mock.NewContext() 1404 tblID int64 = 111 1405 DDLs1 = []string{"ALTER TABLE bar ADD COLUMN c1 INT"} 1406 DDLs2 = []string{"ALTER TABLE bar ADD COLUMN c2 INT"} 1407 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY)`) 1408 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT)`) 1409 ti2 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT, c2 INT)`) 1410 1411 tables = map[string]map[string]struct{}{db: {tbl1: struct{}{}, tbl2: struct{}{}}} 1412 tts = []TargetTable{newTargetTable(task, source, downSchema, downTable, tables)} 1413 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 1414 1415 vers = map[string]map[string]map[string]int64{ 1416 source: { 1417 db: {tbl1: 0, tbl2: 0}, 1418 }, 1419 } 1420 ) 1421 1422 // only one table exists before TrySync. 1423 t.checkLockSynced(c, l) 1424 t.checkLockNoDone(c, l) 1425 1426 // CASE: remove a table as normal. 1427 // TrySync for the first table. 
1428 info := newInfoWithVersion(task, source, db, tbl1, downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 1429 DDLs, cols, err := l.TrySync(info, tts) 1430 c.Assert(err, IsNil) 1431 c.Assert(DDLs, DeepEquals, DDLs1) 1432 c.Assert(cols, DeepEquals, []string{}) 1433 c.Assert(l.versions, DeepEquals, vers) 1434 ready := l.Ready() 1435 c.Assert(ready, HasLen, 1) 1436 c.Assert(ready[source], HasLen, 1) 1437 c.Assert(ready[source][db], HasLen, 2) 1438 c.Assert(ready[source][db][tbl1], IsTrue) 1439 c.Assert(ready[source][db][tbl2], IsFalse) 1440 1441 // TryRemoveTable for the second table. 1442 l.columns = map[string]map[string]map[string]map[string]DropColumnStage{ 1443 "col": { 1444 source: {db: {tbl2: DropNotDone}}, 1445 }, 1446 } 1447 col := l.TryRemoveTable(source, db, tbl2) 1448 c.Assert(col, DeepEquals, []string{"col"}) 1449 delete(vers[source][db], tbl2) 1450 ready = l.Ready() 1451 c.Assert(ready, HasLen, 1) 1452 c.Assert(ready[source], HasLen, 1) 1453 c.Assert(ready[source][db], HasLen, 1) 1454 c.Assert(ready[source][db][tbl1], IsTrue) 1455 c.Assert(l.versions, DeepEquals, vers) 1456 1457 // CASE: removing a table rebuilds the joined schema now. 1458 // TrySync to add the second back. 1459 vers[source][db][tbl2] = 0 1460 info = newInfoWithVersion(task, source, db, tbl2, downSchema, downTable, DDLs2, ti1, []*model.TableInfo{ti2}, vers) 1461 DDLs, cols, err = l.TrySync(info, tts) 1462 c.Assert(err, IsNil) 1463 c.Assert(DDLs, DeepEquals, DDLs2) 1464 c.Assert(cols, DeepEquals, []string{}) 1465 c.Assert(l.versions, DeepEquals, vers) 1466 ready = l.Ready() 1467 c.Assert(ready, HasLen, 1) 1468 c.Assert(ready[source], HasLen, 1) 1469 c.Assert(ready[source][db], HasLen, 2) 1470 c.Assert(ready[source][db][tbl1], IsFalse) 1471 c.Assert(ready[source][db][tbl2], IsTrue) 1472 1473 // TryRemoveTable for the second table. 1474 c.Assert(l.TryRemoveTable(source, db, tbl2), HasLen, 0) 1475 delete(vers[source][db], tbl2) 1476 ready = l.Ready() 1477 c.Assert(ready, HasLen, 1) 1478 c.Assert(ready[source], HasLen, 1) 1479 c.Assert(ready[source][db], HasLen, 1) 1480 c.Assert(ready[source][db][tbl1], IsTrue) // the joined schema is rebuilt. 1481 c.Assert(l.versions, DeepEquals, vers) 1482 1483 // CASE: try to remove a table that does not exist.
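// removing an unknown table, schema, or source returns no dropped columns.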
1484 c.Assert(l.TryRemoveTable(source, db, "not-exist"), HasLen, 0) 1485 c.Assert(l.TryRemoveTable(source, "not-exist", tbl1), HasLen, 0) 1486 c.Assert(l.TryRemoveTable("not-exist", db, tbl1), HasLen, 0) 1487 } 1488 1489 func (t *testLock) TestTryRemoveTableWithSources(c *C) { 1490 var ( 1491 ID = "test_lock_try_remove_table-`foo`.`bar`" 1492 task = "test_lock_try_remove_table" 1493 source1 = "mysql-replica-1" 1494 source2 = "mysql-replica-2" 1495 downSchema = "foo" 1496 downTable = "bar" 1497 db = "foo" 1498 tbl1 = "bar1" 1499 tbl2 = "bar2" 1500 p = parser.New() 1501 se = mock.NewContext() 1502 tblID int64 = 111 1503 DDLs1 = []string{"ALTER TABLE bar DROP COLUMN c1"} 1504 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT)`) 1505 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY)`) 1506 1507 tables = map[string]map[string]struct{}{db: {tbl1: struct{}{}, tbl2: struct{}{}}} 1508 tts = []TargetTable{newTargetTable(task, source1, downSchema, downTable, tables), newTargetTable(task, source2, downSchema, downTable, tables)} 1509 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 1510 1511 vers = map[string]map[string]map[string]int64{ 1512 source1: { 1513 db: {tbl1: 0, tbl2: 0}, 1514 }, 1515 source2: { 1516 db: {tbl1: 0, tbl2: 0}, 1517 }, 1518 } 1519 ) 1520 1521 // only one table exists before TrySync. 1522 t.checkLockSynced(c, l) 1523 t.checkLockNoDone(c, l) 1524 1525 // TrySync for the first table. 1526 info := newInfoWithVersion(task, source1, db, tbl1, downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 1527 DDLs, cols, err := l.TrySync(info, tts) 1528 c.Assert(err, IsNil) 1529 c.Assert(DDLs, DeepEquals, []string{}) 1530 c.Assert(cols, DeepEquals, []string{"c1"}) 1531 c.Assert(l.versions, DeepEquals, vers) 1532 ready := l.Ready() 1533 c.Assert(ready, HasLen, 2) 1534 c.Assert(ready[source1], HasLen, 1) 1535 c.Assert(ready[source1][db], HasLen, 2) 1536 c.Assert(ready[source1][db][tbl1], IsFalse) 1537 c.Assert(ready[source1][db][tbl2], IsTrue) 1538 c.Assert(ready[source2], HasLen, 1) 1539 c.Assert(ready[source2][db], HasLen, 2) 1540 c.Assert(ready[source2][db][tbl1], IsTrue) 1541 c.Assert(ready[source2][db][tbl2], IsTrue) 1542 1543 // TryRemoveTableBySources with nil 1544 c.Assert(len(l.TryRemoveTableBySources(nil)), Equals, 0) 1545 ready = l.Ready() 1546 c.Assert(ready, HasLen, 2) 1547 1548 // TryRemoveTableBySources with wrong source 1549 tts = tts[:1] 1550 c.Assert(len(l.TryRemoveTableBySources([]string{"hahaha"})), Equals, 0) 1551 ready = l.Ready() 1552 c.Assert(ready, HasLen, 2) 1553 1554 // TryRemoveTableBySources with source2 1555 c.Assert(len(l.TryRemoveTableBySources([]string{source2})), Equals, 0) 1556 ready = l.Ready() 1557 c.Assert(ready, HasLen, 1) 1558 c.Assert(ready[source1], HasLen, 1) 1559 c.Assert(ready[source1][db], HasLen, 2) 1560 c.Assert(ready[source1][db][tbl1], IsFalse) 1561 c.Assert(ready[source1][db][tbl2], IsTrue) 1562 delete(vers, source2) 1563 c.Assert(l.versions, DeepEquals, vers) 1564 c.Assert(l.HasTables(), IsTrue) 1565 1566 // TrySync with second table 1567 info = newInfoWithVersion(task, source1, db, tbl2, downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 1568 DDLs, cols, err = l.TrySync(info, tts) 1569 c.Assert(err, IsNil) 1570 c.Assert(DDLs, DeepEquals, DDLs1) 1571 c.Assert(cols, DeepEquals, []string{"c1"}) 1572 c.Assert(l.versions, DeepEquals, vers) 1573 ready = l.Ready() 1574 c.Assert(ready, HasLen, 1) 1575 
c.Assert(ready[source1], HasLen, 1) 1576 c.Assert(ready[source1][db], HasLen, 2) 1577 c.Assert(ready[source1][db][tbl1], IsTrue) 1578 c.Assert(ready[source1][db][tbl2], IsTrue) 1579 1580 // TryRemoveTableBySources with source1 (now both sources have been removed) 1581 cols = l.TryRemoveTableBySources([]string{source1}) 1582 c.Assert(cols, DeepEquals, []string{"c1"}) 1583 c.Assert(l.HasTables(), IsFalse) 1584 } 1585 1586 func (t *testLock) TestLockTryMarkDone(c *C) { 1587 var ( 1588 ID = "test_lock_try_mark_done-`foo`.`bar`" 1589 task = "test_lock_try_mark_done" 1590 source = "mysql-replica-1" 1591 downSchema = "foo" 1592 downTable = "bar" 1593 db = "foo" 1594 tbls = []string{"bar1", "bar2"} 1595 p = parser.New() 1596 se = mock.NewContext() 1597 tblID int64 = 111 1598 DDLs1 = []string{"ALTER TABLE bar ADD COLUMN c1 INT"} 1599 DDLs2 = []string{"ALTER TABLE bar ADD COLUMN c1 INT", "ALTER TABLE bar ADD COLUMN c2 INT"} 1600 DDLs3 = []string{"ALTER TABLE bar ADD COLUMN c2 INT"} 1601 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY)`) 1602 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT)`) 1603 ti2 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT, c2 INT)`) 1604 ti3 = ti2 1605 1606 tables = map[string]map[string]struct{}{db: {tbls[0]: struct{}{}, tbls[1]: struct{}{}}} 1607 tts = []TargetTable{newTargetTable(task, source, downSchema, downTable, tables)} 1608 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 1609 1610 vers = map[string]map[string]map[string]int64{ 1611 source: { 1612 db: {tbls[0]: 0, tbls[1]: 0}, 1613 }, 1614 } 1615 ) 1616 1617 // the initial status is synced but not resolved. 1618 t.checkLockSynced(c, l) 1619 t.checkLockNoDone(c, l) 1620 c.Assert(l.IsResolved(), IsFalse) 1621 1622 // TrySync for the first table, no table has done the DDLs operation. 1623 info := newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 1624 DDLs, cols, err := l.TrySync(info, tts) 1625 c.Assert(err, IsNil) 1626 c.Assert(DDLs, DeepEquals, DDLs1) 1627 c.Assert(cols, DeepEquals, []string{}) 1628 c.Assert(l.versions, DeepEquals, vers) 1629 t.checkLockNoDone(c, l) 1630 c.Assert(l.IsResolved(), IsFalse) 1631 1632 // mark done for the synced table, the lock is unresolved. 1633 c.Assert(l.TryMarkDone(source, db, tbls[0]), IsTrue) 1634 c.Assert(l.IsDone(source, db, tbls[0]), IsTrue) 1635 c.Assert(l.IsDone(source, db, tbls[1]), IsFalse) 1636 c.Assert(l.IsResolved(), IsFalse) 1637 1638 // TrySync for the second table, the joined schema becomes larger. 1639 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs2, ti0, []*model.TableInfo{ti1, ti2}, vers) 1640 DDLs, cols, err = l.TrySync(info, tts) 1641 c.Assert(err, IsNil) 1642 c.Assert(DDLs, DeepEquals, DDLs2) 1643 c.Assert(cols, DeepEquals, []string{}) 1644 c.Assert(l.versions, DeepEquals, vers) 1645 1646 // the first table still keeps `done` (for the previous DDLs operation) 1647 c.Assert(l.IsDone(source, db, tbls[0]), IsTrue) 1648 c.Assert(l.IsDone(source, db, tbls[1]), IsFalse) 1649 c.Assert(l.IsResolved(), IsFalse) 1650 1651 // mark done for the second table, both of them are done (for different DDLs operations) 1652 c.Assert(l.TryMarkDone(source, db, tbls[1]), IsTrue) 1653 c.Assert(l.IsDone(source, db, tbls[0]), IsTrue) 1654 c.Assert(l.IsDone(source, db, tbls[1]), IsTrue) 1655 // but the lock is still not resolved because tables have different schemas.
1656 c.Assert(l.IsResolved(), IsFalse) 1657 1658 // TrySync for the first table, all tables become synced. 1659 info = newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs3, ti1, []*model.TableInfo{ti3}, vers) 1660 DDLs, cols, err = l.TrySync(info, tts) 1661 c.Assert(err, IsNil) 1662 c.Assert(DDLs, DeepEquals, DDLs3) 1663 c.Assert(cols, DeepEquals, []string{}) 1664 c.Assert(l.versions, DeepEquals, vers) 1665 1666 // the first table becomes not-done, and the lock is unresolved. 1667 c.Assert(l.IsDone(source, db, tbls[0]), IsFalse) 1668 c.Assert(l.IsDone(source, db, tbls[1]), IsTrue) 1669 c.Assert(l.IsResolved(), IsFalse) 1670 1671 // mark done for the first table. 1672 c.Assert(l.TryMarkDone(source, db, tbls[0]), IsTrue) 1673 c.Assert(l.IsDone(source, db, tbls[0]), IsTrue) 1674 c.Assert(l.IsDone(source, db, tbls[1]), IsTrue) 1675 1676 // the lock becomes resolved now. 1677 c.Assert(l.IsResolved(), IsTrue) 1678 1679 // TryMarkDone for a non-existent table takes no effect. 1680 c.Assert(l.TryMarkDone(source, db, "not-exist"), IsFalse) 1681 c.Assert(l.TryMarkDone(source, "not-exist", tbls[0]), IsFalse) 1682 c.Assert(l.TryMarkDone("not-exist", db, tbls[0]), IsFalse) 1683 1684 // check that IsDone for a non-existent table returns false. 1685 c.Assert(l.IsDone(source, db, "not-exist"), IsFalse) 1686 c.Assert(l.IsDone(source, "not-exist", tbls[0]), IsFalse) 1687 c.Assert(l.IsDone("not-exist", db, tbls[0]), IsFalse) 1688 } 1689 1690 func (t *testLock) TestAddDifferentFieldLenColumns(c *C) { 1691 var ( 1692 ID = "test_lock_add_diff_flen_cols-`foo`.`bar`" 1693 task = "test_lock_add_diff_flen_cols" 1694 source = "mysql-replica-1" 1695 downSchema = "foo" 1696 downTable = "bar" 1697 db = "foo" 1698 tbls = []string{"bar1", "bar2"} 1699 p = parser.New() 1700 se = mock.NewContext() 1701 1702 tblID int64 = 111 1703 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY)`) 1704 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 VARCHAR(4))`) 1705 ti2 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 VARCHAR(5))`) 1706 1707 DDLs1 = []string{"ALTER TABLE bar ADD COLUMN c1 VARCHAR(4)"} 1708 DDLs2 = []string{"ALTER TABLE bar ADD COLUMN c1 VARCHAR(5)"} 1709 1710 table1 = schemacmp.Encode(ti0) 1711 table2 = schemacmp.Encode(ti1) 1712 table3 = schemacmp.Encode(ti2) 1713 1714 tables = map[string]map[string]struct{}{db: {tbls[0]: struct{}{}, tbls[1]: struct{}{}}} 1715 tts = []TargetTable{newTargetTable(task, source, downSchema, downTable, tables)} 1716 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 1717 1718 vers = map[string]map[string]map[string]int64{ 1719 source: { 1720 db: {tbls[0]: 0, tbls[1]: 0}, 1721 }, 1722 } 1723 ) 1724 col, err := AddDifferentFieldLenColumns(ID, DDLs1[0], table1, table2) 1725 c.Assert(col, Equals, "c1") 1726 c.Assert(err, IsNil) 1727 col, err = AddDifferentFieldLenColumns(ID, DDLs2[0], table2, table3) 1728 c.Assert(col, Equals, "c1") 1729 c.Assert(err, ErrorMatches, ".*add columns with different field lengths.*") 1730 col, err = AddDifferentFieldLenColumns(ID, DDLs1[0], table3, table2) 1731 c.Assert(col, Equals, "c1") 1732 c.Assert(err, ErrorMatches, ".*add columns with different field lengths.*") 1733 1734 // the initial status is synced but not resolved. 1735 t.checkLockSynced(c, l) 1736 t.checkLockNoDone(c, l) 1737 c.Assert(l.IsResolved(), IsFalse) 1738 1739 // TrySync for the first table, no table has done the DDLs operation.
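// the first table adds c1 VARCHAR(4); the second table will then try VARCHAR(5) and must be rejected.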
1740 info := newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 1741 DDLs, cols, err := l.TrySync(info, tts) 1742 c.Assert(err, IsNil) 1743 c.Assert(DDLs, DeepEquals, DDLs1) 1744 c.Assert(cols, DeepEquals, []string{}) 1745 c.Assert(l.versions, DeepEquals, vers) 1746 t.checkLockNoDone(c, l) 1747 c.Assert(l.IsResolved(), IsFalse) 1748 1749 // TrySync for the second table, which adds the column with a larger field length 1750 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs2, ti0, []*model.TableInfo{ti2}, vers) 1751 DDLs, cols, err = l.TrySync(info, tts) 1752 c.Assert(err, ErrorMatches, ".*add columns with different field lengths.*") 1753 c.Assert(DDLs, DeepEquals, []string{}) 1754 c.Assert(cols, DeepEquals, []string{}) 1755 c.Assert(l.versions, DeepEquals, vers) 1756 1757 // case 2: add a column with a smaller field length 1758 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 1759 1760 // TrySync for the first table, no table has done the DDLs operation. 1761 vers[source][db][tbls[0]]-- 1762 info = NewInfo(task, source, db, tbls[1], downSchema, downTable, DDLs2, ti0, []*model.TableInfo{ti2}) 1763 info.Version = vers[source][db][tbls[1]] 1764 DDLs, cols, err = l.TrySync(info, tts) 1765 c.Assert(err, IsNil) 1766 c.Assert(DDLs, DeepEquals, DDLs2) 1767 c.Assert(cols, DeepEquals, []string{}) 1768 c.Assert(l.versions, DeepEquals, vers) 1769 t.checkLockNoDone(c, l) 1770 c.Assert(l.IsResolved(), IsFalse) 1771 1772 // TrySync for the second table, which adds the column with a smaller field length 1773 info = newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 1774 DDLs, cols, err = l.TrySync(info, tts) 1775 c.Assert(err, ErrorMatches, ".*add columns with different field lengths.*") 1776 c.Assert(DDLs, DeepEquals, []string{}) 1777 c.Assert(cols, DeepEquals, []string{}) 1778 c.Assert(l.versions, DeepEquals, vers) 1779 } 1780 1781 func (t *testLock) TestAddNotFullyDroppedColumns(c *C) { 1782 var ( 1783 ID = "test_lock_add_not_fully_dropped_cols-`foo`.`bar`" 1784 task = "test_lock_add_not_fully_dropped_cols" 1785 source = "mysql-replica-1" 1786 downSchema = "foo" 1787 downTable = "bar" 1788 db = "foo" 1789 tbls = []string{"bar1", "bar2"} 1790 p = parser.New() 1791 se = mock.NewContext() 1792 1793 tblID int64 = 111 1794 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, b int, c int)`) 1795 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, b int)`) 1796 ti2 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY)`) 1797 ti3 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c int)`) 1798 1799 DDLs1 = []string{"ALTER TABLE bar DROP COLUMN c"} 1800 DDLs2 = []string{"ALTER TABLE bar DROP COLUMN b"} 1801 DDLs3 = []string{"ALTER TABLE bar ADD COLUMN b INT"} 1802 DDLs4 = []string{"ALTER TABLE bar ADD COLUMN c INT"} 1803 DDLs5 = []string{"ALTER TABLE bar DROP COLUMN c", "ALTER TABLE bar ADD COLUMN c INT"} 1804 1805 tables = map[string]map[string]struct{}{db: {tbls[0]: struct{}{}, tbls[1]: struct{}{}}} 1806 tts = []TargetTable{newTargetTable(task, source, downSchema, downTable, tables)} 1807 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 1808 1809 vers = map[string]map[string]map[string]int64{ 1810 source: { 1811 db: {tbls[0]: 0, tbls[1]: 0}, 1812 }, 1813 } 1814 1815 colm1 =
map[string]map[string]map[string]map[string]map[string]DropColumnStage{ 1816 ID: { 1817 "b": {source: {db: {tbls[0]: DropNotDone}}}, 1818 "c": {source: {db: {tbls[0]: DropNotDone}}}, 1819 }, 1820 } 1821 colm2 = map[string]map[string]map[string]map[string]map[string]DropColumnStage{ 1822 ID: { 1823 "b": {source: {db: {tbls[0]: DropNotDone, tbls[1]: DropDone}}}, 1824 "c": {source: {db: {tbls[0]: DropNotDone}}}, 1825 }, 1826 } 1827 colm3 = map[string]map[string]map[string]map[string]map[string]DropColumnStage{ 1828 ID: { 1829 "c": {source: {db: {tbls[0]: DropNotDone}}}, 1830 }, 1831 } 1832 ) 1833 col, err := GetColumnName(ID, DDLs1[0], ast.AlterTableDropColumn) 1834 c.Assert(col, Equals, "c") 1835 c.Assert(err, IsNil) 1836 col, err = GetColumnName(ID, DDLs2[0], ast.AlterTableDropColumn) 1837 c.Assert(col, Equals, "b") 1838 c.Assert(err, IsNil) 1839 1840 // the initial status is synced but not resolved. 1841 t.checkLockSynced(c, l) 1842 t.checkLockNoDone(c, l) 1843 c.Assert(l.IsResolved(), IsFalse) 1844 1845 // TrySync for the first table, drop column c 1846 DDLs, cols, err := l.TrySync(newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers), tts) 1847 c.Assert(err, IsNil) 1848 c.Assert(DDLs, DeepEquals, []string{}) 1849 c.Assert(cols, DeepEquals, []string{"c"}) 1850 c.Assert(l.versions, DeepEquals, vers) 1851 c.Assert(l.IsResolved(), IsFalse) 1852 1853 // TrySync for the first table, drop column b 1854 DDLs, cols, err = l.TrySync(newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs2, ti1, []*model.TableInfo{ti2}, vers), tts) 1855 c.Assert(err, IsNil) 1856 c.Assert(DDLs, DeepEquals, []string{}) 1857 c.Assert(cols, DeepEquals, []string{"b"}) 1858 c.Assert(l.versions, DeepEquals, vers) 1859 c.Assert(l.IsResolved(), IsFalse) 1860 1861 colm, _, err := GetAllDroppedColumns(etcdTestCli) 1862 c.Assert(err, IsNil) 1863 c.Assert(colm, DeepEquals, colm1) 1864 1865 // TrySync for the second table, drop column b, this column should be fully dropped 1866 DDLs, cols, err = l.TrySync(newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs2, ti0, []*model.TableInfo{ti3}, vers), tts) 1867 c.Assert(err, IsNil) 1868 c.Assert(DDLs, DeepEquals, DDLs2) 1869 c.Assert(cols, DeepEquals, []string{"b"}) 1870 c.Assert(l.versions, DeepEquals, vers) 1871 c.Assert(l.IsResolved(), IsFalse) 1872 // Simulate watch done operation from dm-worker 1873 op := NewOperation(utils.GenDDLLockID(task, downSchema, downTable), task, source, db, tbls[1], DDLs2, ConflictNone, "", true, []string{"b"}) 1874 c.Assert(l.DeleteColumnsByOp(op), IsNil) 1875 1876 colm, _, err = GetAllDroppedColumns(etcdTestCli) 1877 c.Assert(err, IsNil) 1878 c.Assert(colm, DeepEquals, colm2) 1879 1880 op = NewOperation(utils.GenDDLLockID(task, downSchema, downTable), task, source, db, tbls[0], []string{}, ConflictNone, "", true, []string{"b"}) 1881 c.Assert(l.DeleteColumnsByOp(op), IsNil) 1882 1883 colm, _, err = GetAllDroppedColumns(etcdTestCli) 1884 c.Assert(err, IsNil) 1885 c.Assert(colm, DeepEquals, colm3) 1886 1887 // TrySync for the first table, add column b, should succeed, because this column is fully dropped in the downstream 1888 DDLs, cols, err = l.TrySync(newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs3, ti2, []*model.TableInfo{ti1}, vers), tts) 1889 c.Assert(err, IsNil) 1890 c.Assert(DDLs, DeepEquals, DDLs3) 1891 c.Assert(cols, DeepEquals, []string{}) 1892 c.Assert(l.versions, DeepEquals, vers) 1893 c.Assert(l.IsResolved(), IsFalse) 
1894 1895 // TrySync for the first table, add column c, should fail, because this column isn't fully dropped in the downstream 1896 _, _, err = l.TrySync(newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs4, ti1, []*model.TableInfo{ti0}, vers), tts) 1897 c.Assert(err, ErrorMatches, ".*add column c that wasn't fully dropped in downstream.*") 1898 c.Assert(l.IsResolved(), IsFalse) 1899 1900 // TrySync for the second table, drop column c, this column should be fully dropped 1901 DDLs, cols, err = l.TrySync(newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs1, ti3, []*model.TableInfo{ti2}, vers), tts) 1902 c.Assert(err, IsNil) 1903 c.Assert(DDLs, DeepEquals, DDLs1) 1904 c.Assert(cols, DeepEquals, []string{"c"}) 1905 c.Assert(l.versions, DeepEquals, vers) 1906 c.Assert(l.IsResolved(), IsFalse) 1907 // Simulate watch done operation from dm-worker 1908 op = NewOperation(utils.GenDDLLockID(task, downSchema, downTable), task, source, db, tbls[1], DDLs1, ConflictNone, "", true, []string{"c"}) 1909 c.Assert(l.DeleteColumnsByOp(op), IsNil) 1910 1911 // TrySync for the second table, add column c, should fail, because this column isn't fully dropped in the downstream 1912 _, _, err = l.TrySync(newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs4, ti1, []*model.TableInfo{ti0}, vers), tts) 1913 c.Assert(err, ErrorMatches, ".*add column c that wasn't fully dropped in downstream.*") 1914 c.Assert(l.IsResolved(), IsFalse) 1915 1916 // Simulate watch done operation from dm-worker 1917 op = NewOperation(utils.GenDDLLockID(task, downSchema, downTable), task, source, db, tbls[0], []string{}, ConflictNone, "", true, []string{"c"}) 1918 c.Assert(l.DeleteColumnsByOp(op), IsNil) 1919 1920 // TrySync for the first table, add column c, should succeed, because this column is fully dropped in the downstream 1921 DDLs, cols, err = l.TrySync(newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs4, ti1, []*model.TableInfo{ti0}, vers), tts) 1922 c.Assert(err, IsNil) 1923 c.Assert(DDLs, DeepEquals, DDLs4) 1924 c.Assert(cols, DeepEquals, []string{}) 1925 c.Assert(l.versions, DeepEquals, vers) 1926 c.Assert(l.IsResolved(), IsFalse) 1927 1928 _, _, err = l.TrySync(newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs5, ti0, []*model.TableInfo{ti1, ti0}, vers), tts) 1929 c.Assert(err, ErrorMatches, ".*add column c that wasn't fully dropped in downstream.*") 1930 } 1931 1932 func (t *testLock) trySyncForAllTablesLarger(c *C, l *Lock, 1933 ddls []string, tableInfoBefore *model.TableInfo, tis []*model.TableInfo, tts []TargetTable, vers map[string]map[string]map[string]int64, 1934 ) { 1935 for source, schemaTables := range l.Ready() { 1936 for schema, tables := range schemaTables { 1937 for table := range tables { 1938 info := newInfoWithVersion(l.Task, source, schema, table, l.DownSchema, l.DownTable, ddls, tableInfoBefore, tis, vers) 1939 DDLs2, cols, err := l.TrySync(info, tts) 1940 c.Assert(err, IsNil) 1941 c.Assert(cols, DeepEquals, []string{}) 1942 c.Assert(DDLs2, DeepEquals, ddls) 1943 } 1944 } 1945 } 1946 } 1947 1948 func (t *testLock) checkLockSynced(c *C, l *Lock) { 1949 synced, remain := l.IsSynced() 1950 c.Assert(synced, Equals, l.synced) 1951 c.Assert(synced, IsTrue) 1952 c.Assert(remain, Equals, 0) 1953 1954 ready := l.Ready() 1955 for _, schemaTables := range ready { 1956 for _, tables := range schemaTables { 1957 for _, synced := range tables { 1958 c.Assert(synced, IsTrue) 1959 } 1960 } 1961 } 1962 } 1963 1964 
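// checkLockNoDone asserts that the lock is not resolved and that none of its ready tables is marked as done.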
func (t *testLock) checkLockNoDone(c *C, l *Lock) { 1965 c.Assert(l.IsResolved(), IsFalse) 1966 for source, schemaTables := range l.Ready() { 1967 for schema, tables := range schemaTables { 1968 for table := range tables { 1969 c.Assert(l.IsDone(source, schema, table), IsFalse) 1970 } 1971 } 1972 } 1973 } 1974 1975 func newInfoWithVersion(task, source, upSchema, upTable, downSchema, downTable string, ddls []string, tableInfoBefore *model.TableInfo, 1976 tableInfosAfter []*model.TableInfo, vers map[string]map[string]map[string]int64, 1977 ) Info { 1978 info := NewInfo(task, source, upSchema, upTable, downSchema, downTable, ddls, tableInfoBefore, tableInfosAfter) 1979 vers[source][upSchema][upTable]++ 1980 info.Version = vers[source][upSchema][upTable] 1981 return info 1982 } 1983 1984 func (t *testLock) TestLockTrySyncDifferentIndex(c *C) { 1985 var ( 1986 ID = "test_lock_try_sync_index-`foo`.`bar`" 1987 task = "test_lock_try_sync_index" 1988 source = "mysql-replica-1" 1989 downSchema = "db" 1990 downTable = "bar" 1991 db = "db" 1992 tbls = []string{"bar1", "bar2"} 1993 p = parser.New() 1994 se = mock.NewContext() 1995 tblID int64 = 111 1996 DDLs1 = []string{"ALTER TABLE bar DROP INDEX idx_c1"} 1997 DDLs2 = []string{"ALTER TABLE bar ADD INDEX new_idx(c1)"} 1998 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT, UNIQUE INDEX idx_c1(c1))`) 1999 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT)`) 2000 ti2 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT, INDEX new_idx(c1))`) 2001 tables = map[string]map[string]struct{}{ 2002 db: {tbls[0]: struct{}{}, tbls[1]: struct{}{}}, 2003 } 2004 tts = []TargetTable{ 2005 newTargetTable(task, source, downSchema, downTable, tables), 2006 } 2007 2008 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 2009 2010 vers = map[string]map[string]map[string]int64{ 2011 source: { 2012 db: {tbls[0]: 0, tbls[1]: 0}, 2013 }, 2014 } 2015 ) 2016 2017 // the initial status is synced. 2018 t.checkLockSynced(c, l) 2019 t.checkLockNoDone(c, l) 2020 2021 // try sync for one table, `DROP INDEX` returned directly (to make schema become more compatible). 2022 // `DROP INDEX` is handled like `ADD COLUMN`. 2023 info := newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs1, ti0, []*model.TableInfo{ti1}, vers) 2024 DDLs, cols, err := l.TrySync(info, tts) 2025 c.Assert(err, IsNil) 2026 c.Assert(DDLs, DeepEquals, DDLs1) 2027 c.Assert(cols, DeepEquals, []string{}) 2028 c.Assert(l.versions, DeepEquals, vers) 2029 synced, remain := l.IsSynced() 2030 c.Assert(synced, Equals, l.synced) 2031 c.Assert(synced, IsFalse) 2032 c.Assert(remain, Equals, 1) 2033 2034 cmp, err := l.tables[source][db][tbls[1]].Compare(schemacmp.Encode(ti0)) 2035 c.Assert(err, IsNil) 2036 c.Assert(cmp, Equals, 0) 2037 2038 // try sync ADD another INDEX for another table 2039 // `ADD INDEX` is handled like `DROP COLUMN`. 
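// so no DDL is returned for this table until every table has added the index.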
2040 info = newInfoWithVersion(task, source, db, tbls[1], downSchema, downTable, DDLs2, ti1, []*model.TableInfo{ti2}, vers) 2041 DDLs, cols, err = l.TrySync(info, tts) 2042 c.Assert(err, IsNil) 2043 c.Assert(DDLs, DeepEquals, []string{}) // no DDLs returned 2044 c.Assert(cols, DeepEquals, []string{}) 2045 c.Assert(l.versions, DeepEquals, vers) 2046 synced, remain = l.IsSynced() 2047 c.Assert(synced, Equals, l.synced) 2048 c.Assert(synced, IsFalse) 2049 c.Assert(remain, Equals, 1) 2050 2051 joined, err := l.joinFinalTables() 2052 c.Assert(err, IsNil) 2053 cmp, err = l.tables[source][db][tbls[0]].Compare(joined) 2054 c.Assert(err, IsNil) 2055 c.Assert(cmp, Equals, 0) 2056 2057 // try sync ADD INDEX for first table 2058 info = newInfoWithVersion(task, source, db, tbls[0], downSchema, downTable, DDLs2, ti1, []*model.TableInfo{ti2}, vers) 2059 DDLs, cols, err = l.TrySync(info, tts) 2060 c.Assert(err, IsNil) 2061 c.Assert(DDLs, DeepEquals, DDLs2) 2062 c.Assert(cols, DeepEquals, []string{}) 2063 c.Assert(l.versions, DeepEquals, vers) 2064 t.checkLockSynced(c, l) 2065 } 2066 2067 func (t *testLock) TestFetchTableInfo(c *C) { 2068 var ( 2069 meta = "meta" 2070 ID = "test_lock_try_sync_index-`foo`.`bar`" 2071 task = "test_lock_try_sync_index" 2072 source = "mysql-replica-1" 2073 downSchema = "db" 2074 downTable = "bar" 2075 schema = "db" 2076 tbls = []string{"bar1", "bar2"} 2077 p = parser.New() 2078 se = mock.NewContext() 2079 tblID int64 = 111 2080 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 INT, UNIQUE INDEX idx_c1(c1))`) 2081 tables = map[string]map[string]struct{}{ 2082 schema: {tbls[0]: struct{}{}, tbls[1]: struct{}{}}, 2083 } 2084 tts = []TargetTable{ 2085 newTargetTable(task, source, downSchema, downTable, tables), 2086 } 2087 query = fmt.Sprintf("SELECT table_info FROM `%s`.`%s` WHERE id = \\? AND cp_schema = \\? 
AND cp_table = \\?", meta, cputil.SyncerCheckpoint(task)) 2088 ) 2089 2090 // nil downstream meta 2091 l := NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 2092 ti, err := l.FetchTableInfos(task, source, schema, tbls[0]) 2093 c.Assert(terror.ErrMasterOptimisticDownstreamMetaNotFound.Equal(err), IsTrue) 2094 c.Assert(ti, IsNil) 2095 2096 // table info not exist 2097 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, &DownstreamMeta{dbConfig: &dbconfig.DBConfig{}, meta: meta}) 2098 conn.DefaultDBProvider = &conn.DefaultDBProviderImpl{} 2099 mock := conn.InitMockDB(c) 2100 mock.ExpectQuery(query).WithArgs(source, schema, tbls[0]).WillReturnRows(sqlmock.NewRows([]string{"table_info"})) 2101 ti, err = l.FetchTableInfos(task, source, schema, tbls[0]) 2102 c.Assert(terror.ErrDBExecuteFailed.Equal(err), IsTrue) 2103 c.Assert(ti, IsNil) 2104 2105 // null table info 2106 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, &DownstreamMeta{dbConfig: &dbconfig.DBConfig{}, meta: meta}) 2107 conn.DefaultDBProvider = &conn.DefaultDBProviderImpl{} 2108 mock = conn.InitMockDB(c) 2109 mock.ExpectQuery(query).WithArgs(source, schema, tbls[0]).WillReturnRows(sqlmock.NewRows([]string{"table_info"}).AddRow("null")) 2110 ti, err = l.FetchTableInfos(task, source, schema, tbls[0]) 2111 c.Assert(terror.ErrMasterOptimisticDownstreamMetaNotFound.Equal(err), IsTrue) 2112 c.Assert(ti, IsNil) 2113 2114 // succeed 2115 tiBytes, err := json.Marshal(ti0) 2116 c.Assert(err, IsNil) 2117 conn.DefaultDBProvider = &conn.DefaultDBProviderImpl{} 2118 mock = conn.InitMockDB(c) 2119 mock.ExpectQuery(query).WithArgs(source, schema, tbls[0]).WillReturnRows(sqlmock.NewRows([]string{"table_info"}).AddRow(tiBytes)) 2120 ti, err = l.FetchTableInfos(task, source, schema, tbls[0]) 2121 c.Assert(err, IsNil) 2122 c.Assert(mock.ExpectationsWereMet(), IsNil) 2123 c.Assert(ti, DeepEquals, ti0) 2124 } 2125 2126 func (t *testLock) TestCheckAddDropColumns(c *C) { 2127 var ( 2128 ID = "test-`foo`.`bar`" 2129 task = "test" 2130 source = "mysql-replica-1" 2131 downSchema = "db" 2132 downTable = "bar" 2133 db = "db" 2134 tbls = []string{"bar1", "bar2"} 2135 p = parser.New() 2136 se = mock.NewContext() 2137 tblID int64 = 111 2138 DDLs1 = "ALTER TABLE bar ADD COLUMN a VARCHAR(1)" 2139 DDLs2 = "ALTER TABLE bar ADD COLUMN a VARCHAR(2)" 2140 DDLs3 = "ALTER TABLE bar DROP COLUMN col" 2141 DDLs4 = "ALTER TABLE bar ADD COLUMN col int" 2142 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col int)`) 2143 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col int, a VARCHAR(1))`) 2144 ti2 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col int, a VARCHAR(2))`) 2145 ti3 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, a VARCHAR(2))`) 2146 tables = map[string]map[string]struct{}{ 2147 db: {tbls[0]: struct{}{}, tbls[1]: struct{}{}}, 2148 } 2149 tts = []TargetTable{ 2150 newTargetTable(task, source, downSchema, downTable, tables), 2151 } 2152 2153 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, schemacmp.Encode(ti0), tts, nil) 2154 ) 2155 2156 l.tables[source][db][tbls[0]] = schemacmp.Encode(ti0) 2157 l.tables[source][db][tbls[1]] = schemacmp.Encode(ti1) 2158 2159 col, err := l.checkAddDropColumn(source, db, tbls[0], DDLs1, schemacmp.Encode(ti0), schemacmp.Encode(ti1), nil) 2160 2161 c.Assert(err, IsNil) 2162 c.Assert(len(col), Equals, 0) 2163 
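// pretend the first table already has the VARCHAR(1) column, then adding VARCHAR(2) on the second table must be rejected.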
2164 l.tables[source][db][tbls[0]] = schemacmp.Encode(ti1) 2165 col, err = l.checkAddDropColumn(source, db, tbls[1], DDLs2, schemacmp.Encode(ti0), schemacmp.Encode(ti2), nil) 2166 c.Assert(err, NotNil) 2167 c.Assert(err, ErrorMatches, ".*add columns with different field lengths.*") 2168 c.Assert(len(col), Equals, 0) 2169 2170 col, err = l.checkAddDropColumn(source, db, tbls[0], DDLs3, schemacmp.Encode(ti2), schemacmp.Encode(ti3), nil) 2171 c.Assert(err, IsNil) 2172 c.Assert(col, Equals, "col") 2173 2174 l.columns = map[string]map[string]map[string]map[string]DropColumnStage{ 2175 "col": { 2176 source: { 2177 db: {tbls[0]: DropNotDone}, 2178 }, 2179 }, 2180 } 2181 2182 col, err = l.checkAddDropColumn(source, db, tbls[0], DDLs4, schemacmp.Encode(ti3), schemacmp.Encode(ti2), nil) 2183 c.Assert(err, NotNil) 2184 c.Assert(err, ErrorMatches, ".*add column .* that wasn't fully dropped in downstream.*") 2185 c.Assert(len(col), Equals, 0) 2186 } 2187 2188 func (t *testLock) TestJoinTables(c *C) { 2189 var ( 2190 source = "mysql-replica-1" 2191 db = "db" 2192 tbls = []string{"bar1", "bar2"} 2193 p = parser.New() 2194 se = mock.NewContext() 2195 tblID int64 = 111 2196 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col int)`) 2197 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col int, a VARCHAR(1))`) 2198 ti2 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col varchar(4))`) 2199 t0 = schemacmp.Encode(ti0) 2200 t1 = schemacmp.Encode(ti1) 2201 t2 = schemacmp.Encode(ti2) 2202 ) 2203 2204 l := &Lock{ 2205 tables: map[string]map[string]map[string]schemacmp.Table{ 2206 source: { 2207 db: {tbls[0]: t0, tbls[1]: t0}, 2208 }, 2209 }, 2210 finalTables: map[string]map[string]map[string]schemacmp.Table{ 2211 source: { 2212 db: {tbls[0]: t0, tbls[1]: t0}, 2213 }, 2214 }, 2215 } 2216 2217 joined, err := l.joinNormalTables() 2218 c.Assert(err, IsNil) 2219 cmp, err := joined.Compare(t0) 2220 c.Assert(err, IsNil) 2221 c.Assert(cmp, Equals, 0) 2222 joined, err = l.joinFinalTables() 2223 c.Assert(err, IsNil) 2224 cmp, err = joined.Compare(t0) 2225 c.Assert(err, IsNil) 2226 c.Assert(cmp, Equals, 0) 2227 _, err = l.joinConflictTables() 2228 c.Assert(err, IsNil) 2229 2230 l.tables[source][db][tbls[0]] = t1 2231 l.finalTables[source][db][tbls[0]] = t1 2232 2233 joined, err = l.joinNormalTables() 2234 c.Assert(err, IsNil) 2235 cmp, err = joined.Compare(t1) 2236 c.Assert(err, IsNil) 2237 c.Assert(cmp, Equals, 0) 2238 joined, err = l.joinFinalTables() 2239 c.Assert(err, IsNil) 2240 cmp, err = joined.Compare(t1) 2241 c.Assert(err, IsNil) 2242 c.Assert(cmp, Equals, 0) 2243 _, err = l.joinConflictTables() 2244 c.Assert(err, IsNil) 2245 2246 l.tables[source][db][tbls[1]] = t1 2247 l.finalTables[source][db][tbls[1]] = t1 2248 l.conflictTables = map[string]map[string]map[string]schemacmp.Table{ 2249 source: { 2250 db: {tbls[0]: t2}, 2251 }, 2252 } 2253 2254 joined, err = l.joinNormalTables() 2255 c.Assert(err, IsNil) 2256 cmp, err = joined.Compare(t1) 2257 c.Assert(err, IsNil) 2258 c.Assert(cmp, Equals, 0) 2259 joined, err = l.joinFinalTables() 2260 c.Assert(err, IsNil) 2261 cmp, err = joined.Compare(t1) 2262 c.Assert(err, IsNil) 2263 c.Assert(cmp, Equals, 0) 2264 joined, err = l.joinConflictTables() 2265 c.Assert(err, IsNil) 2266 cmp, err = joined.Compare(t2) 2267 c.Assert(err, IsNil) 2268 c.Assert(cmp, Equals, 0) 2269 2270 l.tables[source][db][tbls[1]] = t2 2271 _, err = l.joinNormalTables() 2272 c.Assert(err, NotNil) 2273 c.Assert(err, 
ErrorMatches, ".*incompatible mysql type.*") 2274 2275 l.resolveTables() 2276 c.Assert(l.conflictTables, HasLen, 0) 2277 c.Assert(l.tables, DeepEquals, l.finalTables) 2278 } 2279 2280 func (t *testLock) TestAddRemoveConflictTable(c *C) { 2281 var ( 2282 source = "source" 2283 schema = "schema" 2284 table1 = "table1" 2285 table2 = "table2" 2286 table3 = "table3" 2287 p = parser.New() 2288 se = mock.NewContext() 2289 tblID int64 = 111 2290 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col int)`) 2291 t0 = schemacmp.Encode(ti0) 2292 ) 2293 l := &Lock{ 2294 conflictTables: make(map[string]map[string]map[string]schemacmp.Table), 2295 } 2296 c.Assert(l.conflictTables, HasLen, 0) 2297 2298 l.addConflictTable(source, schema, table1, t0) 2299 c.Assert(l.conflictTables, HasLen, 1) 2300 c.Assert(l.conflictTables[source], HasLen, 1) 2301 c.Assert(l.conflictTables[source][schema], HasLen, 1) 2302 tb := l.conflictTables[source][schema][table1] 2303 cmp, err := tb.Compare(t0) 2304 c.Assert(err, IsNil) 2305 c.Assert(cmp, Equals, 0) 2306 2307 l.addConflictTable(source, schema, table1, t0) 2308 c.Assert(l.conflictTables, HasLen, 1) 2309 c.Assert(l.conflictTables[source], HasLen, 1) 2310 c.Assert(l.conflictTables[source][schema], HasLen, 1) 2311 2312 l.addConflictTable(source, schema, table2, t0) 2313 c.Assert(l.conflictTables, HasLen, 1) 2314 c.Assert(l.conflictTables[source], HasLen, 1) 2315 c.Assert(l.conflictTables[source][schema], HasLen, 2) 2316 tb = l.conflictTables[source][schema][table2] 2317 cmp, err = tb.Compare(t0) 2318 c.Assert(err, IsNil) 2319 c.Assert(cmp, Equals, 0) 2320 2321 l.removeConflictTable(source, schema, table3) 2322 c.Assert(l.conflictTables[source][schema], HasLen, 2) 2323 2324 l.removeConflictTable(source, schema, table1) 2325 c.Assert(l.conflictTables[source][schema], HasLen, 1) 2326 tb = l.conflictTables[source][schema][table2] 2327 cmp, err = tb.Compare(t0) 2328 c.Assert(err, IsNil) 2329 c.Assert(cmp, Equals, 0) 2330 2331 l.removeConflictTable(source, schema, table2) 2332 c.Assert(l.conflictTables, HasLen, 0) 2333 } 2334 2335 func (t *testLock) TestAllTableSmallerLarger(c *C) { 2336 var ( 2337 source = "source" 2338 schema = "schema" 2339 table1 = "table1" 2340 table2 = "table2" 2341 p = parser.New() 2342 se = mock.NewContext() 2343 tblID int64 = 111 2344 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col int)`) 2345 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, new_col int)`) 2346 ti2 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, new_col varchar(4))`) 2347 ti3 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (a INT PRIMARY KEY, new_col varchar(4))`) 2348 ti4 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, new_col2 varchar(4))`) 2349 ti5 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id varchar(4) PRIMARY KEY, new_col int)`) 2350 ti6 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col int, new_col int)`) 2351 ti7 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col int, new_col varchar(4))`) 2352 ti8 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col int, col2 int not null)`) 2353 ti9 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (a INT PRIMARY KEY, new_col int)`) 2354 t0 = schemacmp.Encode(ti0) 2355 t1 = schemacmp.Encode(ti1) 2356 t2 = schemacmp.Encode(ti2) 2357 t3 = schemacmp.Encode(ti3) 2358 t4 = schemacmp.Encode(ti4) 2359 t5 = 
schemacmp.Encode(ti5) 2360 t6 = schemacmp.Encode(ti6) 2361 t7 = schemacmp.Encode(ti7) 2362 t8 = schemacmp.Encode(ti8) 2363 t9 = schemacmp.Encode(ti9) 2364 ) 2365 l := &Lock{ 2366 tables: map[string]map[string]map[string]schemacmp.Table{ 2367 source: { 2368 schema: {table1: t0, table2: t0}, 2369 }, 2370 }, 2371 finalTables: map[string]map[string]map[string]schemacmp.Table{ 2372 source: { 2373 schema: {table1: t0, table2: t0}, 2374 }, 2375 }, 2376 conflictTables: make(map[string]map[string]map[string]schemacmp.Table), 2377 } 2378 c.Assert(l.allFinalTableSmaller(), IsTrue) 2379 2380 // rename table 2381 l.addConflictTable(source, schema, table1, t1) 2382 l.finalTables[source][schema][table1] = t1 2383 c.Assert(l.allConflictTableSmaller(), IsTrue) 2384 c.Assert(l.allConflictTableLarger(), IsTrue) 2385 c.Assert(l.allFinalTableSmaller(), IsFalse) 2386 c.Assert(l.allFinalTableLarger(), IsFalse) 2387 l.addConflictTable(source, schema, table2, t1) 2388 l.finalTables[source][schema][table2] = t1 2389 c.Assert(l.allConflictTableSmaller(), IsTrue) 2390 c.Assert(l.allConflictTableLarger(), IsTrue) 2391 c.Assert(l.allFinalTableSmaller(), IsTrue) 2392 c.Assert(l.allFinalTableLarger(), IsTrue) 2393 // reset 2394 l.resolveTables() 2395 c.Assert(l.conflictTables, HasLen, 0) 2396 c.Assert(l.tables, DeepEquals, l.finalTables) 2397 c.Assert(l.tables[source][schema], HasLen, 2) 2398 2399 // modify column 2400 l.addConflictTable(source, schema, table1, t2) 2401 l.finalTables[source][schema][table1] = t2 2402 c.Assert(l.allConflictTableSmaller(), IsTrue) 2403 c.Assert(l.allConflictTableLarger(), IsTrue) 2404 c.Assert(l.allFinalTableSmaller(), IsFalse) 2405 c.Assert(l.allFinalTableLarger(), IsFalse) 2406 l.addConflictTable(source, schema, table2, t2) 2407 l.finalTables[source][schema][table2] = t2 2408 c.Assert(l.allConflictTableSmaller(), IsTrue) 2409 c.Assert(l.allConflictTableLarger(), IsTrue) 2410 c.Assert(l.allFinalTableSmaller(), IsTrue) 2411 c.Assert(l.allFinalTableLarger(), IsTrue) 2412 // reset 2413 l.resolveTables() 2414 c.Assert(l.conflictTables, HasLen, 0) 2415 c.Assert(l.tables, DeepEquals, l.finalTables) 2416 c.Assert(l.tables[source][schema], HasLen, 2) 2417 c.Assert(l.tables[source][schema][table1], DeepEquals, t2) 2418 c.Assert(l.tables[source][schema][table2], DeepEquals, t2) 2419 2420 // different rename 2421 l.addConflictTable(source, schema, table1, t3) 2422 l.finalTables[source][schema][table1] = t3 2423 c.Assert(l.allConflictTableSmaller(), IsTrue) 2424 c.Assert(l.allConflictTableLarger(), IsTrue) 2425 c.Assert(l.allFinalTableSmaller(), IsFalse) 2426 c.Assert(l.allFinalTableLarger(), IsFalse) 2427 l.addConflictTable(source, schema, table2, t4) 2428 l.finalTables[source][schema][table2] = t4 2429 c.Assert(l.allConflictTableSmaller(), IsFalse) 2430 c.Assert(l.allConflictTableLarger(), IsFalse) 2431 c.Assert(l.allFinalTableSmaller(), IsFalse) 2432 c.Assert(l.allFinalTableLarger(), IsFalse) 2433 // reset 2434 l.finalTables[source][schema][table1] = t1 2435 l.finalTables[source][schema][table2] = t1 2436 l.resolveTables() 2437 c.Assert(l.conflictTables, HasLen, 0) 2438 c.Assert(l.tables, DeepEquals, l.finalTables) 2439 c.Assert(l.tables[source][schema], HasLen, 2) 2440 c.Assert(l.tables[source][schema][table1], DeepEquals, t1) 2441 c.Assert(l.tables[source][schema][table2], DeepEquals, t1) 2442 2443 // different modify 2444 l.addConflictTable(source, schema, table1, t2) 2445 l.finalTables[source][schema][table1] = t2 2446 c.Assert(l.allConflictTableSmaller(), IsTrue) 2447 
c.Assert(l.allConflictTableLarger(), IsTrue) 2448 c.Assert(l.allFinalTableSmaller(), IsFalse) 2449 c.Assert(l.allFinalTableLarger(), IsFalse) 2450 l.addConflictTable(source, schema, table2, t5) 2451 l.finalTables[source][schema][table2] = t5 2452 c.Assert(l.allConflictTableSmaller(), IsFalse) 2453 c.Assert(l.allConflictTableLarger(), IsFalse) 2454 c.Assert(l.allFinalTableSmaller(), IsFalse) 2455 c.Assert(l.allFinalTableLarger(), IsFalse) 2456 // reset 2457 l.finalTables[source][schema][table1] = t1 2458 l.finalTables[source][schema][table2] = t1 2459 l.resolveTables() 2460 c.Assert(l.conflictTables, HasLen, 0) 2461 c.Assert(l.tables, DeepEquals, l.finalTables) 2462 c.Assert(l.tables[source][schema], HasLen, 2) 2463 c.Assert(l.tables[source][schema][table1], DeepEquals, t1) 2464 c.Assert(l.tables[source][schema][table2], DeepEquals, t1) 2465 2466 // one table rename, one table modify 2467 l.addConflictTable(source, schema, table1, t4) 2468 l.finalTables[source][schema][table1] = t4 2469 c.Assert(l.allConflictTableSmaller(), IsTrue) 2470 c.Assert(l.allConflictTableLarger(), IsTrue) 2471 c.Assert(l.allFinalTableSmaller(), IsFalse) 2472 c.Assert(l.allFinalTableLarger(), IsFalse) 2473 l.addConflictTable(source, schema, table2, t5) 2474 l.finalTables[source][schema][table2] = t5 2475 c.Assert(l.allConflictTableSmaller(), IsFalse) 2476 c.Assert(l.allConflictTableLarger(), IsFalse) 2477 c.Assert(l.allFinalTableSmaller(), IsFalse) 2478 c.Assert(l.allFinalTableLarger(), IsFalse) 2479 // reset 2480 l.finalTables[source][schema][table1] = t0 2481 l.finalTables[source][schema][table2] = t0 2482 l.resolveTables() 2483 c.Assert(l.conflictTables, HasLen, 0) 2484 c.Assert(l.tables, DeepEquals, l.finalTables) 2485 c.Assert(l.tables[source][schema], HasLen, 2) 2486 c.Assert(l.tables[source][schema][table1], DeepEquals, t0) 2487 c.Assert(l.tables[source][schema][table2], DeepEquals, t0) 2488 2489 // one table rename, one table add and drop 2490 l.addConflictTable(source, schema, table1, t1) 2491 l.finalTables[source][schema][table1] = t1 2492 c.Assert(l.allConflictTableSmaller(), IsTrue) 2493 c.Assert(l.allConflictTableLarger(), IsTrue) 2494 c.Assert(l.allFinalTableSmaller(), IsFalse) 2495 c.Assert(l.allFinalTableLarger(), IsFalse) 2496 l.finalTables[source][schema][table2] = t6 2497 c.Assert(l.allConflictTableSmaller(), IsTrue) 2498 c.Assert(l.allConflictTableLarger(), IsTrue) 2499 c.Assert(l.allFinalTableSmaller(), IsFalse) 2500 c.Assert(l.allFinalTableLarger(), IsTrue) 2501 l.finalTables[source][schema][table2] = t1 2502 c.Assert(l.allConflictTableSmaller(), IsTrue) 2503 c.Assert(l.allConflictTableLarger(), IsTrue) 2504 c.Assert(l.allFinalTableSmaller(), IsTrue) 2505 c.Assert(l.allFinalTableLarger(), IsTrue) 2506 // reset 2507 l.finalTables[source][schema][table1] = t0 2508 l.finalTables[source][schema][table2] = t0 2509 l.resolveTables() 2510 c.Assert(l.conflictTables, HasLen, 0) 2511 c.Assert(l.tables, DeepEquals, l.finalTables) 2512 c.Assert(l.tables[source][schema], HasLen, 2) 2513 c.Assert(l.tables[source][schema][table1], DeepEquals, t0) 2514 c.Assert(l.tables[source][schema][table2], DeepEquals, t0) 2515 2516 // one table modify, one table add and drop 2517 l.addConflictTable(source, schema, table1, t2) 2518 l.finalTables[source][schema][table1] = t2 2519 c.Assert(l.allConflictTableSmaller(), IsTrue) 2520 c.Assert(l.allConflictTableLarger(), IsTrue) 2521 c.Assert(l.allFinalTableSmaller(), IsFalse) 2522 c.Assert(l.allFinalTableLarger(), IsFalse) 2523 l.finalTables[source][schema][table2] = t7 2524 
c.Assert(l.allConflictTableSmaller(), IsTrue) 2525 c.Assert(l.allConflictTableLarger(), IsTrue) 2526 c.Assert(l.allFinalTableSmaller(), IsFalse) 2527 c.Assert(l.allFinalTableLarger(), IsTrue) 2528 l.finalTables[source][schema][table2] = t2 2529 c.Assert(l.allConflictTableSmaller(), IsTrue) 2530 c.Assert(l.allConflictTableLarger(), IsTrue) 2531 c.Assert(l.allFinalTableSmaller(), IsTrue) 2532 c.Assert(l.allFinalTableLarger(), IsTrue) 2533 // reset 2534 l.finalTables[source][schema][table1] = t0 2535 l.finalTables[source][schema][table2] = t0 2536 l.resolveTables() 2537 c.Assert(l.conflictTables, HasLen, 0) 2538 c.Assert(l.tables, DeepEquals, l.finalTables) 2539 c.Assert(l.tables[source][schema], HasLen, 2) 2540 c.Assert(l.tables[source][schema][table1], DeepEquals, t0) 2541 c.Assert(l.tables[source][schema][table2], DeepEquals, t0) 2542 2543 // not null no default 2544 l.addConflictTable(source, schema, table1, t8) 2545 l.finalTables[source][schema][table1] = t8 2546 c.Assert(l.allConflictTableSmaller(), IsTrue) 2547 c.Assert(l.allConflictTableLarger(), IsTrue) 2548 c.Assert(l.allFinalTableSmaller(), IsFalse) 2549 c.Assert(l.allFinalTableLarger(), IsFalse) 2550 l.addConflictTable(source, schema, table2, t8) 2551 l.finalTables[source][schema][table2] = t8 2552 c.Assert(l.allConflictTableSmaller(), IsTrue) 2553 c.Assert(l.allConflictTableLarger(), IsTrue) 2554 c.Assert(l.allFinalTableSmaller(), IsTrue) 2555 c.Assert(l.allFinalTableLarger(), IsTrue) 2556 // reset 2557 l.finalTables[source][schema][table1] = t0 2558 l.finalTables[source][schema][table2] = t0 2559 l.resolveTables() 2560 c.Assert(l.conflictTables, HasLen, 0) 2561 c.Assert(l.tables, DeepEquals, l.finalTables) 2562 c.Assert(l.tables[source][schema], HasLen, 2) 2563 c.Assert(l.tables[source][schema][table1], DeepEquals, t0) 2564 c.Assert(l.tables[source][schema][table2], DeepEquals, t0) 2565 2566 // multiple rename 2567 // tb1: rename col to new_col 2568 l.addConflictTable(source, schema, table1, t1) 2569 l.finalTables[source][schema][table1] = t1 2570 c.Assert(l.allConflictTableSmaller(), IsTrue) 2571 c.Assert(l.allConflictTableLarger(), IsTrue) 2572 c.Assert(l.allFinalTableSmaller(), IsFalse) 2573 c.Assert(l.allFinalTableLarger(), IsFalse) 2574 // tb2: rename col to new_col 2575 l.addConflictTable(source, schema, table2, t1) 2576 l.finalTables[source][schema][table2] = t1 2577 c.Assert(l.allConflictTableSmaller(), IsTrue) 2578 c.Assert(l.allConflictTableLarger(), IsTrue) 2579 c.Assert(l.allFinalTableSmaller(), IsTrue) 2580 c.Assert(l.allFinalTableLarger(), IsTrue) 2581 l.resolveTables() 2582 // tb1: rename id to a 2583 l.addConflictTable(source, schema, table1, t9) 2584 l.finalTables[source][schema][table1] = t9 2585 c.Assert(l.noConflictWithOneNormalTable(source, schema, table1, t1, t9), IsFalse) 2586 c.Assert(l.allConflictTableSmaller(), IsTrue) 2587 c.Assert(l.allConflictTableLarger(), IsTrue) 2588 c.Assert(l.allFinalTableSmaller(), IsFalse) 2589 c.Assert(l.allFinalTableLarger(), IsFalse) 2590 // tb2: rename col to new_col (idempotent) 2591 l.tables[source][schema][table2] = t0 2592 l.addConflictTable(source, schema, table2, t1) 2593 l.finalTables[source][schema][table2] = t1 2594 c.Assert(l.noConflictWithOneNormalTable(source, schema, table2, t0, t1), IsTrue) 2595 l.removeConflictTable(source, schema, table2) 2596 l.tables[source][schema][table2] = t1 2597 // tb2: rename id to a 2598 l.addConflictTable(source, schema, table2, t9) 2599 l.finalTables[source][schema][table2] = t9 2600 
c.Assert(l.noConflictWithOneNormalTable(source, schema, table2, t1, t9), IsFalse) 2601 c.Assert(l.allConflictTableSmaller(), IsTrue) 2602 c.Assert(l.allConflictTableLarger(), IsTrue) 2603 c.Assert(l.allFinalTableSmaller(), IsTrue) 2604 c.Assert(l.allFinalTableLarger(), IsTrue) 2605 // reset 2606 l.finalTables[source][schema][table1] = t0 2607 l.finalTables[source][schema][table2] = t0 2608 l.resolveTables() 2609 c.Assert(l.conflictTables, HasLen, 0) 2610 c.Assert(l.tables, DeepEquals, l.finalTables) 2611 c.Assert(l.tables[source][schema], HasLen, 2) 2612 c.Assert(l.tables[source][schema][table1], DeepEquals, t0) 2613 c.Assert(l.tables[source][schema][table2], DeepEquals, t0) 2614 } 2615 2616 func (t *testLock) TestNoConflictWithOneNormalTable(c *C) { 2617 var ( 2618 source = "source" 2619 schema = "schema" 2620 table1 = "table1" 2621 table2 = "table2" 2622 p = parser.New() 2623 se = mock.NewContext() 2624 tblID int64 = 111 2625 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, a int, col int)`) 2626 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, a int, new_col int)`) 2627 ti2 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, a int, col varchar(4))`) 2628 ti3 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, a int, new_col2 int)`) 2629 ti4 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, b int, new_col int)`) 2630 t0 = schemacmp.Encode(ti0) 2631 t1 = schemacmp.Encode(ti1) 2632 t2 = schemacmp.Encode(ti2) 2633 t3 = schemacmp.Encode(ti3) 2634 t4 = schemacmp.Encode(ti4) 2635 ) 2636 l := &Lock{ 2637 tables: map[string]map[string]map[string]schemacmp.Table{ 2638 source: { 2639 schema: {table1: t0, table2: t0}, 2640 }, 2641 }, 2642 } 2643 2644 // table1 nothing happened. 
2645 // table2 rename column 2646 c.Assert(l.noConflictWithOneNormalTable(source, schema, table2, t0, t1), IsFalse) 2647 2648 // mock table1 rename column already 2649 l.tables[source][schema][table1] = t1 2650 // table2 rename column 2651 c.Assert(l.noConflictWithOneNormalTable(source, schema, table2, t0, t1), IsTrue) 2652 // table2 modify column 2653 c.Assert(l.noConflictWithOneNormalTable(source, schema, table2, t0, t2), IsFalse) 2654 // table2 different rename 2655 c.Assert(l.noConflictWithOneNormalTable(source, schema, table2, t0, t3), IsFalse) 2656 2657 // mock table1 rename another column already 2658 l.tables[source][schema][table1] = t4 2659 // same results 2660 // table2 rename column 2661 c.Assert(l.noConflictWithOneNormalTable(source, schema, table2, t0, t1), IsTrue) 2662 // table2 modify column 2663 c.Assert(l.noConflictWithOneNormalTable(source, schema, table2, t0, t2), IsFalse) 2664 // table2 different rename 2665 c.Assert(l.noConflictWithOneNormalTable(source, schema, table2, t0, t3), IsFalse) 2666 } 2667 2668 func checkRedirectOp(c *C, task, source, schema, table string) bool { 2669 ops, _, err := GetAllOperations(etcdTestCli) 2670 c.Assert(err, IsNil) 2671 if _, ok := ops[task]; !ok { 2672 return false 2673 } 2674 if _, ok := ops[task][source]; !ok { 2675 return false 2676 } 2677 if _, ok := ops[task][source][schema]; !ok { 2678 return false 2679 } 2680 op, ok := ops[task][source][schema][table] 2681 if !ok { 2682 return false 2683 } 2684 return op.ConflictStage == ConflictResolved 2685 } 2686 2687 func (t *testLock) TestTrySyncForOneDDL(c *C) { 2688 var ( 2689 ID = "test-`foo`.`bar`" 2690 task = "test" 2691 source = "source" 2692 schema = "schema" 2693 downSchema = "downSchema" 2694 downTable = "downTable" 2695 table1 = "table1" 2696 table2 = "table2" 2697 p = parser.New() 2698 se = mock.NewContext() 2699 tblID int64 = 111 2700 ti0 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col1 int)`) 2701 ti1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col1 int, col2 int)`) 2702 ti2 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY)`) 2703 ti3 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col1 int, col3 int)`) 2704 ti4 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col2 int)`) 2705 ti5 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col3 int)`) 2706 ti6 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col3 varchar(4))`) 2707 ti7 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col3 int)`) 2708 ti8 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col3 varchar(4), col4 int not null)`) 2709 ti9 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col2 varchar(4), col4 int not null)`) 2710 ti10 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, col3 int, col4 int not null)`) 2711 t0 = schemacmp.Encode(ti0) 2712 t1 = schemacmp.Encode(ti1) 2713 t2 = schemacmp.Encode(ti2) 2714 t3 = schemacmp.Encode(ti3) 2715 t4 = schemacmp.Encode(ti4) 2716 t5 = schemacmp.Encode(ti5) 2717 t6 = schemacmp.Encode(ti6) 2718 t7 = schemacmp.Encode(ti7) 2719 t8 = schemacmp.Encode(ti8) 2720 t9 = schemacmp.Encode(ti9) 2721 t10 = schemacmp.Encode(ti10) 2722 tables = map[string]map[string]struct{}{ 2723 schema: {table1: struct{}{}, table2: struct{}{}}, 2724 } 2725 tts = []TargetTable{ 2726 newTargetTable(task, source, downSchema, downTable, 
tables), 2727 } 2728 l = NewLock(etcdTestCli, ID, task, downSchema, downTable, t0, tts, nil) 2729 ) 2730 2731 // check create table statement 2732 schemaChanged, conflictStage := l.trySyncForOneDDL(source, schema, table1, t0, t0) 2733 c.Assert(schemaChanged, IsTrue) 2734 c.Assert(conflictStage, Equals, ConflictNone) 2735 2736 // check alter table add column 2737 schemaChanged, conflictStage = l.trySyncForOneDDL(source, schema, table1, t0, t1) 2738 c.Assert(schemaChanged, IsTrue) 2739 c.Assert(conflictStage, Equals, ConflictNone) 2740 2741 // check create partition, no changed since https://github.com/pingcap/tidb-tools/blob/d671b0840063bc2532941f02e02e12627402844c/pkg/schemacmp/table.go#L251 2742 schemaChanged, conflictStage = l.trySyncForOneDDL(source, schema, table1, t0, t1) 2743 c.Assert(schemaChanged, IsTrue) 2744 c.Assert(conflictStage, Equals, ConflictNone) 2745 2746 // check alter table drop column 2747 schemaChanged, conflictStage = l.trySyncForOneDDL(source, schema, table2, t0, t2) 2748 c.Assert(schemaChanged, IsFalse) 2749 c.Assert(conflictStage, Equals, ConflictNone) 2750 2751 // check table rename column 2752 schemaChanged, conflictStage = l.trySyncForOneDDL(source, schema, table1, t1, t3) 2753 c.Assert(schemaChanged, IsFalse) 2754 c.Assert(conflictStage, Equals, ConflictSkipWaitRedirect) 2755 2756 // check other table add column 2757 schemaChanged, conflictStage = l.trySyncForOneDDL(source, schema, table2, t2, t4) 2758 c.Assert(schemaChanged, IsTrue) 2759 c.Assert(conflictStage, Equals, ConflictNone) 2760 2761 // check all table rename column 2762 schemaChanged, conflictStage = l.trySyncForOneDDL(source, schema, table2, t4, t5) 2763 c.Assert(schemaChanged, IsTrue) 2764 c.Assert(conflictStage, Equals, ConflictNone) 2765 // table1 redirect 2766 c.Assert(checkRedirectOp(c, task, source, schema, table1), IsTrue) 2767 2768 // check one table modify column 2769 schemaChanged, conflictStage = l.trySyncForOneDDL(source, schema, table2, t5, t6) 2770 c.Assert(schemaChanged, IsFalse) 2771 c.Assert(conflictStage, Equals, ConflictSkipWaitRedirect) 2772 2773 // check other table drop column 2774 schemaChanged, conflictStage = l.trySyncForOneDDL(source, schema, table1, t3, t7) 2775 c.Assert(schemaChanged, IsTrue) 2776 c.Assert(conflictStage, Equals, ConflictNone) 2777 2778 // check all table modify column 2779 schemaChanged, conflictStage = l.trySyncForOneDDL(source, schema, table1, t7, t6) 2780 c.Assert(schemaChanged, IsTrue) 2781 c.Assert(conflictStage, Equals, ConflictNone) 2782 // table2 redirect 2783 c.Assert(checkRedirectOp(c, task, source, schema, table2), IsTrue) 2784 2785 // check add column not null no default 2786 schemaChanged, conflictStage = l.trySyncForOneDDL(source, schema, table1, t6, t8) 2787 c.Assert(schemaChanged, IsFalse) 2788 c.Assert(conflictStage, Equals, ConflictSkipWaitRedirect) 2789 // check idempotent. 2790 schemaChanged, conflictStage = l.trySyncForOneDDL(source, schema, table1, t6, t8) 2791 c.Assert(schemaChanged, IsFalse) 2792 c.Assert(conflictStage, Equals, ConflictSkipWaitRedirect) 2793 2794 schemaChanged, conflictStage = l.trySyncForOneDDL(source, schema, table2, t6, t8) 2795 c.Assert(schemaChanged, IsTrue) 2796 c.Assert(conflictStage, Equals, ConflictNone) 2797 // table1 redirect 2798 c.Assert(checkRedirectOp(c, task, source, schema, table2), IsTrue) 2799 // check idempotent. 
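// re-running the same DDL for table2 still reports the schema as changed with no conflict.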
2800 schemaChanged, conflictStage = l.trySyncForOneDDL(source, schema, table2, t6, t8) 2801 c.Assert(schemaChanged, IsTrue) 2802 c.Assert(conflictStage, Equals, ConflictNone) 2803 2804 // check multiple conflicting DDLs 2805 // tb1 renames a column 2806 schemaChanged, conflictStage = l.trySyncForOneDDL(source, schema, table1, t8, t9) 2807 c.Assert(schemaChanged, IsFalse) 2808 c.Assert(conflictStage, Equals, ConflictSkipWaitRedirect) 2809 // tb2 modifies a column 2810 schemaChanged, conflictStage = l.trySyncForOneDDL(source, schema, table2, t8, t10) 2811 c.Assert(schemaChanged, IsFalse) 2812 c.Assert(conflictStage, Equals, ConflictDetected) 2813 }
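
// The helper below is a minimal sketch, not something the tests above call: it shows how the
// hand-written source -> schema -> table version maps used throughout this file could be built
// programmatically, e.g. newVersionMap(sources, dbs, tbls) reproduces the vers literals above.
// The name newVersionMap is illustrative only.
func newVersionMap(sources, schemas, tables []string) map[string]map[string]map[string]int64 {
	vers := make(map[string]map[string]map[string]int64, len(sources))
	for _, source := range sources {
		vers[source] = make(map[string]map[string]int64, len(schemas))
		for _, schema := range schemas {
			vers[source][schema] = make(map[string]int64, len(tables))
			for _, table := range tables {
				// every upstream table starts at version 0, matching the literals in the tests above.
				vers[source][schema][table] = 0
			}
		}
	}
	return vers
}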