github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/dm/syncer/sharding_group_test.go

// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package syncer

import (
	"context"
	"fmt"
	"sort"

	"github.com/DATA-DOG/go-sqlmock"
	"github.com/go-mysql-org/go-mysql/mysql"
	. "github.com/pingcap/check"
	"github.com/pingcap/tidb/pkg/util/dbutil"
	"github.com/pingcap/tidb/pkg/util/filter"
	"github.com/pingcap/tiflow/dm/config"
	"github.com/pingcap/tiflow/dm/pb"
	"github.com/pingcap/tiflow/dm/pkg/binlog"
	"github.com/pingcap/tiflow/dm/pkg/conn"
	tcontext "github.com/pingcap/tiflow/dm/pkg/context"
	"github.com/pingcap/tiflow/dm/pkg/cputil"
	"github.com/pingcap/tiflow/dm/pkg/retry"
	"github.com/pingcap/tiflow/dm/pkg/terror"
	"github.com/pingcap/tiflow/dm/pkg/utils"
	"github.com/pingcap/tiflow/dm/syncer/dbconn"
)

var _ = Suite(&testShardingGroupSuite{})

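// Shared fixtures for the suite: one target table, four source tables in the
// same schema, binlog locations spread across three binlog files, and two
// dummy DDL lists.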
var (
	targetTbl = &filter.Table{
		Schema: "target_db",
		Name:   "tbl",
	}
	target     = targetTbl.String()
	sourceTbl1 = &filter.Table{Schema: "db1", Name: "tbl1"}
	sourceTbl2 = &filter.Table{Schema: "db1", Name: "tbl2"}
	sourceTbl3 = &filter.Table{Schema: "db1", Name: "tbl3"}
	sourceTbl4 = &filter.Table{Schema: "db1", Name: "tbl4"}
	source1    = sourceTbl1.String()
	source2    = sourceTbl2.String()
	source3    = sourceTbl3.String()
	source4    = sourceTbl4.String()
	pos11      = binlog.Location{Position: mysql.Position{Name: "mysql-bin.000002", Pos: 123}}
	endPos11   = binlog.Location{Position: mysql.Position{Name: "mysql-bin.000002", Pos: 456}}
	pos12      = binlog.Location{Position: mysql.Position{Name: "mysql-bin.000002", Pos: 789}}
	endPos12   = binlog.Location{Position: mysql.Position{Name: "mysql-bin.000002", Pos: 999}}
	pos21      = binlog.Location{Position: mysql.Position{Name: "mysql-bin.000001", Pos: 123}}
	endPos21   = binlog.Location{Position: mysql.Position{Name: "mysql-bin.000001", Pos: 456}}
	pos22      = binlog.Location{Position: mysql.Position{Name: "mysql-bin.000001", Pos: 789}}
	endPos22   = binlog.Location{Position: mysql.Position{Name: "mysql-bin.000001", Pos: 999}}
	pos3       = binlog.Location{Position: mysql.Position{Name: "mysql-bin.000003", Pos: 123}}
	endPos3    = binlog.Location{Position: mysql.Position{Name: "mysql-bin.000003", Pos: 456}}
	ddls1      = []string{"DUMMY DDL"}
	ddls2      = []string{"ANOTHER DUMMY DDL"}
)

type testShardingGroupSuite struct {
	cfg *config.SubTaskConfig
}

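// SetUpSuite prepares the minimal SubTaskConfig shared by all tests in this suite.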
func (t *testShardingGroupSuite) SetUpSuite(c *C) {
	t.cfg = &config.SubTaskConfig{
		SourceID:   "mysql-replica-01",
		MetaSchema: "test",
		Name:       "checkpoint_ut",
	}
}

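// TestLowestFirstPosInGroups checks that the keeper reports the smallest first
// binlog location among all of its sharding groups (here g2 on mysql-bin.000001).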
func (t *testShardingGroupSuite) TestLowestFirstPosInGroups(c *C) {
	k := NewShardingGroupKeeper(tcontext.Background(), t.cfg, nil)

	g1 := NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, []string{"db1.tbl1", "db1.tbl2"}, nil, false, "", false)
	// nolint:dogsled
	_, _, _, err := g1.TrySync("db1.tbl1", pos11, endPos11, ddls1)
	c.Assert(err, IsNil)

	// lowest
	g2 := NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, []string{"db2.tbl1", "db2.tbl2"}, nil, false, "", false)
	// nolint:dogsled
	_, _, _, err = g2.TrySync("db2.tbl1", pos21, endPos21, ddls1)
	c.Assert(err, IsNil)

	g3 := NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, []string{"db3.tbl1", "db3.tbl2"}, nil, false, "", false)
	// nolint:dogsled
	_, _, _, err = g3.TrySync("db3.tbl1", pos3, endPos3, ddls1)
	c.Assert(err, IsNil)

	k.groups["db1.tbl"] = g1
	k.groups["db2.tbl"] = g2
	k.groups["db3.tbl"] = g3

	c.Assert(k.lowestFirstLocationInGroups().Position, DeepEquals, pos21.Position)
}

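// TestMergeAndLeave checks that Merge and Leave are idempotent and that both
// are rejected once the group has entered sharding DDL syncing.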
func (t *testShardingGroupSuite) TestMergeAndLeave(c *C) {
	k := NewShardingGroupKeeper(tcontext.Background(), t.cfg, nil)
	g1 := NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, []string{source1, source2}, nil, false, "", false)
	c.Assert(g1.Sources(), DeepEquals, map[string]bool{source1: false, source2: false})

	needShardingHandle, synced, remain, err := g1.Merge([]string{source3})
	c.Assert(err, IsNil)
	c.Assert(needShardingHandle, IsFalse)
	c.Assert(synced, IsFalse)
	c.Assert(remain, Equals, 3)

	// a repeated merge has no side effect
	needShardingHandle, synced, remain, err = g1.Merge([]string{source3})
	c.Assert(err, IsNil)
	c.Assert(needShardingHandle, IsFalse)
	c.Assert(synced, IsFalse)
	c.Assert(remain, Equals, 3)

	err = g1.Leave([]string{source1})
	c.Assert(err, IsNil)
	c.Assert(g1.Sources(), DeepEquals, map[string]bool{source3: false, source2: false})

	// a repeated leave has no side effect
	err = g1.Leave([]string{source1})
	c.Assert(err, IsNil)
	c.Assert(g1.Sources(), DeepEquals, map[string]bool{source3: false, source2: false})

	ddls := []string{"DUMMY DDL"}
	pos1 := mysql.Position{Name: "mysql-bin.000002", Pos: 123}
	endPos1 := mysql.Position{Name: "mysql-bin.000002", Pos: 456}
	// nolint:dogsled
	_, _, _, err = g1.TrySync(source1, binlog.Location{Position: pos1}, binlog.Location{Position: endPos1}, ddls)
	c.Assert(err, IsNil)

	// nolint:dogsled
	_, _, _, err = g1.Merge([]string{source1})
	c.Assert(terror.ErrSyncUnitAddTableInSharding.Equal(err), IsTrue)
	err = g1.Leave([]string{source2})
	c.Assert(terror.ErrSyncUnitDropSchemaTableInSharding.Equal(err), IsTrue)
}

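// TestSync drives a two-source group through TrySync, the unresolved
// info/location accessors, ResolveShardingDDL and Reset.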
func (t *testShardingGroupSuite) TestSync(c *C) {
	k := NewShardingGroupKeeper(tcontext.Background(), t.cfg, nil)
	g1 := NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, []string{source1, source2}, nil, false, "", false)
	synced, active, remain, err := g1.TrySync(source1, pos11, endPos11, ddls1)
	c.Assert(err, IsNil)
	c.Assert(synced, IsFalse)
	c.Assert(active, IsTrue)
	c.Assert(remain, Equals, 1)
	synced, active, remain, err = g1.TrySync(source1, pos12, endPos12, ddls2)
	c.Assert(err, IsNil)
	c.Assert(synced, IsFalse)
	c.Assert(active, IsFalse)
	c.Assert(remain, Equals, 1)

	c.Assert(g1.FirstLocationUnresolved(), DeepEquals, &pos11)
	c.Assert(g1.FirstEndPosUnresolved(), DeepEquals, &endPos11)
	loc, err := g1.ActiveDDLFirstLocation()
	c.Assert(err, IsNil)
	c.Assert(loc, DeepEquals, pos11)

	// `TrySync` has not been called for source2, so beforeActiveDDL is always true
	beforeActiveDDL := g1.CheckSyncing(source2, pos21)
	c.Assert(beforeActiveDDL, IsTrue)

	info := g1.UnresolvedGroupInfo()
	shouldBe := &pb.ShardingGroup{Target: "", DDLs: ddls1, FirstLocation: pos11.String(), Synced: []string{source1}, Unsynced: []string{source2}}
	c.Assert(info, DeepEquals, shouldBe)

	// simple sort for [][]string{[]string{"db1", "tbl2"}, []string{"db1", "tbl1"}}
	tbls1 := g1.Tables()
	tbls2 := g1.UnresolvedTables()
	if tbls1[0].Name != tbls2[0].Name {
		tbls1[0], tbls1[1] = tbls1[1], tbls1[0]
	}
	c.Assert(tbls1, DeepEquals, tbls2)

	// sync the first DDL for source2, synced but not resolved
	synced, active, remain, err = g1.TrySync(source2, pos21, endPos21, ddls1)
	c.Assert(err, IsNil)
	c.Assert(synced, IsTrue)
	c.Assert(active, IsTrue)
	c.Assert(remain, Equals, 0)

	// active DDL is at pos21
	beforeActiveDDL = g1.CheckSyncing(source2, pos21)
	c.Assert(beforeActiveDDL, IsTrue)

	info = g1.UnresolvedGroupInfo()
	sort.Strings(info.Synced)
	shouldBe = &pb.ShardingGroup{Target: "", DDLs: ddls1, FirstLocation: pos11.String(), Synced: []string{source1, source2}, Unsynced: []string{}}
	c.Assert(info, DeepEquals, shouldBe)

	resolved := g1.ResolveShardingDDL()
	c.Assert(resolved, IsFalse)

	// next active DDL not present
	beforeActiveDDL = g1.CheckSyncing(source2, pos21)
	c.Assert(beforeActiveDDL, IsTrue)

	synced, active, remain, err = g1.TrySync(source2, pos22, endPos22, ddls2)
	c.Assert(err, IsNil)
	c.Assert(synced, IsTrue)
	c.Assert(active, IsTrue)
	c.Assert(remain, Equals, 0)
	resolved = g1.ResolveShardingDDL()
	c.Assert(resolved, IsTrue)

	// the caller should reset the sharding group if the DDL is successfully executed
	g1.Reset()

	info = g1.UnresolvedGroupInfo()
	c.Assert(info, IsNil)
	c.Assert(g1.UnresolvedTables(), IsNil)
}

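// TestTableID checks that GenTableID and UnpackTableID round-trip table names,
// including names that contain quote characters.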
func (t *testShardingGroupSuite) TestTableID(c *C) {
	originTables := []*filter.Table{
		{Schema: "db", Name: "table"},
		{Schema: `d"b`, Name: `t"able"`},
		{Schema: "d`b", Name: "t`able"},
	}
	for _, originTable := range originTables {
		// ignore isSchemaOnly
		tableID := utils.GenTableID(originTable)
		table := utils.UnpackTableID(tableID)
		c.Assert(table, DeepEquals, originTable)
	}
}

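// TestKeeper exercises ShardingGroupKeeper against a sqlmock database:
// preparing the shard meta schema/table, loading shard meta, AddGroup and
// LeaveGroup, TrySync and InSyncing, and resolving/resetting groups.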
func (t *testShardingGroupSuite) TestKeeper(c *C) {
	k := NewShardingGroupKeeper(tcontext.Background(), t.cfg, nil)
	k.clear()
	db, mock, err := sqlmock.New()
	c.Assert(err, IsNil)
	dbConn, err := db.Conn(context.Background())
	c.Assert(err, IsNil)
	k.db = conn.NewBaseDBForTest(db)
	k.dbConn = dbconn.NewDBConn(t.cfg, conn.NewBaseConnForTest(dbConn, &retry.FiniteRetryStrategy{}))
	mock.ExpectBegin()
	mock.ExpectExec(fmt.Sprintf("CREATE SCHEMA IF NOT EXISTS `%s`", t.cfg.MetaSchema)).WillReturnResult(sqlmock.NewResult(1, 1))
	mock.ExpectCommit()
	mock.ExpectBegin()
	mock.ExpectExec(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s.*", dbutil.TableName(t.cfg.MetaSchema, cputil.SyncerShardMeta(t.cfg.Name)))).WillReturnResult(sqlmock.NewResult(1, 1))
	mock.ExpectCommit()
	c.Assert(k.prepare(), IsNil)

	// test meta

	mock.ExpectQuery(" SELECT `target_table_id`, `source_table_id`, `active_index`, `is_global`, `data` FROM `test`.`checkpoint_ut_syncer_sharding_meta`.*").
		WillReturnRows(sqlmock.NewRows([]string{"target_table_id", "source_table_id", "active_index", "is_global", "data"}))
	meta, err := k.LoadShardMeta(mysql.MySQLFlavor, false)
	c.Assert(err, IsNil)
	c.Assert(meta, HasLen, 0)
	mock.ExpectQuery(" SELECT `target_table_id`, `source_table_id`, `active_index`, `is_global`, `data` FROM `test`.`checkpoint_ut_syncer_sharding_meta`.*").
		WillReturnRows(sqlmock.NewRows([]string{"target_table_id", "source_table_id", "active_index", "is_global", "data"}).
			AddRow(target, "", 0, true, "[{\"ddls\":[\"DUMMY DDL\"],\"source\":\"`db1`.`tbl1`\",\"first-position\":{\"Name\":\"mysql-bin.000002\",\"Pos\":123},\"first-gtid-set\":\"\"},{\"ddls\":[\"ANOTHER DUMMY DDL\"],\"source\":\"`db1`.`tbl1`\",\"first-position\":{\"Name\":\"mysql-bin.000002\",\"Pos\":789},\"first-gtid-set\":\"\"}]").
			AddRow(target, source1, 0, false, "[{\"ddls\":[\"DUMMY DDL\"],\"source\":\"`db1`.`tbl1`\",\"first-position\":{\"Name\":\"mysql-bin.000002\",\"Pos\":123},\"first-gtid-set\":\"\"},{\"ddls\":[\"ANOTHER DUMMY DDL\"],\"source\":\"`db1`.`tbl1`\",\"first-position\":{\"Name\":\"mysql-bin.000002\",\"Pos\":789},\"first-gtid-set\":\"\"}]"))

	meta, err = k.LoadShardMeta(mysql.MySQLFlavor, false)
	c.Assert(err, IsNil)
	c.Assert(meta, HasLen, 1) // has meta of `target`

	// test AddGroup and LeaveGroup

	needShardingHandle, group, synced, remain, err := k.AddGroup(targetTbl, []string{source1}, nil, true)
	c.Assert(err, IsNil)
	c.Assert(needShardingHandle, IsFalse)
	c.Assert(group, NotNil)
	c.Assert(synced, IsFalse)
	c.Assert(remain, Equals, 0) // first time doesn't return `remain`

	needShardingHandle, group, synced, remain, err = k.AddGroup(targetTbl, []string{source2}, nil, true)
	c.Assert(err, IsNil)
	c.Assert(needShardingHandle, IsFalse)
	c.Assert(group, NotNil)
	c.Assert(synced, IsFalse)
	c.Assert(remain, Equals, 2)

	// test LeaveGroup
	// nolint:dogsled
	_, _, _, remain, err = k.AddGroup(targetTbl, []string{source3}, nil, true)
	c.Assert(err, IsNil)
	c.Assert(remain, Equals, 3)
	// nolint:dogsled
	_, _, _, remain, err = k.AddGroup(targetTbl, []string{source4}, nil, true)
	c.Assert(err, IsNil)
	c.Assert(remain, Equals, 4)
	c.Assert(k.LeaveGroup(targetTbl, []string{source3, source4}), IsNil)

	// test TrySync and InSyncing

	needShardingHandle, group, synced, active, remain, err := k.TrySync(sourceTbl1, targetTbl, pos12, endPos12, ddls1)
	c.Assert(err, IsNil)
	c.Assert(needShardingHandle, IsTrue)
	c.Assert(group.sources, DeepEquals, map[string]bool{source1: true, source2: false})
	c.Assert(synced, IsFalse)
	c.Assert(active, IsTrue)
	c.Assert(remain, Equals, 1)

	c.Assert(k.InSyncing(sourceTbl1, &filter.Table{Schema: targetTbl.Schema, Name: "wrong table"}, pos11), IsFalse)
	loc, err := k.ActiveDDLFirstLocation(targetTbl)
	c.Assert(err, IsNil)
	// position before the active DDL, not in syncing
	c.Assert(binlog.CompareLocation(endPos11, loc, false), Equals, -1)
	c.Assert(k.InSyncing(sourceTbl1, targetTbl, endPos11), IsFalse)
	// position at the active DDL is still not in syncing; position after it is
	c.Assert(binlog.CompareLocation(pos12, loc, false), Equals, 0)
	c.Assert(k.InSyncing(sourceTbl1, targetTbl, pos12), IsFalse)
	c.Assert(binlog.CompareLocation(endPos12, loc, false), Equals, 1)
	c.Assert(k.InSyncing(sourceTbl1, targetTbl, endPos12), IsTrue)

	needShardingHandle, group, synced, active, remain, err = k.TrySync(sourceTbl2, targetTbl, pos21, endPos21, ddls1)
	c.Assert(err, IsNil)
	c.Assert(needShardingHandle, IsTrue)
	c.Assert(group.sources, DeepEquals, map[string]bool{source1: true, source2: true})
	c.Assert(synced, IsTrue)
	c.Assert(active, IsTrue)
	c.Assert(remain, Equals, 0)

	unresolvedTarget, unresolvedTables := k.UnresolvedTables()
	c.Assert(unresolvedTarget, DeepEquals, map[string]bool{target: true})
	// simple re-order
	if unresolvedTables[0].Name > unresolvedTables[1].Name {
		unresolvedTables[0], unresolvedTables[1] = unresolvedTables[1], unresolvedTables[0]
	}
	c.Assert(unresolvedTables, DeepEquals, []*filter.Table{sourceTbl1, sourceTbl2})

	unresolvedGroups := k.UnresolvedGroups()
	c.Assert(unresolvedGroups, HasLen, 1)
	g := unresolvedGroups[0]
	c.Assert(g.Unsynced, HasLen, 0)
	c.Assert(g.DDLs, DeepEquals, ddls1)
	c.Assert(g.FirstLocation, DeepEquals, pos12.String())

	sqls, args := k.PrepareFlushSQLs(unresolvedTarget)
	c.Assert(sqls, HasLen, 0)
	c.Assert(args, HasLen, 0)

	reset, err := k.ResolveShardingDDL(targetTbl)
	c.Assert(err, IsNil)
	c.Assert(reset, IsTrue)

	k.ResetGroups()

	unresolvedTarget, unresolvedTables = k.UnresolvedTables()
	c.Assert(unresolvedTarget, HasLen, 0)
	c.Assert(unresolvedTables, HasLen, 0)
}