github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/dm/syncer/opt_sharding_group_test.go

// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package syncer

import (
	"context"
	"testing"

	"github.com/pingcap/tiflow/dm/config"
	"github.com/pingcap/tiflow/dm/pkg/binlog"
	tcontext "github.com/pingcap/tiflow/dm/pkg/context"
	"github.com/pingcap/tiflow/dm/pkg/log"
	"github.com/pingcap/tiflow/dm/pkg/schema"
	"github.com/pingcap/tiflow/dm/pkg/utils"
	"github.com/pingcap/tiflow/dm/syncer/shardddl"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
)

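// optShardingGroupSuite hosts the tests for the optimistic sharding group
// keeper; the tests share one SubTaskConfig prepared in SetupSuite.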
type optShardingGroupSuite struct {
	suite.Suite
	cfg *config.SubTaskConfig
}

func (s *optShardingGroupSuite) SetupSuite() {
	s.cfg = &config.SubTaskConfig{
		SourceID:   "mysql-replica-01",
		MetaSchema: "test",
		Name:       "checkpoint_ut",
	}
	require.NoError(s.T(), log.InitLogger(&log.Config{}))
}

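// TestOptShardingGroupSuite is the go test entry point that runs the suite.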
func TestOptShardingGroupSuite(t *testing.T) {
	suite.Run(t, new(optShardingGroupSuite))
}

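// TestLowestFirstPosInOptGroups checks that lowestFirstLocationInGroups
// returns the earliest first location across all unresolved conflict groups
// and pending sharding resyncs. The binlog location fixtures pos11, pos12,
// pos21, pos22 and pos3 (and the endPos* values used below) are defined
// elsewhere in this package; per the assertions below, pos21 orders lowest
// among them, followed by pos11.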
func (s *optShardingGroupSuite) TestLowestFirstPosInOptGroups() {
	s.T().Parallel()
	var (
		db1tbl     = "`db1`.`tbl`"
		db2tbl     = "`db2`.`tbl`"
		db3tbl     = "`db3`.`tbl`"
		sourceTbls = []string{"`db1`.`tbl1`", "`db1`.`tbl2`", "`db2`.`tbl1`", "`db2`.`tbl2`", "`db3`.`tbl1`"}
		targetTbls = []string{db1tbl, db1tbl, db2tbl, db2tbl, db3tbl}
		positions  = []binlog.Location{pos11, pos12, pos21, pos22, pos3}
	)

	k := NewOptShardingGroupKeeper(tcontext.Background(), s.cfg)
	for i := range sourceTbls {
		k.appendConflictTable(utils.UnpackTableID(sourceTbls[i]), utils.UnpackTableID(targetTbls[i]), positions[i], "", false)
	}

	require.Equal(s.T(), pos21.Position, k.lowestFirstLocationInGroups().Position)
	k.resolveGroup(utils.UnpackTableID(db2tbl))
	k.addShardingReSync(&ShardingReSync{
		targetTable:  utils.UnpackTableID(db2tbl),
		currLocation: pos21,
	})
	// still pos21: resolving the group moved its location into shardingReSyncs
	require.Equal(s.T(), pos21.Position, k.lowestFirstLocationInGroups().Position)
	k.removeShardingReSync(&ShardingReSync{targetTable: utils.UnpackTableID(db2tbl)})
	// pos11 now: the db2 group is fully resolved and its resync removed
	require.Equal(s.T(), pos11.Position, k.lowestFirstLocationInGroups().Position)

	// reset
	k.Reset()
	require.Len(s.T(), k.groups, 0)
	require.Len(s.T(), k.shardingReSyncs, 0)
}

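// TestSync drives resolveOptimisticDDL through three scenarios: a resolved
// stage received from dm-master while syncing other tables (case 1), a
// resolved stage received in handleQueryEventOptimistic (case 2), and a
// dropped table that removes its group outright (case 3).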
func (s *optShardingGroupSuite) TestSync() {
	s.T().Parallel()
	var (
		db1tbl     = "`db1`.`tbl`"
		db2tbl     = "`db2`.`tbl`"
		db3tbl     = "`db3`.`tbl`"
		sourceTbls = []string{"`db1`.`tbl1`", "`db1`.`tbl2`", "`db2`.`tbl1`", "`db2`.`tbl2`", "`db3`.`tbl1`"}
		targetTbls = []string{db1tbl, db1tbl, db2tbl, db2tbl, db3tbl}
		positions  = []binlog.Location{pos11, pos12, pos21, pos22, pos3}
		logger     = log.L()
		err        error
	)

	k := NewOptShardingGroupKeeper(tcontext.Background(), s.cfg)
	for i := range sourceTbls {
		k.appendConflictTable(utils.UnpackTableID(sourceTbls[i]), utils.UnpackTableID(targetTbls[i]), positions[i], "", false)
	}

	shardingReSyncCh := make(chan *ShardingReSync, 10)

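	// Build a minimal Syncer: only the fields this test exercises are wired
	// up, and the buffered channel above lets resolveOptimisticDDL enqueue a
	// ShardingReSync without a concurrent reader.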
	syncer := Syncer{
		osgk:       k,
		tctx:       tcontext.Background().WithLogger(logger),
		optimist:   shardddl.NewOptimist(&logger, nil, "", ""),
		checkpoint: &mockCheckpoint{},
	}
	syncer.schemaTracker, err = schema.NewTestTracker(context.Background(), s.cfg.Name, syncer.downstreamTrackConn, log.L())
	require.NoError(s.T(), err)

	// case 1: mock receiving a resolved stage from dm-master while syncing other tables
	require.Equal(s.T(), pos21.Position, k.lowestFirstLocationInGroups().Position)
	require.True(s.T(), k.tableInConflict(utils.UnpackTableID(db2tbl)))
	require.True(s.T(), k.inConflictStage(utils.UnpackTableID(sourceTbls[3]), utils.UnpackTableID(db2tbl)))
	syncer.resolveOptimisticDDL(&eventContext{
		shardingReSyncCh: &shardingReSyncCh,
		endLocation:      endPos3,
	}, utils.UnpackTableID(sourceTbls[2]), utils.UnpackTableID(db2tbl))
	require.False(s.T(), k.tableInConflict(utils.UnpackTableID(db2tbl)))
	require.False(s.T(), k.inConflictStage(utils.UnpackTableID(sourceTbls[3]), utils.UnpackTableID(db2tbl)))
	require.Len(s.T(), shardingReSyncCh, 1)
	shardingResync := <-shardingReSyncCh
	expectedShardingResync := &ShardingReSync{
		currLocation:   pos21,
		latestLocation: endPos3,
		targetTable:    utils.UnpackTableID(db2tbl),
		allResolved:    true,
	}
	require.Equal(s.T(), expectedShardingResync, shardingResync)
	// the ShardingReSync has not been removed from osgk yet, so the lowest location is still pos21
	require.Equal(s.T(), pos21.Position, k.lowestFirstLocationInGroups().Position)
	k.removeShardingReSync(shardingResync)

	// case 2: mock receiving a resolved stage from dm-master in handleQueryEventOptimistic
	require.Equal(s.T(), pos11.Position, k.lowestFirstLocationInGroups().Position)
	require.True(s.T(), k.tableInConflict(utils.UnpackTableID(db1tbl)))
	require.True(s.T(), k.inConflictStage(utils.UnpackTableID(sourceTbls[0]), utils.UnpackTableID(db1tbl)))
	syncer.resolveOptimisticDDL(&eventContext{
		shardingReSyncCh: &shardingReSyncCh,
		endLocation:      endPos12,
	}, utils.UnpackTableID(sourceTbls[1]), utils.UnpackTableID(db1tbl))
	require.False(s.T(), k.tableInConflict(utils.UnpackTableID(db1tbl)))
	require.False(s.T(), k.inConflictStage(utils.UnpackTableID(sourceTbls[0]), utils.UnpackTableID(db1tbl)))
	require.Len(s.T(), shardingReSyncCh, 1)
	shardingResync = <-shardingReSyncCh
	expectedShardingResync = &ShardingReSync{
		currLocation:   pos11,
		latestLocation: endPos12,
		targetTable:    utils.UnpackTableID(db1tbl),
		allResolved:    true,
	}
	require.Equal(s.T(), expectedShardingResync, shardingResync)
	require.Equal(s.T(), pos11.Position, k.lowestFirstLocationInGroups().Position)
	k.removeShardingReSync(shardingResync)

	// case 3: mock a dropped table, which should resolve the conflict stage without emitting a resync
	require.Equal(s.T(), pos3.Position, k.lowestFirstLocationInGroups().Position)
	require.True(s.T(), k.tableInConflict(utils.UnpackTableID(db3tbl)))
	require.True(s.T(), k.inConflictStage(utils.UnpackTableID(sourceTbls[4]), utils.UnpackTableID(db3tbl)))
	k.RemoveGroup(utils.UnpackTableID(db3tbl), []string{sourceTbls[4]})
	require.False(s.T(), k.tableInConflict(utils.UnpackTableID(db3tbl)))
	require.False(s.T(), k.inConflictStage(utils.UnpackTableID(sourceTbls[4]), utils.UnpackTableID(db3tbl)))
	require.Len(s.T(), shardingReSyncCh, 0)
}