github.com/pingcap/br@v5.3.0-alpha.0.20220125034240-ec59c7b6ce30+incompatible/pkg/lightning/backend/local/localhelper_test.go

// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package local

import (
	"bytes"
	"context"
	"math"
	"math/rand"
	"sort"
	"sync"
	"time"

	. "github.com/pingcap/check"
	"github.com/pingcap/errors"
	"github.com/pingcap/kvproto/pkg/metapb"
	"github.com/pingcap/kvproto/pkg/pdpb"
	"github.com/pingcap/parser/mysql"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/sessionctx/stmtctx"
	"github.com/pingcap/tidb/tablecodec"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/codec"
	"github.com/tikv/pd/server/core"
	"github.com/tikv/pd/server/schedule/placement"
	"go.uber.org/atomic"

	"github.com/pingcap/br/pkg/lightning/glue"
	"github.com/pingcap/br/pkg/restore"
)

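// testClient is an in-memory stand-in for the PD split client used by these
// tests. It tracks regions both in a map keyed by region ID and in a
// core.RegionsInfo (used by ScanRegions), counts batch-split calls, and lets
// tests inject faults through an optional clientHook.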
type testClient struct {
	mu           sync.RWMutex
	stores       map[uint64]*metapb.Store
	regions      map[uint64]*restore.RegionInfo
	regionsInfo  *core.RegionsInfo // For now it's only used in ScanRegions
	nextRegionID uint64
	splitCount   atomic.Int32
	hook         clientHook
}

func newTestClient(
	stores map[uint64]*metapb.Store,
	regions map[uint64]*restore.RegionInfo,
	nextRegionID uint64,
	hook clientHook,
) *testClient {
	regionsInfo := core.NewRegionsInfo()
	for _, regionInfo := range regions {
		regionsInfo.SetRegion(core.NewRegionInfo(regionInfo.Region, regionInfo.Leader))
	}
	return &testClient{
		stores:       stores,
		regions:      regions,
		regionsInfo:  regionsInfo,
		nextRegionID: nextRegionID,
		hook:         hook,
	}
}

func (c *testClient) GetAllRegions() map[uint64]*restore.RegionInfo {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.regions
}

func (c *testClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	store, ok := c.stores[storeID]
	if !ok {
		return nil, errors.Errorf("store not found")
	}
	return store, nil
}

func (c *testClient) GetRegion(ctx context.Context, key []byte) (*restore.RegionInfo, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	for _, region := range c.regions {
		if bytes.Compare(key, region.Region.StartKey) >= 0 &&
			(len(region.Region.EndKey) == 0 || bytes.Compare(key, region.Region.EndKey) < 0) {
			return region, nil
		}
	}
	return nil, errors.Errorf("region not found: key=%s", string(key))
}

func (c *testClient) GetRegionByID(ctx context.Context, regionID uint64) (*restore.RegionInfo, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	region, ok := c.regions[regionID]
	if !ok {
		return nil, errors.Errorf("region not found: id=%d", regionID)
	}
	return region, nil
}

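// SplitRegion mimics a single-key split: a new region takes over
// [start, splitKey) while the old region keeps [splitKey, end). Note that
// this fake bumps ConfVer rather than Version on a split; since
// BatchSplitRegionsWithOrigin compares both epoch fields, either one is
// enough to make a stale caller fail with "epoch not match".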
func (c *testClient) SplitRegion(
	ctx context.Context,
	regionInfo *restore.RegionInfo,
	key []byte,
) (*restore.RegionInfo, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	var target *restore.RegionInfo
	splitKey := codec.EncodeBytes([]byte{}, key)
	for _, region := range c.regions {
		if bytes.Compare(splitKey, region.Region.StartKey) >= 0 &&
			(len(region.Region.EndKey) == 0 || bytes.Compare(splitKey, region.Region.EndKey) < 0) {
			target = region
		}
	}
	if target == nil {
		return nil, errors.Errorf("region not found: key=%s", string(key))
	}
	newRegion := &restore.RegionInfo{
		Region: &metapb.Region{
			Peers:    target.Region.Peers,
			Id:       c.nextRegionID,
			StartKey: target.Region.StartKey,
			EndKey:   splitKey,
			RegionEpoch: &metapb.RegionEpoch{
				Version: target.Region.RegionEpoch.Version,
				ConfVer: target.Region.RegionEpoch.ConfVer + 1,
			},
		},
	}
	c.regions[c.nextRegionID] = newRegion
	c.regionsInfo.SetRegion(core.NewRegionInfo(newRegion.Region, newRegion.Leader))
	c.nextRegionID++
	target.Region.StartKey = splitKey
	target.Region.RegionEpoch.ConfVer++
	c.regions[target.Region.Id] = target
	c.regionsInfo.SetRegion(core.NewRegionInfo(target.Region, target.Leader))
	return newRegion, nil
}

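// BatchSplitRegionsWithOrigin mimics PD's batch split: after letting the
// hook rewrite the inputs, it encodes and sorts the split keys, carves new
// regions off the target region from left to right, and returns the
// (shrunken) origin region together with the newly created ones.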
func (c *testClient) BatchSplitRegionsWithOrigin(
	ctx context.Context, regionInfo *restore.RegionInfo, keys [][]byte,
) (*restore.RegionInfo, []*restore.RegionInfo, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.splitCount.Inc()

	if c.hook != nil {
		regionInfo, keys = c.hook.BeforeSplitRegion(ctx, regionInfo, keys)
	}
	if len(keys) == 0 {
		return nil, nil, errors.New("no valid key")
	}

	select {
	case <-ctx.Done():
		return nil, nil, ctx.Err()
	default:
	}

	newRegions := make([]*restore.RegionInfo, 0)
	target, ok := c.regions[regionInfo.Region.Id]
	if !ok {
		return nil, nil, errors.New("region not found")
	}
	if target.Region.RegionEpoch.Version != regionInfo.Region.RegionEpoch.Version ||
		target.Region.RegionEpoch.ConfVer != regionInfo.Region.RegionEpoch.ConfVer {
		return regionInfo, nil, errors.New("epoch not match")
	}
	splitKeys := make([][]byte, 0, len(keys))
	for _, k := range keys {
		splitKey := codec.EncodeBytes([]byte{}, k)
		splitKeys = append(splitKeys, splitKey)
	}
	sort.Slice(splitKeys, func(i, j int) bool {
		return bytes.Compare(splitKeys[i], splitKeys[j]) < 0
	})

	startKey := target.Region.StartKey
	for _, key := range splitKeys {
		if bytes.Compare(key, startKey) <= 0 || bytes.Compare(key, target.Region.EndKey) >= 0 {
			continue
		}
		newRegion := &restore.RegionInfo{
			Region: &metapb.Region{
				Peers:    target.Region.Peers,
				Id:       c.nextRegionID,
				StartKey: startKey,
				EndKey:   key,
			},
		}
		c.regions[c.nextRegionID] = newRegion
		c.regionsInfo.SetRegion(core.NewRegionInfo(newRegion.Region, newRegion.Leader))
		c.nextRegionID++
		startKey = key
		newRegions = append(newRegions, newRegion)
	}
	if !bytes.Equal(target.Region.StartKey, startKey) {
		target.Region.StartKey = startKey
		c.regions[target.Region.Id] = target
		c.regionsInfo.SetRegion(core.NewRegionInfo(target.Region, target.Leader))
	}

	if len(newRegions) == 0 {
		return target, nil, errors.New("no valid key")
	}

	var err error
	if c.hook != nil {
		newRegions, err = c.hook.AfterSplitRegion(ctx, target, keys, newRegions, nil)
	}

	return target, newRegions, err
}

func (c *testClient) BatchSplitRegions(
	ctx context.Context, regionInfo *restore.RegionInfo, keys [][]byte,
) ([]*restore.RegionInfo, error) {
	_, newRegions, err := c.BatchSplitRegionsWithOrigin(ctx, regionInfo, keys)
	return newRegions, err
}

func (c *testClient) ScatterRegion(ctx context.Context, regionInfo *restore.RegionInfo) error {
	return nil
}

func (c *testClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) {
	return &pdpb.GetOperatorResponse{
		Header: new(pdpb.ResponseHeader),
	}, nil
}

func (c *testClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*restore.RegionInfo, error) {
	if c.hook != nil {
		key, endKey, limit = c.hook.BeforeScanRegions(ctx, key, endKey, limit)
	}

	infos := c.regionsInfo.ScanRange(key, endKey, limit)
	regions := make([]*restore.RegionInfo, 0, len(infos))
	for _, info := range infos {
		regions = append(regions, &restore.RegionInfo{
			Region: info.GetMeta(),
			Leader: info.GetLeader(),
		})
	}

	var err error
	if c.hook != nil {
		regions, err = c.hook.AfterScanRegions(regions, nil)
	}
	return regions, err
}

func (c *testClient) GetPlacementRule(ctx context.Context, groupID, ruleID string) (r placement.Rule, err error) {
	return
}

func (c *testClient) SetPlacementRule(ctx context.Context, rule placement.Rule) error {
	return nil
}

func (c *testClient) DeletePlacementRule(ctx context.Context, groupID, ruleID string) error {
	return nil
}

func (c *testClient) SetStoresLabel(ctx context.Context, stores []uint64, labelKey, labelValue string) error {
	return nil
}

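// cloneRegion deep-copies a RegionInfo by round-tripping its protobuf
// messages through Marshal/Unmarshal, so hooks can mutate the copy (e.g. its
// RegionEpoch) without touching the region stored in the client.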
func cloneRegion(region *restore.RegionInfo) *restore.RegionInfo {
	r := &metapb.Region{}
	if region.Region != nil {
		b, _ := region.Region.Marshal()
		_ = r.Unmarshal(b)
	}

	l := &metapb.Peer{}
	if region.Leader != nil {
		b, _ := region.Leader.Marshal()
		_ = l.Unmarshal(b)
	}
	return &restore.RegionInfo{Region: r, Leader: l}
}

// regions: [, aay), [aay, bba), [bba, bbh), [bbh, cca), [cca, )
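// Each consecutive pair of keys becomes one region, with the empty key
// standing for the unbounded ends of the keyspace. The layout above comes
// from a call such as the one in doTestBatchSplitRegionByRanges:
//
//	initTestClient([][]byte{
//		[]byte(""), []byte("aay"), []byte("bba"),
//		[]byte("bbh"), []byte("cca"), []byte(""),
//	}, nil)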
func initTestClient(keys [][]byte, hook clientHook) *testClient {
	peers := make([]*metapb.Peer, 1)
	peers[0] = &metapb.Peer{
		Id:      1,
		StoreId: 1,
	}
	regions := make(map[uint64]*restore.RegionInfo)
	for i := uint64(1); i < uint64(len(keys)); i++ {
		startKey := keys[i-1]
		if len(startKey) != 0 {
			startKey = codec.EncodeBytes([]byte{}, startKey)
		}
		endKey := keys[i]
		if len(endKey) != 0 {
			endKey = codec.EncodeBytes([]byte{}, endKey)
		}
		regions[i] = &restore.RegionInfo{
			Region: &metapb.Region{
				Id:          i,
				Peers:       peers,
				StartKey:    startKey,
				EndKey:      endKey,
				RegionEpoch: &metapb.RegionEpoch{ConfVer: 1, Version: 1},
			},
		}
	}
	stores := make(map[uint64]*metapb.Store)
	stores[1] = &metapb.Store{
		Id: 1,
	}
	return newTestClient(stores, regions, uint64(len(keys)), hook)
}

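// checkRegionRanges asserts that regions form exactly the consecutive ranges
// [keys[0], keys[1]), [keys[1], keys[2]), ... after decoding the region
// boundaries.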
func checkRegionRanges(c *C, regions []*restore.RegionInfo, keys [][]byte) {
	for i, r := range regions {
		_, regionStart, _ := codec.DecodeBytes(r.Region.StartKey, []byte{})
		_, regionEnd, _ := codec.DecodeBytes(r.Region.EndKey, []byte{})
		c.Assert(regionStart, DeepEquals, keys[i])
		c.Assert(regionEnd, DeepEquals, keys[i+1])
	}
}

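// clientHook lets a test intercept the split client around its two
// cluster-facing operations: it can rewrite the target region and split keys
// before a batch split, the resulting regions after it, and the arguments
// and results of region scans.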
type clientHook interface {
	BeforeSplitRegion(ctx context.Context, regionInfo *restore.RegionInfo, keys [][]byte) (*restore.RegionInfo, [][]byte)
	AfterSplitRegion(context.Context, *restore.RegionInfo, [][]byte, []*restore.RegionInfo, error) ([]*restore.RegionInfo, error)
	BeforeScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]byte, []byte, int)
	AfterScanRegions([]*restore.RegionInfo, error) ([]*restore.RegionInfo, error)
}

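// noopHook is the pass-through clientHook; its BeforeSplitRegion also sleeps
// 1-10ms at random to shake out timing-dependent bugs. The fault-injecting
// hooks below embed it and override individual methods.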
type noopHook struct{}

func (h *noopHook) BeforeSplitRegion(ctx context.Context, regionInfo *restore.RegionInfo, keys [][]byte) (*restore.RegionInfo, [][]byte) {
	delayTime := rand.Int31n(10) + 1
	time.Sleep(time.Duration(delayTime) * time.Millisecond)
	return regionInfo, keys
}

func (h *noopHook) AfterSplitRegion(c context.Context, r *restore.RegionInfo, keys [][]byte, res []*restore.RegionInfo, err error) ([]*restore.RegionInfo, error) {
	return res, err
}

func (h *noopHook) BeforeScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]byte, []byte, int) {
	return key, endKey, limit
}

func (h *noopHook) AfterScanRegions(res []*restore.RegionInfo, err error) ([]*restore.RegionInfo, error) {
	return res, err
}

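// batchSplitHook bundles per-test setup (which returns its own teardown
// function) with a check on how many batch-split calls the client observed.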
type batchSplitHook interface {
	setup(c *C) func()
	check(c *C, cli *testClient)
}

type defaultHook struct{}

func (d defaultHook) setup(*C) func() {
	oldLimit := maxBatchSplitKeys
	oldSplitBackoffTime := splitRegionBaseBackOffTime
	maxBatchSplitKeys = 4
	splitRegionBaseBackOffTime = time.Millisecond
	return func() {
		maxBatchSplitKeys = oldLimit
		splitRegionBaseBackOffTime = oldSplitBackoffTime
	}
}

func (d defaultHook) check(c *C, cli *testClient) {
	// with a batch-split limit of 4 keys, there should be 7 batch-split calls:
	// 1. region: [aay, bba), keys: [b, ba, bb]
	// 2. region: [bbh, cca), keys: [bc, bd, be, bf]
	// 3. region: [bf, cca), keys: [bg, bh, bi, bj]
	// 4. region: [bj, cca), keys: [bk, bl, bm, bn]
	// 5. region: [bn, cca), keys: [bo, bp, bq, br]
	// 6. region: [br, cca), keys: [bs, bt, bu, bv]
	// 7. region: [bv, cca), keys: [bw, bx, by, bz]

	// splits may be retried on errors, so only check the lower bound.
	c.Assert(cli.splitCount.Load() >= 7, IsTrue)
}

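// doTestBatchSplitRegionByRanges drives SplitAndScatterRegionByRanges against
// the fake client: it seeds the five regions above, builds the ranges
// [b, ba), [ba, bb), ..., [by, bz) so splits are requested at their
// boundaries, and, unless errPat names an expected failure, verifies the
// split count and the final region boundaries.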
func (s *localSuite) doTestBatchSplitRegionByRanges(ctx context.Context, c *C, hook clientHook, errPat string, splitHook batchSplitHook) {
	if splitHook == nil {
		splitHook = defaultHook{}
	}
	deferFunc := splitHook.setup(c)
	defer deferFunc()

	keys := [][]byte{[]byte(""), []byte("aay"), []byte("bba"), []byte("bbh"), []byte("cca"), []byte("")}
	client := initTestClient(keys, hook)
	local := &local{
		splitCli: client,
		g:        glue.NewExternalTiDBGlue(nil, mysql.ModeNone),
	}

	// current region ranges: [, aay), [aay, bba), [bba, bbh), [bbh, cca), [cca, )
	rangeStart := codec.EncodeBytes([]byte{}, []byte("b"))
	rangeEnd := codec.EncodeBytes([]byte{}, []byte("c"))
	regions, err := paginateScanRegion(ctx, client, rangeStart, rangeEnd, 5)
	c.Assert(err, IsNil)
	// the returned regions are: [aay, bba), [bba, bbh), [bbh, cca)
	checkRegionRanges(c, regions, [][]byte{[]byte("aay"), []byte("bba"), []byte("bbh"), []byte("cca")})

	// generate ranges [b, ba), [ba, bb), [bb, bc), ... [by, bz)
	ranges := make([]Range, 0)
	start := []byte{'b'}
	for i := byte('a'); i <= 'z'; i++ {
		end := []byte{'b', i}
		ranges = append(ranges, Range{start: start, end: end})
		start = end
	}

	err = local.SplitAndScatterRegionByRanges(ctx, ranges, nil, true)
	if len(errPat) == 0 {
		c.Assert(err, IsNil)
	} else {
		c.Assert(err, ErrorMatches, errPat)
		return
	}

	splitHook.check(c, client)

	// check split ranges
	regions, err = paginateScanRegion(ctx, client, rangeStart, rangeEnd, 5)
	c.Assert(err, IsNil)
	result := [][]byte{
		[]byte("b"), []byte("ba"), []byte("bb"), []byte("bba"), []byte("bbh"), []byte("bc"),
		[]byte("bd"), []byte("be"), []byte("bf"), []byte("bg"), []byte("bh"), []byte("bi"), []byte("bj"),
		[]byte("bk"), []byte("bl"), []byte("bm"), []byte("bn"), []byte("bo"), []byte("bp"), []byte("bq"),
		[]byte("br"), []byte("bs"), []byte("bt"), []byte("bu"), []byte("bv"), []byte("bw"), []byte("bx"),
		[]byte("by"), []byte("bz"), []byte("cca"),
	}
	checkRegionRanges(c, regions, result)
}

func (s *localSuite) TestBatchSplitRegionByRanges(c *C) {
	s.doTestBatchSplitRegionByRanges(context.Background(), c, nil, "", nil)
}

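// batchSizeHook is like defaultHook, but limits each batch by the total size
// of its keys (maxBatchSplitSize) rather than by key count.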
type batchSizeHook struct{}

func (h batchSizeHook) setup(c *C) func() {
	oldSizeLimit := maxBatchSplitSize
	oldSplitBackoffTime := splitRegionBaseBackOffTime
	maxBatchSplitSize = 6
	splitRegionBaseBackOffTime = time.Millisecond
	return func() {
		maxBatchSplitSize = oldSizeLimit
		splitRegionBaseBackOffTime = oldSplitBackoffTime
	}
}

func (h batchSizeHook) check(c *C, cli *testClient) {
	// with a batch split size limit of 6, there should be 9 batch-split calls:
	// 1. region: [aay, bba), keys: [b, ba, bb]
	// 2. region: [bbh, cca), keys: [bc, bd, be]
	// 3. region: [be, cca), keys: [bf, bg, bh]
	// 4. region: [bh, cca), keys: [bi, bj, bk]
	// 5. region: [bk, cca), keys: [bl, bm, bn]
	// 6. region: [bn, cca), keys: [bo, bp, bq]
	// 7. region: [bq, cca), keys: [br, bs, bt]
	// 8. region: [bt, cca), keys: [bu, bv, bw]
	// 9. region: [bw, cca), keys: [bx, by, bz]

	// no errors are injected in this test, so the split count is exact.
	c.Assert(cli.splitCount.Load(), Equals, int32(9))
}

func (s *localSuite) TestBatchSplitRegionByRangesKeySizeLimit(c *C) {
	s.doTestBatchSplitRegionByRanges(context.Background(), c, nil, "", batchSizeHook{})
}

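// scanRegionEmptyHook lets the first ScanRegions call through and makes
// every later one return an empty list, so paginateScanRegion sees an empty
// result mid-split.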
type scanRegionEmptyHook struct {
	noopHook
	cnt int
}

func (h *scanRegionEmptyHook) AfterScanRegions(res []*restore.RegionInfo, err error) ([]*restore.RegionInfo, error) {
	h.cnt++
	// let the first scan succeed, then return empty results
	if h.cnt == 1 {
		return res, err
	}
	return nil, err
}

func (s *localSuite) TestBatchSplitRegionByRangesScanFailed(c *C) {
	s.doTestBatchSplitRegionByRanges(context.Background(), c, &scanRegionEmptyHook{}, "paginate scan region returns empty result", defaultHook{})
}

type splitRegionEpochNotMatchHook struct {
	noopHook
}

func (h *splitRegionEpochNotMatchHook) BeforeSplitRegion(ctx context.Context, regionInfo *restore.RegionInfo, keys [][]byte) (*restore.RegionInfo, [][]byte) {
	regionInfo, keys = h.noopHook.BeforeSplitRegion(ctx, regionInfo, keys)
	regionInfo = cloneRegion(regionInfo)
	// decrease the region epoch, so split region will fail
	regionInfo.Region.RegionEpoch.Version--
	return regionInfo, keys
}

func (s *localSuite) TestBatchSplitByRangesEpochNotMatch(c *C) {
	s.doTestBatchSplitRegionByRanges(context.Background(), c, &splitRegionEpochNotMatchHook{}, "batch split regions failed: epoch not match.*", defaultHook{})
}

// return an epoch-not-match error on every other call
type splitRegionEpochNotMatchHookRandom struct {
	noopHook
	cnt atomic.Int32
}

func (h *splitRegionEpochNotMatchHookRandom) BeforeSplitRegion(ctx context.Context, regionInfo *restore.RegionInfo, keys [][]byte) (*restore.RegionInfo, [][]byte) {
	regionInfo, keys = h.noopHook.BeforeSplitRegion(ctx, regionInfo, keys)
	// cnt.Inc() returns the new value (1, 2, 3, ...): let odd calls through and
	// report a stale epoch on even calls, so every injected failure is retried
	// against a fresh region and the split eventually succeeds.
	if h.cnt.Inc()%2 != 0 {
		return regionInfo, keys
	}
	regionInfo = cloneRegion(regionInfo)
	// decrease the region epoch, so split region will fail
	regionInfo.Region.RegionEpoch.Version--
	return regionInfo, keys
}

func (s *localSuite) TestBatchSplitByRangesEpochNotMatchOnce(c *C) {
	s.doTestBatchSplitRegionByRanges(context.Background(), c, &splitRegionEpochNotMatchHookRandom{}, "", defaultHook{})
}

type splitRegionNoValidKeyHook struct {
	noopHook
	returnErrTimes int32
	errorCnt       atomic.Int32
}

func (h *splitRegionNoValidKeyHook) BeforeSplitRegion(ctx context.Context, regionInfo *restore.RegionInfo, keys [][]byte) (*restore.RegionInfo, [][]byte) {
	regionInfo, keys = h.noopHook.BeforeSplitRegion(ctx, regionInfo, keys)
	if h.errorCnt.Inc() <= h.returnErrTimes {
		// drop all keys to trigger a "no valid key" error
		keys = keys[:0]
	}
	return regionInfo, keys
}

func (s *localSuite) TestBatchSplitByRangesNoValidKeysOnce(c *C) {
	s.doTestBatchSplitRegionByRanges(context.Background(), c, &splitRegionNoValidKeyHook{returnErrTimes: 1}, "", defaultHook{})
}

func (s *localSuite) TestBatchSplitByRangesNoValidKeys(c *C) {
	s.doTestBatchSplitRegionByRanges(context.Background(), c, &splitRegionNoValidKeyHook{returnErrTimes: math.MaxInt32}, ".*no valid key.*", defaultHook{})
}

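// reportAfterSplitHook signals ch after every batch split, so a test can
// react (e.g. cancel the context) as soon as the first split lands.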
type reportAfterSplitHook struct {
	noopHook
	ch chan<- struct{}
}

func (h *reportAfterSplitHook) AfterSplitRegion(ctx context.Context, region *restore.RegionInfo, keys [][]byte, resultRegions []*restore.RegionInfo, err error) ([]*restore.RegionInfo, error) {
	h.ch <- struct{}{}
	return resultRegions, err
}

func (s *localSuite) TestBatchSplitByRangeCtxCanceled(c *C) {
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan struct{})
	// cancel ctx after the first region split succeeds.
	go func() {
		i := 0
		for range ch {
			if i == 0 {
				cancel()
			}
			i++
		}
	}()

	s.doTestBatchSplitRegionByRanges(ctx, c, &reportAfterSplitHook{ch: ch}, ".*context canceled.*", defaultHook{})
	close(ch)
}

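// doTestBatchSplitByRangesWithClusteredIndex repeats the split test with
// common-handle (clustered-index) row keys: it pre-splits the table region at
// two handle-encoded keys, splits at 20 generated row keys, and checks that
// the final boundaries interleave the generated keys with the pre-split
// boundary keys[3], which falls inside the scanned range.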
func (s *localSuite) doTestBatchSplitByRangesWithClusteredIndex(c *C, hook clientHook) {
	oldLimit := maxBatchSplitKeys
	oldSplitBackoffTime := splitRegionBaseBackOffTime
	maxBatchSplitKeys = 10
	splitRegionBaseBackOffTime = time.Millisecond
	defer func() {
		maxBatchSplitKeys = oldLimit
		splitRegionBaseBackOffTime = oldSplitBackoffTime
	}()

	stmtCtx := new(stmtctx.StatementContext)

	tableID := int64(1)
	tableStartKey := tablecodec.EncodeTablePrefix(tableID)
	tableEndKey := tablecodec.EncodeTablePrefix(tableID + 1)
	keys := [][]byte{[]byte(""), tableStartKey}
	// pre-split the table range at 2 handle-encoded keys
	for i := int64(0); i < 2; i++ {
		keyBytes, err := codec.EncodeKey(stmtCtx, nil, types.NewIntDatum(i))
		c.Assert(err, IsNil)
		h, err := kv.NewCommonHandle(keyBytes)
		c.Assert(err, IsNil)
		key := tablecodec.EncodeRowKeyWithHandle(tableID, h)
		keys = append(keys, key)
	}
	keys = append(keys, tableEndKey, []byte(""))
	client := initTestClient(keys, hook)
	local := &local{
		splitCli: client,
		g:        glue.NewExternalTiDBGlue(nil, mysql.ModeNone),
	}
	ctx := context.Background()

	// generate a batch of row keys for table 1 with common handles
	rangeKeys := make([][]byte, 0, 20+1)
	for i := int64(0); i < 2; i++ {
		for j := int64(0); j < 10; j++ {
			keyBytes, err := codec.EncodeKey(stmtCtx, nil, types.NewIntDatum(i), types.NewIntDatum(j*10000))
			c.Assert(err, IsNil)
			h, err := kv.NewCommonHandle(keyBytes)
			c.Assert(err, IsNil)
			key := tablecodec.EncodeRowKeyWithHandle(tableID, h)
			rangeKeys = append(rangeKeys, key)
		}
	}

	start := rangeKeys[0]
	ranges := make([]Range, 0, len(rangeKeys)-1)
	for _, e := range rangeKeys[1:] {
		ranges = append(ranges, Range{start: start, end: e})
		start = e
	}

	err := local.SplitAndScatterRegionByRanges(ctx, ranges, nil, true)
	c.Assert(err, IsNil)

	startKey := codec.EncodeBytes([]byte{}, rangeKeys[0])
	endKey := codec.EncodeBytes([]byte{}, rangeKeys[len(rangeKeys)-1])
	// check split ranges
	regions, err := paginateScanRegion(ctx, client, startKey, endKey, 5)
	c.Assert(err, IsNil)
	c.Assert(len(regions), Equals, len(ranges)+1)

	checkKeys := append([][]byte{}, rangeKeys[:10]...)
	checkKeys = append(checkKeys, keys[3])
	checkKeys = append(checkKeys, rangeKeys[10:]...)
	checkRegionRanges(c, regions, checkKeys)
}

func (s *localSuite) TestBatchSplitByRangesWithClusteredIndex(c *C) {
	s.doTestBatchSplitByRangesWithClusteredIndex(c, nil)
}

func (s *localSuite) TestBatchSplitByRangesWithClusteredIndexEpochNotMatch(c *C) {
	s.doTestBatchSplitByRangesWithClusteredIndex(c, &splitRegionEpochNotMatchHookRandom{})
}

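// TestNeedSplit builds six adjacent regions over int handles,
// [0, 10), [10, 100), [100, 500), [500, 1000), [1000, 999999), [999999, +inf),
// and checks which region, if any, needSplit returns for a given row key.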
func (s *localSuite) TestNeedSplit(c *C) {
	tableID := int64(1)
	peers := make([]*metapb.Peer, 1)
	peers[0] = &metapb.Peer{
		Id:      1,
		StoreId: 1,
	}
	keys := []int64{10, 100, 500, 1000, 999999, -1}
	start := tablecodec.EncodeRowKeyWithHandle(tableID, kv.IntHandle(0))
	regionStart := codec.EncodeBytes([]byte{}, start)
	regions := make([]*restore.RegionInfo, 0)
	for _, end := range keys {
		var regionEndKey []byte
		if end >= 0 {
			endKey := tablecodec.EncodeRowKeyWithHandle(tableID, kv.IntHandle(end))
			regionEndKey = codec.EncodeBytes([]byte{}, endKey)
		}
		region := &restore.RegionInfo{
			Region: &metapb.Region{
				Id:          1,
				Peers:       peers,
				StartKey:    regionStart,
				EndKey:      regionEndKey,
				RegionEpoch: &metapb.RegionEpoch{ConfVer: 1, Version: 1},
			},
		}
		regions = append(regions, region)
		regionStart = regionEndKey
	}

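	// checkMap maps a row handle to the index of the region expected to need
	// a split at that key; -1 means the key already sits on a region
	// boundary, so needSplit should return nil.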
	checkMap := map[int64]int{
		0:         -1,
		5:         0,
		99:        1,
		100:       -1,
		512:       3,
		8888:      4,
		999999:    -1,
		100000000: 5,
	}

	for hdl, idx := range checkMap {
		checkKey := tablecodec.EncodeRowKeyWithHandle(tableID, kv.IntHandle(hdl))
		res := needSplit(checkKey, regions)
		if idx < 0 {
			c.Assert(res, IsNil)
		} else {
			c.Assert(res, DeepEquals, regions[idx])
		}
	}
}