github.com/pingcap/br@v5.3.0-alpha.0.20220125034240-ec59c7b6ce30+incompatible/pkg/lightning/backend/local/localhelper.go

     1  // Copyright 2020 PingCAP, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package local
    15  
    16  import (
    17  	"bytes"
    18  	"context"
    19  	"database/sql"
    20  	"regexp"
    21  	"runtime"
    22  	"sort"
    23  	"strings"
    24  	"sync"
    25  	"time"
    26  
    27  	"github.com/docker/go-units"
    28  	"github.com/pingcap/errors"
    29  	sst "github.com/pingcap/kvproto/pkg/import_sstpb"
    30  	"github.com/pingcap/kvproto/pkg/metapb"
    31  	"github.com/pingcap/kvproto/pkg/pdpb"
    32  	"github.com/pingcap/tidb/util/codec"
    33  	"go.uber.org/multierr"
    34  	"go.uber.org/zap"
    35  	"golang.org/x/sync/errgroup"
    36  
    37  	"github.com/pingcap/br/pkg/lightning/checkpoints"
    38  	"github.com/pingcap/br/pkg/lightning/common"
    39  	"github.com/pingcap/br/pkg/lightning/log"
    40  	"github.com/pingcap/br/pkg/logutil"
    41  	split "github.com/pingcap/br/pkg/restore"
    42  	"github.com/pingcap/br/pkg/utils"
    43  )
    44  
    45  const (
    46  	SplitRetryTimes       = 8
    47  	retrySplitMaxWaitTime = 4 * time.Second
    48  )
    49  
    50  var (
    51  	// the max number of keys in one batch used to split a region
    52  	maxBatchSplitKeys = 4096
    53  	// the max total key size in one split-region batch.
    54  	// Our threshold should be smaller than TiKV's Raft max entry size (default is 8MB).
    55  	maxBatchSplitSize = 6 * units.MiB
    56  	// the base exponential backoff time.
    57  	// The variable is only changed in unit tests to make tests run faster.
    58  	splitRegionBaseBackOffTime = time.Second
    59  )
    60  
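// Illustrative sketch (not part of the original file): the split workers below
// batch their sorted keys so that a single BatchSplitRegions call never exceeds
// maxBatchSplitKeys keys or maxBatchSplitSize bytes in total. This hypothetical
// helper mirrors that batching rule in isolation.
func sketchBatchSplitKeyGroups(keys [][]byte) [][][]byte {
	var batches [][][]byte
	startIdx, batchKeySize := 0, 0
	for endIdx := 0; endIdx <= len(keys); endIdx++ {
		// Close the current batch when the keys are exhausted, or when adding the
		// next key would exceed the size or count limit.
		if endIdx == len(keys) || batchKeySize+len(keys[endIdx]) > maxBatchSplitSize || endIdx-startIdx >= maxBatchSplitKeys {
			if endIdx > startIdx {
				batches = append(batches, keys[startIdx:endIdx])
			}
			startIdx, batchKeySize = endIdx, 0
		}
		if endIdx < len(keys) {
			batchKeySize += len(keys[endIdx])
		}
	}
	return batches
}
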
    61  // TODO: remove this file and use br internal functions.
    62  // This file includes the region split & scatter operations, just like br.
    63  // We could simply call the br functions, but that requires changing some br function signatures.
    64  // When the total size of the ranges is small, we can skip the split to avoid generating empty regions.
    65  func (local *local) SplitAndScatterRegionByRanges(
    66  	ctx context.Context,
    67  	ranges []Range,
    68  	tableInfo *checkpoints.TidbTableInfo,
    69  	needSplit bool,
    70  ) error {
    71  	if len(ranges) == 0 {
    72  		return nil
    73  	}
    74  
    75  	db, err := local.g.GetDB()
    76  	if err != nil {
    77  		return errors.Trace(err)
    78  	}
    79  
    80  	minKey := codec.EncodeBytes([]byte{}, ranges[0].start)
    81  	maxKey := codec.EncodeBytes([]byte{}, ranges[len(ranges)-1].end)
    82  
    83  	scatterRegions := make([]*split.RegionInfo, 0)
    84  	var retryKeys [][]byte
    85  	waitTime := splitRegionBaseBackOffTime
    86  	skippedKeys := 0
    87  	for i := 0; i < SplitRetryTimes; i++ {
    88  		log.L().Info("split and scatter region",
    89  			logutil.Key("minKey", minKey),
    90  			logutil.Key("maxKey", maxKey),
    91  			zap.Int("retry", i),
    92  		)
    93  		err = nil
    94  		if i > 0 {
    95  			select {
    96  			case <-time.After(waitTime):
    97  			case <-ctx.Done():
    98  				return ctx.Err()
    99  			}
   100  			waitTime *= 2
   101  			if waitTime > retrySplitMaxWaitTime {
   102  				waitTime = retrySplitMaxWaitTime
   103  			}
   104  		}
   105  		var regions []*split.RegionInfo
   106  		regions, err = paginateScanRegion(ctx, local.splitCli, minKey, maxKey, 128)
   107  		log.L().Info("paginate scan regions", zap.Int("count", len(regions)),
   108  			logutil.Key("start", minKey), logutil.Key("end", maxKey))
   109  		if err != nil {
   110  			log.L().Warn("paginate scan region failed", logutil.Key("minKey", minKey), logutil.Key("maxKey", maxKey),
   111  				log.ShortError(err), zap.Int("retry", i))
   112  			continue
   113  		}
   114  
   115  		if len(regions) == 0 {
   116  			log.L().Warn("paginate scan region returns empty result", logutil.Key("minKey", minKey), logutil.Key("maxKey", maxKey),
   117  				zap.Int("retry", i))
   118  			return errors.New("paginate scan region returns empty result")
   119  		}
   120  
   121  		log.L().Info("paginate scan region finished", logutil.Key("minKey", minKey), logutil.Key("maxKey", maxKey),
   122  			zap.Int("regions", len(regions)))
   123  
   124  		if !needSplit {
   125  			scatterRegions = append(scatterRegions, regions...)
   126  			break
   127  		}
   128  
   129  		needSplitRanges := make([]Range, 0, len(ranges))
   130  		startKey := make([]byte, 0)
   131  		endKey := make([]byte, 0)
   132  		for _, r := range ranges {
   133  			startKey = codec.EncodeBytes(startKey, r.start)
   134  			endKey = codec.EncodeBytes(endKey, r.end)
   135  			idx := sort.Search(len(regions), func(i int) bool {
   136  				return beforeEnd(startKey, regions[i].Region.EndKey)
   137  			})
   138  			if idx < 0 || idx >= len(regions) {
   139  				log.L().Error("target region not found", logutil.Key("start_key", startKey),
   140  					logutil.RegionBy("first_region", regions[0].Region),
   141  					logutil.RegionBy("last_region", regions[len(regions)-1].Region))
   142  				return errors.New("target region not found")
   143  			}
   144  			if bytes.Compare(startKey, regions[idx].Region.StartKey) > 0 || bytes.Compare(endKey, regions[idx].Region.EndKey) < 0 {
   145  				needSplitRanges = append(needSplitRanges, r)
   146  			}
   147  		}
   148  		ranges = needSplitRanges
   149  		if len(ranges) == 0 {
   150  			log.L().Info("no ranges need to be split, skipped.")
   151  			return nil
   152  		}
   153  
   154  		var tableRegionStats map[uint64]int64
   155  		if tableInfo != nil {
   156  			tableRegionStats, err = fetchTableRegionSizeStats(ctx, db, tableInfo.ID)
   157  			if err != nil {
   158  				log.L().Warn("fetch table region size statistics failed",
   159  					zap.String("table", tableInfo.Name), zap.Error(err))
   160  				tableRegionStats = make(map[uint64]int64)
   161  			}
   162  		}
   163  
   164  		regionMap := make(map[uint64]*split.RegionInfo)
   165  		for _, region := range regions {
   166  			regionMap[region.Region.GetId()] = region
   167  		}
   168  
   169  		var splitKeyMap map[uint64][][]byte
   170  		if len(retryKeys) > 0 {
   171  			firstKeyEnc := codec.EncodeBytes([]byte{}, retryKeys[0])
   172  			lastKeyEnc := codec.EncodeBytes([]byte{}, retryKeys[len(retryKeys)-1])
   173  			if bytes.Compare(firstKeyEnc, regions[0].Region.StartKey) < 0 || !beforeEnd(lastKeyEnc, regions[len(regions)-1].Region.EndKey) {
   174  				log.L().Warn("no valid key for split region",
   175  					logutil.Key("firstKey", firstKeyEnc), logutil.Key("lastKey", lastKeyEnc),
   176  					logutil.Key("firstRegionStart", regions[0].Region.StartKey),
   177  					logutil.Key("lastRegionEnd", regions[len(regions)-1].Region.EndKey))
   178  				return errors.New("check split keys failed")
   179  			}
   180  			splitKeyMap = getSplitKeys(retryKeys, regions)
   181  			retryKeys = retryKeys[:0]
   182  		} else {
   183  			splitKeyMap = getSplitKeysByRanges(ranges, regions)
   184  		}
   185  
   186  		type splitInfo struct {
   187  			region *split.RegionInfo
   188  			keys   [][]byte
   189  		}
   190  
   191  		var syncLock sync.Mutex
   192  		// TODO: make this size configurable
   193  		size := utils.MinInt(len(splitKeyMap), runtime.GOMAXPROCS(0))
   194  		ch := make(chan *splitInfo, size)
   195  		eg, splitCtx := errgroup.WithContext(ctx)
   196  
   197  		for splitWorker := 0; splitWorker < size; splitWorker++ {
   198  			eg.Go(func() error {
   199  				for sp := range ch {
   200  					var newRegions []*split.RegionInfo
   201  					var err1 error
   202  					region := sp.region
   203  					keys := sp.keys
   204  					sort.Slice(keys, func(i, j int) bool {
   205  						return bytes.Compare(keys[i], keys[j]) < 0
   206  					})
   207  					splitRegion := region
   208  					startIdx := 0
   209  					endIdx := 0
   210  					batchKeySize := 0
   211  					for endIdx <= len(keys) {
   212  						if endIdx == len(keys) || batchKeySize+len(keys[endIdx]) > maxBatchSplitSize || endIdx-startIdx >= maxBatchSplitKeys {
   213  							splitRegionStart := codec.EncodeBytes([]byte{}, keys[startIdx])
   214  							splitRegionEnd := codec.EncodeBytes([]byte{}, keys[endIdx-1])
   215  							if bytes.Compare(splitRegionStart, splitRegion.Region.StartKey) < 0 || !beforeEnd(splitRegionEnd, splitRegion.Region.EndKey) {
   216  								log.L().Fatal("no valid key in region",
   217  									logutil.Key("startKey", splitRegionStart), logutil.Key("endKey", splitRegionEnd),
   218  									logutil.Key("regionStart", splitRegion.Region.StartKey), logutil.Key("regionEnd", splitRegion.Region.EndKey),
   219  									logutil.Region(splitRegion.Region), logutil.Leader(splitRegion.Leader))
   220  							}
   221  							splitRegion, newRegions, err1 = local.BatchSplitRegions(splitCtx, splitRegion, keys[startIdx:endIdx])
   222  							if err1 != nil {
   223  								if strings.Contains(err1.Error(), "no valid key") {
   224  									for _, key := range keys {
   225  										log.L().Warn("no valid key",
   226  											logutil.Key("startKey", region.Region.StartKey),
   227  											logutil.Key("endKey", region.Region.EndKey),
   228  											logutil.Key("key", codec.EncodeBytes([]byte{}, key)))
   229  									}
   230  									return err1
   231  								} else if common.IsContextCanceledError(err1) {
   232  									// do not retry on context.Canceled error
   233  									return err1
   234  								}
   235  								log.L().Warn("split regions", log.ShortError(err1), zap.Int("retry time", i),
   236  									zap.Uint64("region_id", region.Region.Id))
   237  
   238  								syncLock.Lock()
   239  								retryKeys = append(retryKeys, keys[startIdx:]...)
   240  								// set global error so if we exceed retry limit, the function will return this error
   241  								err = multierr.Append(err, err1)
   242  								syncLock.Unlock()
   243  								break
   244  							} else {
   245  								log.L().Info("batch split region", zap.Uint64("region_id", splitRegion.Region.Id),
   246  									zap.Int("keys", endIdx-startIdx), zap.Binary("firstKey", keys[startIdx]),
   247  									zap.Binary("end", keys[endIdx-1]))
   248  								sort.Slice(newRegions, func(i, j int) bool {
   249  									return bytes.Compare(newRegions[i].Region.StartKey, newRegions[j].Region.StartKey) < 0
   250  								})
   251  								syncLock.Lock()
   252  								scatterRegions = append(scatterRegions, newRegions...)
   253  								syncLock.Unlock()
   254  								// the region with the max start key is the one that needs to be split further.
   255  								if bytes.Compare(splitRegion.Region.StartKey, newRegions[len(newRegions)-1].Region.StartKey) < 0 {
   256  									splitRegion = newRegions[len(newRegions)-1]
   257  								}
   258  							}
   259  							batchKeySize = 0
   260  							startIdx = endIdx
   261  						}
   262  						if endIdx < len(keys) {
   263  							batchKeySize += len(keys[endIdx])
   264  						}
   265  						endIdx++
   266  					}
   267  				}
   268  				return nil
   269  			})
   270  		}
   271  	sendLoop:
   272  		for regionID, keys := range splitKeyMap {
   273  			// if a region is not in tableRegionStats, it means the region was newly split, so
   274  			// we can skip splitting it again.
   275  			regionSize, ok := tableRegionStats[regionID]
   276  			if !ok {
   277  				log.L().Warn("region stats not found", zap.Uint64("region", regionID))
   278  			}
   279  			if len(keys) == 1 && regionSize < local.regionSplitSize {
   280  				skippedKeys++
   281  			}
   282  			select {
   283  			case ch <- &splitInfo{region: regionMap[regionID], keys: keys}:
   284  			case <-ctx.Done():
   285  				// outer context is canceled, can directly return
   286  				close(ch)
   287  				return ctx.Err()
   288  			case <-splitCtx.Done():
   289  				// met critical error, stop process
   290  				break sendLoop
   291  			}
   292  		}
   293  		close(ch)
   294  		if splitError := eg.Wait(); splitError != nil {
   295  			retryKeys = retryKeys[:0]
   296  			err = splitError
   297  			continue
   298  		}
   299  
   300  		if len(retryKeys) == 0 {
   301  			break
   302  		} else {
   303  			sort.Slice(retryKeys, func(i, j int) bool {
   304  				return bytes.Compare(retryKeys[i], retryKeys[j]) < 0
   305  			})
   306  			minKey = codec.EncodeBytes([]byte{}, retryKeys[0])
   307  			maxKey = codec.EncodeBytes([]byte{}, nextKey(retryKeys[len(retryKeys)-1]))
   308  		}
   309  	}
   310  	if err != nil {
   311  		return errors.Trace(err)
   312  	}
   313  
   314  	startTime := time.Now()
   315  	scatterCount := 0
   316  	for _, region := range scatterRegions {
   317  		local.waitForScatterRegion(ctx, region)
   318  		if time.Since(startTime) > split.ScatterWaitUpperInterval {
   319  			break
   320  		}
   321  		scatterCount++
   322  	}
   323  	if scatterCount == len(scatterRegions) {
   324  		log.L().Info("waiting for scattering regions done",
   325  			zap.Int("skipped_keys", skippedKeys),
   326  			zap.Int("regions", len(scatterRegions)), zap.Duration("take", time.Since(startTime)))
   327  	} else {
   328  		log.L().Info("waiting for scattering regions timeout",
   329  			zap.Int("skipped_keys", skippedKeys),
   330  			zap.Int("scatterCount", scatterCount),
   331  			zap.Int("regions", len(scatterRegions)),
   332  			zap.Duration("take", time.Since(startTime)))
   333  	}
   334  	return nil
   335  }
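
// Illustrative sketch (not part of the original file): how a caller might drive the
// split & scatter step before ingesting an engine. The function name and the
// needSplit heuristic here are hypothetical; the real call sites live in local.go.
func sketchSplitBeforeIngest(ctx context.Context, local *local, ranges []Range, tableInfo *checkpoints.TidbTableInfo) error {
	// Even with needSplit=false the scanned regions are still scattered.
	needSplit := len(ranges) > 1
	return errors.Trace(local.SplitAndScatterRegionByRanges(ctx, ranges, tableInfo, needSplit))
}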
   336  
   337  func fetchTableRegionSizeStats(ctx context.Context, db *sql.DB, tableID int64) (map[uint64]int64, error) {
   338  	exec := &common.SQLWithRetry{
   339  		DB:     db,
   340  		Logger: log.L(),
   341  	}
   342  
   343  	stats := make(map[uint64]int64)
   344  	err := exec.Transact(ctx, "fetch region approximate sizes", func(ctx context.Context, tx *sql.Tx) error {
   345  		rows, err := tx.QueryContext(ctx, "SELECT REGION_ID, APPROXIMATE_SIZE FROM information_schema.TIKV_REGION_STATUS WHERE TABLE_ID = ?", tableID)
   346  		if err != nil {
   347  			return errors.Trace(err)
   348  		}
   349  
   350  		defer rows.Close()
   351  		var (
   352  			regionID uint64
   353  			size     int64
   354  		)
   355  		for rows.Next() {
   356  			if err = rows.Scan(&regionID, &size); err != nil {
   357  				return errors.Trace(err)
   358  			}
   359  			stats[regionID] = size * units.MiB
   360  		}
   361  		return rows.Err()
   362  	})
   363  	return stats, errors.Trace(err)
   364  }
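
// Illustrative sketch (not part of the original file): the returned map is keyed by
// region ID and holds approximate sizes in bytes (APPROXIMATE_SIZE is reported in
// MiB, hence the multiplication above). A region missing from the map, e.g. one that
// was just split, defaults to size 0 in the caller. The helper is hypothetical and
// mirrors the size half of the "skipped keys" check above.
func sketchIsSmallRegion(stats map[uint64]int64, regionID uint64, regionSplitSize int64) bool {
	return stats[regionID] < regionSplitSize
}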
   365  
   366  func paginateScanRegion(
   367  	ctx context.Context, client split.SplitClient, startKey, endKey []byte, limit int,
   368  ) ([]*split.RegionInfo, error) {
   369  	if len(endKey) != 0 && bytes.Compare(startKey, endKey) >= 0 {
   370  		log.L().Error("startKey >= endKey when paginating scan region",
   371  			logutil.Key("startKey", startKey),
   372  			logutil.Key("endKey", endKey))
   373  		return nil, errors.Errorf("startKey >= endKey when paginating scan region")
   374  	}
   375  
   376  	var regions []*split.RegionInfo
   377  	for {
   378  		batch, err := client.ScanRegions(ctx, startKey, endKey, limit)
   379  		if err != nil {
   380  			return nil, errors.Trace(err)
   381  		}
   382  		regions = append(regions, batch...)
   383  		if len(batch) < limit {
   384  			// No more regions
   385  			break
   386  		}
   387  		startKey = batch[len(batch)-1].Region.GetEndKey()
   388  		if len(startKey) == 0 ||
   389  			(len(endKey) > 0 && bytes.Compare(startKey, endKey) >= 0) {
   390  			// The whole key space has been scanned
   391  			break
   392  		}
   393  	}
   394  	sort.Slice(regions, func(i, j int) bool {
   395  		return bytes.Compare(regions[i].Region.StartKey, regions[j].Region.StartKey) < 0
   396  	})
   397  	return regions, nil
   398  }
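
// Illustrative sketch (not part of the original file): paginateScanRegion expects
// codec-encoded boundary keys, and limit bounds each ScanRegions call rather than
// the total number of returned regions. The wrapper and its raw-key parameters are
// hypothetical.
func sketchScanEncodedRange(ctx context.Context, client split.SplitClient, rawStart, rawEnd []byte) ([]*split.RegionInfo, error) {
	startKey := codec.EncodeBytes([]byte{}, rawStart)
	endKey := codec.EncodeBytes([]byte{}, rawEnd)
	// 128 regions per batch matches the page size used by SplitAndScatterRegionByRanges.
	return paginateScanRegion(ctx, client, startKey, endKey, 128)
}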
   399  
   400  func (local *local) BatchSplitRegions(ctx context.Context, region *split.RegionInfo, keys [][]byte) (*split.RegionInfo, []*split.RegionInfo, error) {
   401  	region, newRegions, err := local.splitCli.BatchSplitRegionsWithOrigin(ctx, region, keys)
   402  	if err != nil {
   403  		return nil, nil, errors.Annotatef(err, "batch split regions failed")
   404  	}
   405  	var failedErr error
   406  	retryRegions := make([]*split.RegionInfo, 0)
   407  	scatterRegions := newRegions
   408  	waitTime := splitRegionBaseBackOffTime
   409  	for i := 0; i < maxRetryTimes; i++ {
   410  		for _, region := range scatterRegions {
   411  			// Wait for a while until the region successfully splits.
   412  			local.waitForSplit(ctx, region.Region.Id)
   413  			if err = local.splitCli.ScatterRegion(ctx, region); err != nil {
   414  				failedErr = err
   415  				retryRegions = append(retryRegions, region)
   416  			}
   417  		}
   418  		if len(retryRegions) == 0 {
   419  			break
   420  		}
   421  		// the scatter operation likely fails because region replication is not finished yet;
   422  		// pack them into one log entry to avoid printing a lot of warning logs.
   423  		log.L().Warn("scatter region failed", zap.Int("regionCount", len(newRegions)),
   424  			zap.Int("failedCount", len(retryRegions)), zap.Error(failedErr), zap.Int("retry", i))
   425  		scatterRegions = retryRegions
   426  		retryRegions = make([]*split.RegionInfo, 0)
   427  		select {
   428  		case <-time.After(waitTime):
   429  		case <-ctx.Done():
   430  			return nil, nil, ctx.Err()
   431  		}
   432  		waitTime *= 2
   433  	}
   434  
   435  	return region, newRegions, nil
   436  }
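
// Illustrative sketch (not part of the original file): BatchSplitRegions expects the
// keys to be sorted and to lie inside the origin region; it returns the origin region
// together with the newly created regions, which the caller scatters and waits on.
// The wrapper name is hypothetical.
func sketchSplitRegionOnce(ctx context.Context, local *local, region *split.RegionInfo, keys [][]byte) ([]*split.RegionInfo, error) {
	sort.Slice(keys, func(i, j int) bool { return bytes.Compare(keys[i], keys[j]) < 0 })
	_, newRegions, err := local.BatchSplitRegions(ctx, region, keys)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return newRegions, nil
}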
   437  
   438  func (local *local) hasRegion(ctx context.Context, regionID uint64) (bool, error) {
   439  	regionInfo, err := local.splitCli.GetRegionByID(ctx, regionID)
   440  	if err != nil {
   441  		return false, err
   442  	}
   443  	return regionInfo != nil, nil
   444  }
   445  
   446  func (local *local) waitForSplit(ctx context.Context, regionID uint64) {
   447  	for i := 0; i < split.SplitCheckMaxRetryTimes; i++ {
   448  		ok, err := local.hasRegion(ctx, regionID)
   449  		if err != nil {
   450  			log.L().Info("wait for split failed", log.ShortError(err))
   451  			return
   452  		}
   453  		if ok {
   454  			break
   455  		}
   456  		select {
   457  		case <-time.After(time.Second):
   458  		case <-ctx.Done():
   459  			return
   460  		}
   461  	}
   462  }
   463  
   464  func (local *local) waitForScatterRegion(ctx context.Context, regionInfo *split.RegionInfo) {
   465  	regionID := regionInfo.Region.GetId()
   466  	for i := 0; i < split.ScatterWaitMaxRetryTimes; i++ {
   467  		ok, err := local.isScatterRegionFinished(ctx, regionID)
   468  		if err != nil {
   469  			log.L().Warn("scatter region failed: do not have the region",
   470  				logutil.Region(regionInfo.Region))
   471  			return
   472  		}
   473  		if ok {
   474  			break
   475  		}
   476  		select {
   477  		case <-time.After(time.Second):
   478  		case <-ctx.Done():
   479  			return
   480  		}
   481  	}
   482  }
   483  
   484  func (local *local) isScatterRegionFinished(ctx context.Context, regionID uint64) (bool, error) {
   485  	resp, err := local.splitCli.GetOperator(ctx, regionID)
   486  	if err != nil {
   487  		return false, err
   488  	}
   489  	// Heartbeat may not be sent to PD
   490  	if respErr := resp.GetHeader().GetError(); respErr != nil {
   491  		if respErr.GetType() == pdpb.ErrorType_REGION_NOT_FOUND {
   492  			return true, nil
   493  		}
   494  		// don't return an error if region replication is not complete
   495  		// TODO: add a new error type to avoid this check by string matching
   496  		matches, _ := regexp.MatchString("region \\d+ is not fully replicated", respErr.Message)
   497  		if matches {
   498  			return false, nil
   499  		}
   500  		return false, errors.Errorf("get operator error: %s", respErr.GetType())
   501  	}
   502  	// If the current operator of the region is not 'scatter-region', we can assume
   503  	// that the 'scatter-region' operator has finished or timed out.
   504  	ok := string(resp.GetDesc()) != "scatter-region" || resp.GetStatus() != pdpb.OperatorStatus_RUNNING
   505  	return ok, nil
   506  }
   507  
   508  func getSplitKeysByRanges(ranges []Range, regions []*split.RegionInfo) map[uint64][][]byte {
   509  	checkKeys := make([][]byte, 0)
   510  	var lastEnd []byte
   511  	for _, rg := range ranges {
   512  		if !bytes.Equal(lastEnd, rg.start) {
   513  			checkKeys = append(checkKeys, rg.start)
   514  		}
   515  		checkKeys = append(checkKeys, rg.end)
   516  		lastEnd = rg.end
   517  	}
   518  	return getSplitKeys(checkKeys, regions)
   519  }
   520  
   521  func getSplitKeys(checkKeys [][]byte, regions []*split.RegionInfo) map[uint64][][]byte {
   522  	splitKeyMap := make(map[uint64][][]byte)
   523  	for _, key := range checkKeys {
   524  		if region := needSplit(key, regions); region != nil {
   525  			splitKeys, ok := splitKeyMap[region.Region.GetId()]
   526  			if !ok {
   527  				splitKeys = make([][]byte, 0, 1)
   528  			}
   529  			splitKeyMap[region.Region.GetId()] = append(splitKeys, key)
   530  			log.L().Debug("get key for split region",
   531  				zap.Binary("key", key),
   532  				zap.Binary("startKey", region.Region.StartKey),
   533  				zap.Binary("endKey", region.Region.EndKey))
   534  		}
   535  	}
   536  	return splitKeyMap
   537  }
   538  
   539  // needSplit checks whether a key requires a split; if so, it returns the region that should be split.
   540  func needSplit(key []byte, regions []*split.RegionInfo) *split.RegionInfo {
   541  	// If splitKey is the max key.
   542  	if len(key) == 0 {
   543  		return nil
   544  	}
   545  	splitKey := codec.EncodeBytes([]byte{}, key)
   546  
   547  	idx := sort.Search(len(regions), func(i int) bool {
   548  		return beforeEnd(splitKey, regions[i].Region.EndKey)
   549  	})
   550  	if idx < len(regions) {
   551  		// If splitKey is in a region
   552  		if bytes.Compare(splitKey, regions[idx].Region.GetStartKey()) > 0 && beforeEnd(splitKey, regions[idx].Region.GetEndKey()) {
   553  			log.L().Debug("need split",
   554  				zap.Binary("splitKey", key),
   555  				zap.Binary("encodedKey", splitKey),
   556  				zap.Binary("region start", regions[idx].Region.GetStartKey()),
   557  				zap.Binary("region end", regions[idx].Region.GetEndKey()),
   558  			)
   559  			return regions[idx]
   560  		}
   561  	}
   562  	return nil
   563  }
   564  
   565  func beforeEnd(key []byte, end []byte) bool {
   566  	return bytes.Compare(key, end) < 0 || len(end) == 0
   567  }
   568  
   569  func insideRegion(region *metapb.Region, metas []*sst.SSTMeta) bool {
   570  	inside := true
   571  	for _, meta := range metas {
   572  		rg := meta.GetRange()
   573  		inside = inside && (keyInsideRegion(region, rg.GetStart()) && keyInsideRegion(region, rg.GetEnd()))
   574  	}
   575  	return inside
   576  }
   577  
   578  func keyInsideRegion(region *metapb.Region, key []byte) bool {
   579  	return bytes.Compare(key, region.GetStartKey()) >= 0 && (beforeEnd(key, region.GetEndKey()))
   580  }
   581  
   582  func intersectRange(region *metapb.Region, rg Range) Range {
   583  	var startKey, endKey []byte
   584  	if len(region.StartKey) > 0 {
   585  		_, startKey, _ = codec.DecodeBytes(region.StartKey, []byte{})
   586  	}
   587  	if bytes.Compare(startKey, rg.start) < 0 {
   588  		startKey = rg.start
   589  	}
   590  	if len(region.EndKey) > 0 {
   591  		_, endKey, _ = codec.DecodeBytes(region.EndKey, []byte{})
   592  	}
   593  	if beforeEnd(rg.end, endKey) {
   594  		endKey = rg.end
   595  	}
   596  
   597  	return Range{start: startKey, end: endKey}
   598  }
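
// Illustrative sketch (not part of the original file): intersectRange clamps a raw
// Range to the (decoded) boundaries of a region, so a range that spans several
// regions can be handled region by region. The wrapper and its logging are
// hypothetical.
func sketchClampRangeToRegion(region *metapb.Region, r Range) Range {
	clamped := intersectRange(region, r)
	log.L().Debug("clamped range to region",
		zap.Uint64("region", region.GetId()),
		logutil.Key("start", clamped.start),
		logutil.Key("end", clamped.end))
	return clamped
}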