github.com/KinWaiYuen/client-go/v2@v2.5.4/internal/mockstore/mocktikv/cluster.go

     1  // Copyright 2021 TiKV Authors
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  // NOTE: The code in this file is based on code from the
    16  // TiDB project, licensed under the Apache License v 2.0
    17  //
    18  // https://github.com/pingcap/tidb/tree/cc5e161ac06827589c4966674597c137cc9e809c/store/tikv/mockstore/mocktikv/cluster.go
    19  //
    20  
    21  // Copyright 2016 PingCAP, Inc.
    22  //
    23  // Licensed under the Apache License, Version 2.0 (the "License");
    24  // you may not use this file except in compliance with the License.
    25  // You may obtain a copy of the License at
    26  //
    27  //     http://www.apache.org/licenses/LICENSE-2.0
    28  //
    29  // Unless required by applicable law or agreed to in writing, software
    30  // distributed under the License is distributed on an "AS IS" BASIS,
    31  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    32  // See the License for the specific language governing permissions and
    33  // limitations under the License.
    34  
    35  package mocktikv
    36  
    37  import (
    38  	"bytes"
    39  	"context"
    40  	"math"
    41  	"sort"
    42  	"sync"
    43  	"time"
    44  
    45  	"github.com/golang/protobuf/proto"
    46  	"github.com/pingcap/kvproto/pkg/kvrpcpb"
    47  	"github.com/pingcap/kvproto/pkg/metapb"
    48  	pd "github.com/tikv/pd/client"
    49  )
    50  
    51  // Cluster simulates a TiKV cluster. It focuses on the management and change of
    52  // metadata. A Cluster mainly includes the following 3 kinds of metadata:
    53  // 1) Region: A Region is a fragment of TiKV's data whose range is [start, end).
    54  //    The data of a Region is replicated to multiple Peers and distributed across
    55  //    multiple Stores.
    56  // 2) Peer: A Peer is a replica of a Region's data. All Peers of a Region form
    57  //    a group, and each group elects a Leader to provide service.
    58  // 3) Store: A Store is a storage/service node. Think of it as a TiKV server
    59  //    process. Only the Store holding the leader Peer of the request's Region can
    60  //    respond to the client's request.
    61  type Cluster struct {
    62  	sync.RWMutex
    63  	id      uint64
    64  	stores  map[uint64]*Store
    65  	regions map[uint64]*Region
    66  
    67  	mvccStore MVCCStore
    68  
    69  	// delayEvents is used to control the execution order of RPC requests in tests.
    70  	delayEvents map[delayKey]time.Duration
    71  	delayMu     sync.Mutex
    72  }
    73  
    74  type delayKey struct {
    75  	startTS  uint64
    76  	regionID uint64
    77  }
    78  
    79  // NewCluster creates an empty cluster. It needs to be bootstrapped before
    80  // providing service.
    81  func NewCluster(mvccStore MVCCStore) *Cluster {
    82  	return &Cluster{
    83  		stores:      make(map[uint64]*Store),
    84  		regions:     make(map[uint64]*Region),
    85  		delayEvents: make(map[delayKey]time.Duration),
    86  		mvccStore:   mvccStore,
    87  	}
    88  }
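
        // A minimal usage sketch (it assumes an MVCCStore value, here called "store",
        // obtained elsewhere in this package): create a cluster, add one store, and
        // bootstrap the first region.
        //
        //	cluster := NewCluster(store)
        //	storeID, peerID, regionID := cluster.AllocID(), cluster.AllocID(), cluster.AllocID()
        //	cluster.AddStore(storeID, "127.0.0.1:20160")
        //	cluster.Bootstrap(regionID, []uint64{storeID}, []uint64{peerID}, peerID)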
    89  
    90  // AllocID creates a unique ID in the cluster. The ID can be used as a
    91  // StoreID, RegionID, or PeerID.
    92  func (c *Cluster) AllocID() uint64 {
    93  	c.Lock()
    94  	defer c.Unlock()
    95  
    96  	return c.allocID()
    97  }
    98  
    99  // AllocIDs creates multiple IDs.
   100  func (c *Cluster) AllocIDs(n int) []uint64 {
   101  	c.Lock()
   102  	defer c.Unlock()
   103  
   104  	var ids []uint64
   105  	for len(ids) < n {
   106  		ids = append(ids, c.allocID())
   107  	}
   108  	return ids
   109  }
   110  
   111  func (c *Cluster) allocID() uint64 {
   112  	c.id++
   113  	return c.id
   114  }
   115  
   116  // GetAllRegions gets all the regions in the cluster.
   117  func (c *Cluster) GetAllRegions() []*Region {
   118  	regions := make([]*Region, 0, len(c.regions))
   119  	for _, region := range c.regions {
   120  		regions = append(regions, region)
   121  	}
   122  	return regions
   123  }
   124  
   125  // GetStore returns a Store's meta.
   126  func (c *Cluster) GetStore(storeID uint64) *metapb.Store {
   127  	c.RLock()
   128  	defer c.RUnlock()
   129  
   130  	if store := c.stores[storeID]; store != nil {
   131  		return proto.Clone(store.meta).(*metapb.Store)
   132  	}
   133  	return nil
   134  }
   135  
   136  // GetAllStores returns all Stores' meta.
   137  func (c *Cluster) GetAllStores() []*metapb.Store {
   138  	c.RLock()
   139  	defer c.RUnlock()
   140  
   141  	stores := make([]*metapb.Store, 0, len(c.stores))
   142  	for _, store := range c.stores {
   143  		stores = append(stores, proto.Clone(store.meta).(*metapb.Store))
   144  	}
   145  	return stores
   146  }
   147  
   148  // StopStore stops a store with storeID.
   149  func (c *Cluster) StopStore(storeID uint64) {
   150  	c.Lock()
   151  	defer c.Unlock()
   152  
   153  	if store := c.stores[storeID]; store != nil {
   154  		store.meta.State = metapb.StoreState_Offline
   155  	}
   156  }
   157  
   158  // StartStore starts a store with storeID.
   159  func (c *Cluster) StartStore(storeID uint64) {
   160  	c.Lock()
   161  	defer c.Unlock()
   162  
   163  	if store := c.stores[storeID]; store != nil {
   164  		store.meta.State = metapb.StoreState_Up
   165  	}
   166  }
   167  
   168  // CancelStore sets the store's cancel state to true.
   169  func (c *Cluster) CancelStore(storeID uint64) {
   170  	c.Lock()
   171  	defer c.Unlock()
   172  
   173  	// A store returns a context.Canceled error when cancel is true.
   174  	if store := c.stores[storeID]; store != nil {
   175  		store.cancel = true
   176  	}
   177  }
   178  
   179  // UnCancelStore sets the store's cancel state to false.
   180  func (c *Cluster) UnCancelStore(storeID uint64) {
   181  	c.Lock()
   182  	defer c.Unlock()
   183  
   184  	if store := c.stores[storeID]; store != nil {
   185  		store.cancel = false
   186  	}
   187  }
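
        // A sketch of how the cancel flag can drive failure injection in tests; only
        // exported Cluster methods are used, and the surrounding test flow is assumed.
        //
        //	cluster.CancelStore(storeID)   // RPCs to this store now fail with context.Canceled
        //	// ... exercise the client's retry / region-cache invalidation path ...
        //	cluster.UnCancelStore(storeID) // restore normal behavior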
   188  
   189  // GetStoreByAddr returns a Store's meta by its address.
   190  func (c *Cluster) GetStoreByAddr(addr string) *metapb.Store {
   191  	c.RLock()
   192  	defer c.RUnlock()
   193  
   194  	for _, s := range c.stores {
   195  		if s.meta.GetAddress() == addr {
   196  			return proto.Clone(s.meta).(*metapb.Store)
   197  		}
   198  	}
   199  	return nil
   200  }
   201  
   202  // GetAndCheckStoreByAddr checks and returns a Store's meta by its address.
   203  func (c *Cluster) GetAndCheckStoreByAddr(addr string) (ss []*metapb.Store, err error) {
   204  	c.RLock()
   205  	defer c.RUnlock()
   206  
   207  	for _, s := range c.stores {
   208  		if s.cancel {
   209  			err = context.Canceled
   210  			return
   211  		}
   212  		if s.meta.GetAddress() == addr {
   213  			ss = append(ss, proto.Clone(s.meta).(*metapb.Store))
   214  		}
   215  	}
   216  	return
   217  }
   218  
   219  // AddStore adds a new Store to the cluster.
   220  func (c *Cluster) AddStore(storeID uint64, addr string, labels ...*metapb.StoreLabel) {
   221  	c.Lock()
   222  	defer c.Unlock()
   223  
   224  	c.stores[storeID] = newStore(storeID, addr, labels...)
   225  }
   226  
   227  // RemoveStore removes a Store from the cluster.
   228  func (c *Cluster) RemoveStore(storeID uint64) {
   229  	c.Lock()
   230  	defer c.Unlock()
   231  
   232  	delete(c.stores, storeID)
   233  }
   234  
   235  // MarkTombstone marks the store as Tombstone.
   236  func (c *Cluster) MarkTombstone(storeID uint64) {
   237  	c.Lock()
   238  	defer c.Unlock()
   239  	nm := *c.stores[storeID].meta
   240  	nm.State = metapb.StoreState_Tombstone
   241  	c.stores[storeID].meta = &nm
   242  }
   243  
   244  // UpdateStoreAddr updates the address and labels of a store in the cluster.
   245  func (c *Cluster) UpdateStoreAddr(storeID uint64, addr string, labels ...*metapb.StoreLabel) {
   246  	c.Lock()
   247  	defer c.Unlock()
   248  	c.stores[storeID] = newStore(storeID, addr, labels...)
   249  }
   250  
   251  // GetRegion returns a Region's meta and leader ID.
   252  func (c *Cluster) GetRegion(regionID uint64) (*metapb.Region, uint64) {
   253  	c.RLock()
   254  	defer c.RUnlock()
   255  
   256  	r := c.regions[regionID]
   257  	if r == nil {
   258  		return nil, 0
   259  	}
   260  	return proto.Clone(r.Meta).(*metapb.Region), r.leader
   261  }
   262  
   263  // GetRegionByKey returns the Region and its leader whose range contains the key.
   264  func (c *Cluster) GetRegionByKey(key []byte) (*metapb.Region, *metapb.Peer) {
   265  	c.RLock()
   266  	defer c.RUnlock()
   267  
   268  	return c.getRegionByKeyNoLock(key)
   269  }
   270  
   271  // getRegionByKeyNoLock returns the Region and its leader whose range contains the key, without taking the lock.
   272  func (c *Cluster) getRegionByKeyNoLock(key []byte) (*metapb.Region, *metapb.Peer) {
   273  	for _, r := range c.regions {
   274  		if regionContains(r.Meta.StartKey, r.Meta.EndKey, key) {
   275  			return proto.Clone(r.Meta).(*metapb.Region), proto.Clone(r.leaderPeer()).(*metapb.Peer)
   276  		}
   277  	}
   278  	return nil, nil
   279  }
   280  
   281  // GetPrevRegionByKey returns the Region (and its leader) immediately preceding the Region whose range contains the key.
   282  func (c *Cluster) GetPrevRegionByKey(key []byte) (*metapb.Region, *metapb.Peer) {
   283  	c.RLock()
   284  	defer c.RUnlock()
   285  
   286  	currentRegion, _ := c.getRegionByKeyNoLock(key)
   287  	if currentRegion == nil || len(currentRegion.StartKey) == 0 {
   288  		return nil, nil
   289  	}
   290  	for _, r := range c.regions {
   291  		if bytes.Equal(r.Meta.EndKey, currentRegion.StartKey) {
   292  			return proto.Clone(r.Meta).(*metapb.Region), proto.Clone(r.leaderPeer()).(*metapb.Peer)
   293  		}
   294  	}
   295  	return nil, nil
   296  }
   297  
   298  // GetRegionByID returns the Region and its leader whose ID is regionID.
   299  func (c *Cluster) GetRegionByID(regionID uint64) (*metapb.Region, *metapb.Peer) {
   300  	c.RLock()
   301  	defer c.RUnlock()
   302  
   303  	for _, r := range c.regions {
   304  		if r.Meta.GetId() == regionID {
   305  			return proto.Clone(r.Meta).(*metapb.Region), proto.Clone(r.leaderPeer()).(*metapb.Peer)
   306  		}
   307  	}
   308  	return nil, nil
   309  }
   310  
   311  // ScanRegions returns at most `limit` regions (and their leaders) within the key range [startKey, endKey).
   312  func (c *Cluster) ScanRegions(startKey, endKey []byte, limit int) []*pd.Region {
   313  	c.RLock()
   314  	defer c.RUnlock()
   315  
   316  	regions := make([]*Region, 0, len(c.regions))
   317  	for _, region := range c.regions {
   318  		regions = append(regions, region)
   319  	}
   320  
   321  	sort.Slice(regions, func(i, j int) bool {
   322  		return bytes.Compare(regions[i].Meta.GetStartKey(), regions[j].Meta.GetStartKey()) < 0
   323  	})
   324  
   325  	startPos := sort.Search(len(regions), func(i int) bool {
   326  		if len(regions[i].Meta.GetEndKey()) == 0 {
   327  			return true
   328  		}
   329  		return bytes.Compare(regions[i].Meta.GetEndKey(), startKey) > 0
   330  	})
   331  	regions = regions[startPos:]
   332  	if len(endKey) > 0 {
   333  		endPos := sort.Search(len(regions), func(i int) bool {
   334  			return bytes.Compare(regions[i].Meta.GetStartKey(), endKey) >= 0
   335  		})
   336  		if endPos > 0 {
   337  			regions = regions[:endPos]
   338  		}
   339  	}
   340  	if limit > 0 && len(regions) > limit {
   341  		regions = regions[:limit]
   342  	}
   343  
   344  	result := make([]*pd.Region, 0, len(regions))
   345  	for _, region := range regions {
   346  		leader := region.leaderPeer()
   347  		if leader == nil {
   348  			leader = &metapb.Peer{}
   349  		} else {
   350  			leader = proto.Clone(leader).(*metapb.Peer)
   351  		}
   352  
   353  		r := &pd.Region{
   354  			Meta:   proto.Clone(region.Meta).(*metapb.Region),
   355  			Leader: leader,
   356  		}
   357  		result = append(result, r)
   358  	}
   359  
   360  	return result
   361  }
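
        // A short sketch of iterating the scan result; it assumes a bootstrapped and
        // split cluster, and uses fmt only for illustration.
        //
        //	for _, r := range cluster.ScanRegions([]byte("a"), []byte("z"), 16) {
        //		fmt.Printf("region %d: [%q, %q) leader peer %d\n",
        //			r.Meta.GetId(), r.Meta.GetStartKey(), r.Meta.GetEndKey(), r.Leader.GetId())
        //	}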
   362  
   363  // Bootstrap creates the first Region. The Stores should already be in the
   364  // Cluster before bootstrapping.
   365  func (c *Cluster) Bootstrap(regionID uint64, storeIDs, peerIDs []uint64, leaderPeerID uint64) {
   366  	c.Lock()
   367  	defer c.Unlock()
   368  
   369  	if len(storeIDs) != len(peerIDs) {
   370  		panic("len(storeIDs) != len(peerIDs)")
   371  	}
   372  	c.regions[regionID] = newRegion(regionID, storeIDs, peerIDs, leaderPeerID)
   373  }
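
        // A sketch of a three-replica bootstrap followed by a leader transfer; the
        // addresses and the ID bookkeeping are illustrative only.
        //
        //	ids := cluster.AllocIDs(7) // 3 stores + 3 peers + 1 region
        //	stores, peers, regionID := ids[0:3], ids[3:6], ids[6]
        //	for i, sid := range stores {
        //		cluster.AddStore(sid, fmt.Sprintf("127.0.0.1:2016%d", i))
        //	}
        //	cluster.Bootstrap(regionID, stores, peers, peers[0])
        //	cluster.ChangeLeader(regionID, peers[1])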
   374  
   375  // AddPeer adds a new Peer for the Region on the Store.
   376  func (c *Cluster) AddPeer(regionID, storeID, peerID uint64) {
   377  	c.Lock()
   378  	defer c.Unlock()
   379  
   380  	c.regions[regionID].addPeer(peerID, storeID)
   381  }
   382  
   383  // RemovePeer removes the Peer from the Region. Note that if the Peer is the
   384  // leader, the Region will have no leader until ChangeLeader() is called.
   385  func (c *Cluster) RemovePeer(regionID, storeID uint64) {
   386  	c.Lock()
   387  	defer c.Unlock()
   388  
   389  	c.regions[regionID].removePeer(storeID)
   390  }
   391  
   392  // ChangeLeader sets the Region's leader Peer. Caller should guarantee the Peer
   393  // exists.
   394  func (c *Cluster) ChangeLeader(regionID, leaderPeerID uint64) {
   395  	c.Lock()
   396  	defer c.Unlock()
   397  
   398  	c.regions[regionID].changeLeader(leaderPeerID)
   399  }
   400  
   401  // GiveUpLeader sets the Region's leader to 0. The Region will have no leader
   402  // until ChangeLeader() is called.
   403  func (c *Cluster) GiveUpLeader(regionID uint64) {
   404  	c.ChangeLeader(regionID, 0)
   405  }
   406  
   407  // Split splits a Region at the key (encoded) and creates a new Region.
   408  func (c *Cluster) Split(regionID, newRegionID uint64, key []byte, peerIDs []uint64, leaderPeerID uint64) {
   409  	c.SplitRaw(regionID, newRegionID, NewMvccKey(key), peerIDs, leaderPeerID)
   410  }
   411  
   412  // SplitRaw splits a Region at the key (not encoded) and creates a new Region.
   413  func (c *Cluster) SplitRaw(regionID, newRegionID uint64, rawKey []byte, peerIDs []uint64, leaderPeerID uint64) *metapb.Region {
   414  	c.Lock()
   415  	defer c.Unlock()
   416  
   417  	newRegion := c.regions[regionID].split(newRegionID, rawKey, peerIDs, leaderPeerID)
   418  	c.regions[newRegionID] = newRegion
   419  	// The mocktikv should return a deep copy of the meta info to avoid data races.
   420  	meta := proto.Clone(newRegion.Meta)
   421  	return meta.(*metapb.Region)
   422  }
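
        // A sketch of splitting an existing region at key "m"; peerIDs must contain
        // one new ID per existing peer of the region (the split panics otherwise), and
        // "peers" here stands for that existing peer list.
        //
        //	newRegionID := cluster.AllocID()
        //	newPeerIDs := cluster.AllocIDs(len(peers))
        //	cluster.Split(regionID, newRegionID, []byte("m"), newPeerIDs, newPeerIDs[0])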
   423  
   424  // Merge merges 2 regions whose key ranges must be adjacent.
   425  func (c *Cluster) Merge(regionID1, regionID2 uint64) {
   426  	c.Lock()
   427  	defer c.Unlock()
   428  
   429  	c.regions[regionID1].merge(c.regions[regionID2].Meta.GetEndKey())
   430  	delete(c.regions, regionID2)
   431  }
   432  
   433  // SplitKeys evenly splits the key range [start, end) into "count" regions.
   434  // Only works for a single-store cluster.
   435  func (c *Cluster) SplitKeys(start, end []byte, count int) {
   436  	c.splitRange(c.mvccStore, NewMvccKey(start), NewMvccKey(end), count)
   437  }
   438  
   439  // ScheduleDelay schedules a delay event for a transaction on a region.
   440  func (c *Cluster) ScheduleDelay(startTS, regionID uint64, dur time.Duration) {
   441  	c.delayMu.Lock()
   442  	c.delayEvents[delayKey{startTS: startTS, regionID: regionID}] = dur
   443  	c.delayMu.Unlock()
   444  }
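
        // A sketch of delaying a single request in a test: the next RPC handled for
        // the given transaction startTS on the given region sleeps for the duration
        // before being processed (see handleDelay below).
        //
        //	cluster.ScheduleDelay(txnStartTS, regionID, 100*time.Millisecond)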
   445  
   446  // UpdateStoreLabels merges the given labels into the store's existing labels.
   447  func (c *Cluster) UpdateStoreLabels(storeID uint64, labels []*metapb.StoreLabel) {
   448  	c.Lock()
   449  	defer c.Unlock()
   450  	c.stores[storeID].mergeLabels(labels)
   451  }
   452  
   453  func (c *Cluster) handleDelay(startTS, regionID uint64) {
   454  	key := delayKey{startTS: startTS, regionID: regionID}
   455  	c.delayMu.Lock()
   456  	dur, ok := c.delayEvents[key]
   457  	if ok {
   458  		delete(c.delayEvents, key)
   459  	}
   460  	c.delayMu.Unlock()
   461  	if ok {
   462  		time.Sleep(dur)
   463  	}
   464  }
   465  
   466  func (c *Cluster) splitRange(mvccStore MVCCStore, start, end MvccKey, count int) {
   467  	c.Lock()
   468  	defer c.Unlock()
   469  	c.evacuateOldRegionRanges(start, end)
   470  	regionPairs := c.getEntriesGroupByRegions(mvccStore, start, end, count)
   471  	c.createNewRegions(regionPairs, start, end)
   472  }
   473  
   474  // getEntriesGroupByRegions groups the key-value pairs into the regions to be created.
   475  func (c *Cluster) getEntriesGroupByRegions(mvccStore MVCCStore, start, end MvccKey, count int) [][]Pair {
   476  	startTS := uint64(math.MaxUint64)
   477  	limit := math.MaxInt32
   478  	pairs := mvccStore.Scan(start.Raw(), end.Raw(), limit, startTS, kvrpcpb.IsolationLevel_SI, nil)
   479  	regionEntriesSlice := make([][]Pair, 0, count)
   480  	quotient := len(pairs) / count
   481  	remainder := len(pairs) % count
   482  	i := 0
   483  	for i < len(pairs) {
   484  		regionEntryCount := quotient
   485  		if remainder > 0 {
   486  			remainder--
   487  			regionEntryCount++
   488  		}
   489  		regionEntries := pairs[i : i+regionEntryCount]
   490  		regionEntriesSlice = append(regionEntriesSlice, regionEntries)
   491  		i += regionEntryCount
   492  	}
   493  	return regionEntriesSlice
   494  }
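
        // For example, splitting 10 scanned pairs into count=3 groups yields sizes
        // 4, 3 and 3: quotient=3, remainder=1, and the remainder is spread over the
        // leading groups one extra pair at a time.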
   495  
   496  func (c *Cluster) createNewRegions(regionPairs [][]Pair, start, end MvccKey) {
   497  	for i := range regionPairs {
   498  		peerID := c.allocID()
   499  		newRegion := newRegion(c.allocID(), []uint64{c.firstStoreID()}, []uint64{peerID}, peerID)
   500  		var regionStartKey, regionEndKey MvccKey
   501  		if i == 0 {
   502  			regionStartKey = start
   503  		} else {
   504  			regionStartKey = NewMvccKey(regionPairs[i][0].Key)
   505  		}
   506  		if i == len(regionPairs)-1 {
   507  			regionEndKey = end
   508  		} else {
   509  			// Use the next region's first key as region end key.
   510  			regionEndKey = NewMvccKey(regionPairs[i+1][0].Key)
   511  		}
   512  		newRegion.updateKeyRange(regionStartKey, regionEndKey)
   513  		c.regions[newRegion.Meta.Id] = newRegion
   514  	}
   515  }
   516  
   517  // evacuateOldRegionRanges evacuates the range [start, end).
   518  // Old regions that intersect [start, end) will be updated or deleted.
   519  func (c *Cluster) evacuateOldRegionRanges(start, end MvccKey) {
   520  	oldRegions := c.getRegionsCoverRange(start, end)
   521  	for _, oldRegion := range oldRegions {
   522  		startCmp := bytes.Compare(oldRegion.Meta.StartKey, start)
   523  		endCmp := bytes.Compare(oldRegion.Meta.EndKey, end)
   524  		if len(oldRegion.Meta.EndKey) == 0 {
   525  			endCmp = 1
   526  		}
   527  		if startCmp >= 0 && endCmp <= 0 {
   528  			// The region lies entirely within [start, end); it will be replaced by new regions.
   529  			delete(c.regions, oldRegion.Meta.Id)
   530  		} else if startCmp < 0 && endCmp > 0 {
   531  			// A single Region covers the whole of [start, end); split it into two regions that do not overlap [start, end).
   532  			oldEnd := oldRegion.Meta.EndKey
   533  			oldRegion.updateKeyRange(oldRegion.Meta.StartKey, start)
   534  			peerID := c.allocID()
   535  			newRegion := newRegion(c.allocID(), []uint64{c.firstStoreID()}, []uint64{peerID}, peerID)
   536  			newRegion.updateKeyRange(end, oldEnd)
   537  			c.regions[newRegion.Meta.Id] = newRegion
   538  		} else if startCmp < 0 {
   539  			oldRegion.updateKeyRange(oldRegion.Meta.StartKey, start)
   540  		} else {
   541  			oldRegion.updateKeyRange(end, oldRegion.Meta.EndKey)
   542  		}
   543  	}
   544  }
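
        // Concretely, with a split range [b, d): an old region [b, c) is deleted; an
        // old region [a, e) is shrunk to [a, b) and a new region [d, e) is created;
        // an old region [a, c) is shrunk to [a, b); and an old region [c, e) is
        // shrunk to [d, e).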
   545  
   546  func (c *Cluster) firstStoreID() uint64 {
   547  	for id := range c.stores {
   548  		return id
   549  	}
   550  	return 0
   551  }
   552  
   553  // getRegionsCoverRange gets the regions in the cluster that have an intersection with [start, end).
   554  func (c *Cluster) getRegionsCoverRange(start, end MvccKey) []*Region {
   555  	regions := make([]*Region, 0, len(c.regions))
   556  	for _, region := range c.regions {
   557  		onRight := bytes.Compare(end, region.Meta.StartKey) <= 0
   558  		onLeft := bytes.Compare(region.Meta.EndKey, start) <= 0
   559  		if len(region.Meta.EndKey) == 0 {
   560  			onLeft = false
   561  		}
   562  		if onLeft || onRight {
   563  			continue
   564  		}
   565  		regions = append(regions, region)
   566  	}
   567  	return regions
   568  }
   569  
   570  // Region is the Region metadata.
   571  type Region struct {
   572  	Meta   *metapb.Region
   573  	leader uint64
   574  }
   575  
   576  func newPeerMeta(peerID, storeID uint64) *metapb.Peer {
   577  	return &metapb.Peer{
   578  		Id:      peerID,
   579  		StoreId: storeID,
   580  	}
   581  }
   582  
   583  func newRegion(regionID uint64, storeIDs, peerIDs []uint64, leaderPeerID uint64) *Region {
   584  	if len(storeIDs) != len(peerIDs) {
   585  		panic("len(storeIDs) != len(peerIDs)")
   586  	}
   587  	peers := make([]*metapb.Peer, 0, len(storeIDs))
   588  	for i := range storeIDs {
   589  		peers = append(peers, newPeerMeta(peerIDs[i], storeIDs[i]))
   590  	}
   591  	meta := &metapb.Region{
   592  		Id:    regionID,
   593  		Peers: peers,
   594  	}
   595  	return &Region{
   596  		Meta:   meta,
   597  		leader: leaderPeerID,
   598  	}
   599  }
   600  
   601  func (r *Region) addPeer(peerID, storeID uint64) {
   602  	r.Meta.Peers = append(r.Meta.Peers, newPeerMeta(peerID, storeID))
   603  	r.incConfVer()
   604  }
   605  
   606  func (r *Region) removePeer(peerID uint64) {
   607  	for i, peer := range r.Meta.Peers {
   608  		if peer.GetId() == peerID {
   609  			r.Meta.Peers = append(r.Meta.Peers[:i], r.Meta.Peers[i+1:]...)
   610  			break
   611  		}
   612  	}
   613  	if r.leader == peerID {
   614  		r.leader = 0
   615  	}
   616  	r.incConfVer()
   617  }
   618  
   619  func (r *Region) changeLeader(leaderID uint64) {
   620  	r.leader = leaderID
   621  }
   622  
   623  func (r *Region) leaderPeer() *metapb.Peer {
   624  	for _, p := range r.Meta.Peers {
   625  		if p.GetId() == r.leader {
   626  			return p
   627  		}
   628  	}
   629  	return nil
   630  }
   631  
   632  func (r *Region) split(newRegionID uint64, key MvccKey, peerIDs []uint64, leaderPeerID uint64) *Region {
   633  	if len(r.Meta.Peers) != len(peerIDs) {
   634  		panic("len(r.Meta.Peers) != len(peerIDs)")
   635  	}
   636  	storeIDs := make([]uint64, 0, len(r.Meta.Peers))
   637  	for _, peer := range r.Meta.Peers {
   638  		storeIDs = append(storeIDs, peer.GetStoreId())
   639  	}
   640  	region := newRegion(newRegionID, storeIDs, peerIDs, leaderPeerID)
   641  	region.updateKeyRange(key, r.Meta.EndKey)
   642  	r.updateKeyRange(r.Meta.StartKey, key)
   643  	return region
   644  }
   645  
   646  func (r *Region) merge(endKey MvccKey) {
   647  	r.Meta.EndKey = endKey
   648  	r.incVersion()
   649  }
   650  
   651  func (r *Region) updateKeyRange(start, end MvccKey) {
   652  	r.Meta.StartKey = start
   653  	r.Meta.EndKey = end
   654  	r.incVersion()
   655  }
   656  
   657  func (r *Region) incConfVer() {
   658  	r.Meta.RegionEpoch = &metapb.RegionEpoch{
   659  		ConfVer: r.Meta.GetRegionEpoch().GetConfVer() + 1,
   660  		Version: r.Meta.GetRegionEpoch().GetVersion(),
   661  	}
   662  }
   663  
   664  func (r *Region) incVersion() {
   665  	r.Meta.RegionEpoch = &metapb.RegionEpoch{
   666  		ConfVer: r.Meta.GetRegionEpoch().GetConfVer(),
   667  		Version: r.Meta.GetRegionEpoch().GetVersion() + 1,
   668  	}
   669  }
   670  
   671  // Store is the Store's metadata.
   672  type Store struct {
   673  	meta   *metapb.Store
   674  	cancel bool // return a context.Canceled error when cancel is true.
   675  }
   676  
   677  func newStore(storeID uint64, addr string, labels ...*metapb.StoreLabel) *Store {
   678  	return &Store{
   679  		meta: &metapb.Store{
   680  			Id:      storeID,
   681  			Address: addr,
   682  			Labels:  labels,
   683  		},
   684  	}
   685  }
   686  
   687  func (s *Store) mergeLabels(labels []*metapb.StoreLabel) {
   688  	if len(s.meta.Labels) < 1 {
   689  		s.meta.Labels = labels
   690  		return
   691  	}
   692  	kv := make(map[string]string, len(s.meta.Labels))
   693  	for _, label := range s.meta.Labels {
   694  		kv[label.Key] = label.Value
   695  	}
   696  	for _, label := range labels {
   697  		kv[label.Key] = label.Value
   698  	}
   699  	mergedLabels := make([]*metapb.StoreLabel, 0, len(kv))
   700  	for k, v := range kv {
   701  		mergedLabels = append(mergedLabels, &metapb.StoreLabel{
   702  			Key:   k,
   703  			Value: v,
   704  		})
   705  	}
   706  	s.meta.Labels = mergedLabels
   707  }
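
        // For example, merging []*metapb.StoreLabel{{Key: "zone", Value: "z2"}} into a
        // store labeled {zone: z1, host: h1} leaves {zone: z2, host: h1}: labels with
        // the same key are overwritten and the rest are kept. The merged slice is
        // rebuilt from a map, so its order is not deterministic.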