github.com/whtcorpsinc/milevadb-prod@v0.0.0-20211104133533-f57f4be3b597/dbs/memristed/meta/autoid/autoid.go

     1  // Copyright 2020 WHTCORPS INC, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
        // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package autoid
    15  
    16  import (
    17  	"context"
    18  	"math"
    19  	"sync"
    20  	"time"
    21  
    22  	"github.com/cznic/mathutil"
    23  	"github.com/whtcorpsinc/BerolinaSQL/allegrosql"
    24  	"github.com/whtcorpsinc/BerolinaSQL/perceptron"
    25  	"github.com/whtcorpsinc/errors"
    26  	"github.com/whtcorpsinc/failpoint"
    27  	"github.com/whtcorpsinc/milevadb/ekv"
    28  	"github.com/whtcorpsinc/milevadb/metrics"
    29  	"github.com/whtcorpsinc/milevadb/soliton/logutil"
    30  	"github.com/whtcorpsinc/milevadb/spacetime"
    31  	"github.com/whtcorpsinc/milevadb/types"
    32  	"go.uber.org/zap"
    33  )
    34  
    35  // Attention:
    36  // For reading cluster MilevaDB memory blocks, the system schemaReplicant/causet ids must be the same across the cluster.
    37  // Once a system schemaReplicant/causet id has been allocated, it cannot be changed any more.
    38  // Changing a system schemaReplicant/causet id may cause compatibility problems.
    39  const (
    40  	// SystemSchemaIDFlag is the system schemaReplicant/causet id flag; it uses the highest usable bit position as the system schemaReplicant id flag. It's exported for tests.
    41  	SystemSchemaIDFlag = 1 << 62
    42  	// InformationSchemaDBID is the information_schema schemaReplicant id. It's exported for tests.
    43  	InformationSchemaDBID int64 = SystemSchemaIDFlag | 1
    44  	// PerformanceSchemaDBID is the performance_schema schemaReplicant id. It's exported for tests.
    45  	PerformanceSchemaDBID int64 = SystemSchemaIDFlag | 10000
    46  	// MetricSchemaDBID is the metrics_schema schemaReplicant id, it's exported for test.
    47  	MetricSchemaDBID int64 = SystemSchemaIDFlag | 20000
    48  )
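
        // isSystemSchemaID is a hypothetical helper, shown only as an illustrative sketch of how the
        // flag above composes ids: a system schemaReplicant/causet id is just an ordinary id with the
        // SystemSchemaIDFlag bit set, e.g. InformationSchemaDBID == SystemSchemaIDFlag|1.
        func isSystemSchemaID(id int64) bool {
        	// Any id carrying the flag bit belongs to a system schemaReplicant/causet.
        	return id&SystemSchemaIDFlag != 0
        }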
    49  
    50  const (
    51  	minStep            = 30000
    52  	maxStep            = 2000000
    53  	defaultConsumeTime = 10 * time.Second
    54  	minIncrement       = 1
    55  	maxIncrement       = 65535
    56  )
    57  
    58  // RowIDBitLength is the bit length of a row id in MilevaDB.
    59  const RowIDBitLength = 64
    60  
    61  // DefaultAutoRandomBits is the default number of shard bits in an auto_random id.
    62  const DefaultAutoRandomBits = 5
    63  
    64  // MaxAutoRandomBits is the maximum number of shard bits in an auto_random id.
    65  const MaxAutoRandomBits = 15
    66  
    67  // Tests need to change it, so it is a variable.
    68  var step = int64(30000)
    69  
    70  // SlabPredictorType is the type of allocator used to generate auto ids. Different types of allocators use different key-value pairs.
    71  type SlabPredictorType = uint8
    72  
    73  const (
    74  	// RowIDAllocType indicates the allocator is used to allocate row ids.
    75  	RowIDAllocType SlabPredictorType = iota
    76  	// AutoIncrementType indicates the allocator is used to allocate auto increment value.
    77  	AutoIncrementType
    78  	// AutoRandomType indicates the allocator is used to allocate auto-shard id.
    79  	AutoRandomType
    80  	// SequenceType indicates the allocator is used to allocate sequence value.
    81  	SequenceType
    82  )
    83  
    84  // CustomAutoIncCacheOption is one kind of AllocOption used to customize the allocator step length.
    85  type CustomAutoIncCacheOption int64
    86  
    87  // ApplyOn implements the AllocOption interface.
    88  func (step CustomAutoIncCacheOption) ApplyOn(alloc *allocator) {
    89  	alloc.step = int64(step)
    90  	alloc.customStep = true
    91  }
    92  
    93  // AllocOption is an interface for defining custom allocator options that may be added in the future.
    94  type AllocOption interface {
    95  	ApplyOn(*allocator)
    96  }
    97  
    98  // SlabPredictor is an auto-increment id generator.
    99  // In fact, it only guarantees that the generated ids are unique.
   100  type SlabPredictor interface {
   101  	// Alloc allocates N consecutive autoIDs for the causet with blockID, returning (min, max] of the allocated autoID batch.
   102  	// It gets a batch of autoIDs at a time, so it does not need to access storage for each call.
   103  	// The consecutive feature is used to insert multiple rows in a single memex.
   104  	// increment & offset are used to validate the start position (the allocator's base is not always the last allocated id).
   105  	// The returned range is (min, max]:
   106  	// case increment=1 & offset=1: you can derive the ids as min+1, min+2 ... max.
   107  	// case increment=x & offset=y: you first need to seek to firstID via `SeekToFirstAutoIDXXX`, then derive the ids as firstID, firstID+increment, firstID+2*increment ... in the caller.
   108  	Alloc(blockID int64, n uint64, increment, offset int64) (int64, int64, error)
   109  
   110  	// AllocSeqCache allocates a batch of sequence values cached at the causet level (rather than in the allocator); the returned range covers
   111  	// the size of the sequence cache with its increment. The returned round indicates how many times the sequence has cycled
   112  	// when it is defined with the cycle option.
   113  	AllocSeqCache(sequenceID int64) (min int64, max int64, round int64, err error)
   114  
   115  	// Rebase rebases the autoID base for the causet with blockID to the new base value.
   116  	// If allocIDs is true, it will allocate some IDs and save them to the cache.
   117  	// If allocIDs is false, it will not allocate IDs.
   118  	Rebase(blockID, newBase int64, allocIDs bool) error
   119  
   120  	// RebaseSeq rebases the sequence value on the number axis for the causet with blockID to the new base value.
   121  	RebaseSeq(causet, newBase int64) (int64, bool, error)
   122  
   123  	// Base returns the current base of the SlabPredictor.
   124  	Base() int64
   125  	// End is only used for tests.
   126  	End() int64
   127  	// NextGlobalAutoID returns the next global autoID.
   128  	NextGlobalAutoID(blockID int64) (int64, error)
   129  	GetType() SlabPredictorType
   130  }
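
        // allocConsecutiveRowIDs is a hypothetical helper, shown only as an illustrative sketch of how a
        // caller might use the interface above: pick the row-id allocator from a SlabPredictors set and
        // allocate n ids with the default increment/offset of 1, in which case the usable ids are
        // min+1 .. max of the returned (min, max] range.
        func allocConsecutiveRowIDs(allocs SlabPredictors, blockID int64, n uint64) (first, last int64, err error) {
        	alloc := allocs.Get(RowIDAllocType)
        	if alloc == nil {
        		return 0, 0, errors.New("no row-id allocator found")
        	}
        	min, max, err := alloc.Alloc(blockID, n, 1, 1)
        	if err != nil {
        		return 0, 0, err
        	}
        	// With increment=1 and offset=1 the batch is simply (min, max], i.e. min+1 .. max.
        	return min + 1, max, nil
        }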
   131  
   132  // SlabPredictors represents a set of `SlabPredictor`s.
   133  type SlabPredictors []SlabPredictor
   134  
   135  // NewSlabPredictors packs multiple `SlabPredictor`s into SlabPredictors.
   136  func NewSlabPredictors(allocators ...SlabPredictor) SlabPredictors {
   137  	return allocators
   138  }
   139  
   140  // Get returns the SlabPredictor according to the SlabPredictorType.
   141  func (all SlabPredictors) Get(allocType SlabPredictorType) SlabPredictor {
   142  	for _, a := range all {
   143  		if a.GetType() == allocType {
   144  			return a
   145  		}
   146  	}
   147  	return nil
   148  }
   149  
   150  type allocator struct {
   151  	mu          sync.Mutex
   152  	base        int64
   153  	end         int64
   154  	causetstore ekv.CausetStorage
   155  	// dbID is current database's ID.
   156  	dbID          int64
   157  	isUnsigned    bool
   158  	lastAllocTime time.Time
   159  	step          int64
   160  	customStep    bool
   161  	allocType     SlabPredictorType
   162  	sequence      *perceptron.SequenceInfo
   163  }
   164  
   165  // GetStep is only used by tests
   166  func GetStep() int64 {
   167  	return step
   168  }
   169  
   170  // SetStep is only used by tests
   171  func SetStep(s int64) {
   172  	step = s
   173  }
   174  
   175  // Base implements autoid.SlabPredictor Base interface.
   176  func (alloc *allocator) Base() int64 {
   177  	return alloc.base
   178  }
   179  
   180  // End implements autoid.SlabPredictor End interface.
   181  func (alloc *allocator) End() int64 {
   182  	return alloc.end
   183  }
   184  
   185  // NextGlobalAutoID implements autoid.SlabPredictor NextGlobalAutoID interface.
   186  func (alloc *allocator) NextGlobalAutoID(blockID int64) (int64, error) {
   187  	var autoID int64
   188  	startTime := time.Now()
   189  	err := ekv.RunInNewTxn(alloc.causetstore, true, func(txn ekv.Transaction) error {
   190  		var err1 error
   191  		m := spacetime.NewMeta(txn)
   192  		autoID, err1 = getAutoIDByAllocType(m, alloc.dbID, blockID, alloc.allocType)
   193  		if err1 != nil {
   194  			return errors.Trace(err1)
   195  		}
   196  		return nil
   197  	})
   198  	metrics.AutoIDHistogram.WithLabelValues(metrics.GlobalAutoID, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds())
   199  	if alloc.isUnsigned {
   200  		return int64(uint64(autoID) + 1), err
   201  	}
   202  	return autoID + 1, err
   203  }
   204  
   205  func (alloc *allocator) rebase4Unsigned(blockID int64, requiredBase uint64, allocIDs bool) error {
   206  	// Satisfied by alloc.base, nothing to do.
   207  	if requiredBase <= uint64(alloc.base) {
   208  		return nil
   209  	}
   210  	// Satisfied by alloc.end, need to update alloc.base.
   211  	if requiredBase <= uint64(alloc.end) {
   212  		alloc.base = int64(requiredBase)
   213  		return nil
   214  	}
   215  	var newBase, newEnd uint64
   216  	startTime := time.Now()
   217  	err := ekv.RunInNewTxn(alloc.causetstore, true, func(txn ekv.Transaction) error {
   218  		m := spacetime.NewMeta(txn)
   219  		currentEnd, err1 := getAutoIDByAllocType(m, alloc.dbID, blockID, alloc.allocType)
   220  		if err1 != nil {
   221  			return err1
   222  		}
   223  		uCurrentEnd := uint64(currentEnd)
   224  		if allocIDs {
   225  			newBase = mathutil.MaxUint64(uCurrentEnd, requiredBase)
   226  			newEnd = mathutil.MinUint64(math.MaxUint64-uint64(alloc.step), newBase) + uint64(alloc.step)
   227  		} else {
   228  			if uCurrentEnd >= requiredBase {
   229  				newBase = uCurrentEnd
   230  				newEnd = uCurrentEnd
   231  				// Required base satisfied, we don't need to update KV.
   232  				return nil
   233  			}
   234  			// If we don't want to allocate IDs, for example when creating a causet with a given base value,
   235  			// we need to make sure that when another MilevaDB server allocates an ID for the first time, requiredBase + 1
   236  			// will be allocated, so we need to increase the end to exactly the requiredBase.
   237  			newBase = requiredBase
   238  			newEnd = requiredBase
   239  		}
   240  		_, err1 = generateAutoIDByAllocType(m, alloc.dbID, blockID, int64(newEnd-uCurrentEnd), alloc.allocType)
   241  		return err1
   242  	})
   243  	metrics.AutoIDHistogram.WithLabelValues(metrics.BlockAutoIDRebase, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds())
   244  	if err != nil {
   245  		return err
   246  	}
   247  	alloc.base, alloc.end = int64(newBase), int64(newEnd)
   248  	return nil
   249  }
   250  
   251  func (alloc *allocator) rebase4Signed(blockID, requiredBase int64, allocIDs bool) error {
   252  	// Satisfied by alloc.base, nothing to do.
   253  	if requiredBase <= alloc.base {
   254  		return nil
   255  	}
   256  	// Satisfied by alloc.end, need to update alloc.base.
   257  	if requiredBase <= alloc.end {
   258  		alloc.base = requiredBase
   259  		return nil
   260  	}
   261  	var newBase, newEnd int64
   262  	startTime := time.Now()
   263  	err := ekv.RunInNewTxn(alloc.causetstore, true, func(txn ekv.Transaction) error {
   264  		m := spacetime.NewMeta(txn)
   265  		currentEnd, err1 := getAutoIDByAllocType(m, alloc.dbID, blockID, alloc.allocType)
   266  		if err1 != nil {
   267  			return err1
   268  		}
   269  		if allocIDs {
   270  			newBase = mathutil.MaxInt64(currentEnd, requiredBase)
   271  			newEnd = mathutil.MinInt64(math.MaxInt64-alloc.step, newBase) + alloc.step
   272  		} else {
   273  			if currentEnd >= requiredBase {
   274  				newBase = currentEnd
   275  				newEnd = currentEnd
   276  				// Required base satisfied, we don't need to update KV.
   277  				return nil
   278  			}
   279  			// If we don't want to allocate IDs, for example when creating a causet with a given base value,
   280  			// we need to make sure that when another MilevaDB server allocates an ID for the first time, requiredBase + 1
   281  			// will be allocated, so we need to increase the end to exactly the requiredBase.
   282  			newBase = requiredBase
   283  			newEnd = requiredBase
   284  		}
   285  		_, err1 = generateAutoIDByAllocType(m, alloc.dbID, blockID, newEnd-currentEnd, alloc.allocType)
   286  		return err1
   287  	})
   288  	metrics.AutoIDHistogram.WithLabelValues(metrics.BlockAutoIDRebase, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds())
   289  	if err != nil {
   290  		return err
   291  	}
   292  	alloc.base, alloc.end = newBase, newEnd
   293  	return nil
   294  }
   295  
   296  // rebase4Sequence won't allocate a batch immediately, because it doesn't cache values in the allocator.
   297  func (alloc *allocator) rebase4Sequence(blockID, requiredBase int64) (int64, bool, error) {
   298  	startTime := time.Now()
   299  	alreadySatisfied := false
   300  	err := ekv.RunInNewTxn(alloc.causetstore, true, func(txn ekv.Transaction) error {
   301  		m := spacetime.NewMeta(txn)
   302  		currentEnd, err := getAutoIDByAllocType(m, alloc.dbID, blockID, alloc.allocType)
   303  		if err != nil {
   304  			return err
   305  		}
   306  		if alloc.sequence.Increment > 0 {
   307  			if currentEnd >= requiredBase {
   308  				// Required base satisfied, we don't need to update KV.
   309  				alreadySatisfied = true
   310  				return nil
   311  			}
   312  		} else {
   313  			if currentEnd <= requiredBase {
   314  				// Required base satisfied, we don't need to update KV.
   315  				alreadySatisfied = true
   316  				return nil
   317  			}
   318  		}
   319  
   320  		// If we don't want to allocate IDs, for example when creating a causet with a given base value,
   321  		// we need to make sure that when another MilevaDB server allocates an ID for the first time, requiredBase + 1
   322  		// will be allocated, so we need to increase the end to exactly the requiredBase.
   323  		_, err = generateAutoIDByAllocType(m, alloc.dbID, blockID, requiredBase-currentEnd, alloc.allocType)
   324  		return err
   325  	})
   326  	// TODO: sequence metrics
   327  	metrics.AutoIDHistogram.WithLabelValues(metrics.BlockAutoIDRebase, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds())
   328  	if err != nil {
   329  		return 0, false, err
   330  	}
   331  	if alreadySatisfied {
   332  		return 0, true, nil
   333  	}
   334  	return requiredBase, false, err
   335  }
   336  
   337  // Rebase implements autoid.SlabPredictor Rebase interface.
   338  // The requiredBase is the minimum base value after Rebase.
   339  // The real base may be greater than the required base.
   340  func (alloc *allocator) Rebase(blockID, requiredBase int64, allocIDs bool) error {
   341  	if blockID == 0 {
   342  		return errInvalidBlockID.GenWithStack("Invalid blockID")
   343  	}
   344  
   345  	alloc.mu.Lock()
   346  	defer alloc.mu.Unlock()
   347  
   348  	if alloc.isUnsigned {
   349  		return alloc.rebase4Unsigned(blockID, uint64(requiredBase), allocIDs)
   350  	}
   351  	return alloc.rebase4Signed(blockID, requiredBase, allocIDs)
   352  }
   353  
   354  // RebaseSeq implements the autoid.SlabPredictor RebaseSeq interface.
   355  // The return value is quite similar to that of a memex function: the bool indicates whether the result should be NULL,
   356  // and it is used by the setval memex function (true means the set value is already satisfied, so NULL is returned).
   357  // Case 1: when requiredBase is already satisfied by the current value, it returns (0, true, nil).
   358  // Case 2: when requiredBase is successfully set, it returns (requiredBase, false, nil).
   359  // If some error occurs in the process, it is returned immediately.
   360  func (alloc *allocator) RebaseSeq(blockID, requiredBase int64) (int64, bool, error) {
   361  	if blockID == 0 {
   362  		return 0, false, errInvalidBlockID.GenWithStack("Invalid blockID")
   363  	}
   364  
   365  	alloc.mu.Lock()
   366  	defer alloc.mu.Unlock()
   367  	return alloc.rebase4Sequence(blockID, requiredBase)
   368  }
   369  
   370  func (alloc *allocator) GetType() SlabPredictorType {
   371  	return alloc.allocType
   372  }
   373  
   374  // NextStep returns the new auto-id step according to the previous step and the time taken to consume it.
   375  func NextStep(curStep int64, consumeDur time.Duration) int64 {
   376  	failpoint.Inject("mockAutoIDCustomize", func(val failpoint.Value) {
   377  		if val.(bool) {
   378  			failpoint.Return(3)
   379  		}
   380  	})
   381  	failpoint.Inject("mockAutoIDChange", func(val failpoint.Value) {
   382  		if val.(bool) {
   383  			failpoint.Return(step)
   384  		}
   385  	})
   386  
   387  	consumeRate := defaultConsumeTime.Seconds() / consumeDur.Seconds()
   388  	res := int64(float64(curStep) * consumeRate)
   389  	if res < minStep {
   390  		return minStep
   391  	} else if res > maxStep {
   392  		return maxStep
   393  	}
   394  	return res
   395  }
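
        // exampleNextStepAdaptation is a hypothetical helper, shown only as an illustrative sketch of the
        // step adaptation above: consuming a segment faster than defaultConsumeTime grows the next step,
        // while consuming it slower shrinks the step, clamped to [minStep, maxStep].
        func exampleNextStepAdaptation() (fast, slow int64) {
        	// 30000 ids consumed in 5s (twice as fast as the 10s target) doubles the step to 60000.
        	fast = NextStep(30000, 5*time.Second)
        	// 30000 ids consumed in 40s would suggest 7500, which is clamped up to minStep (30000).
        	slow = NextStep(30000, 40*time.Second)
        	return fast, slow
        }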
   396  
   397  // NewSlabPredictor returns a new auto increment id generator on the causetstore.
   398  func NewSlabPredictor(causetstore ekv.CausetStorage, dbID int64, isUnsigned bool, allocType SlabPredictorType, opts ...AllocOption) SlabPredictor {
   399  	alloc := &allocator{
   400  		causetstore:   causetstore,
   401  		dbID:          dbID,
   402  		isUnsigned:    isUnsigned,
   403  		step:          step,
   404  		lastAllocTime: time.Now(),
   405  		allocType:     allocType,
   406  	}
   407  	for _, fn := range opts {
   408  		fn.ApplyOn(alloc)
   409  	}
   410  	return alloc
   411  }
   412  
   413  // NewSequenceSlabPredictor returns a new sequence value generator on the causetstore.
   414  func NewSequenceSlabPredictor(causetstore ekv.CausetStorage, dbID int64, info *perceptron.SequenceInfo) SlabPredictor {
   415  	return &allocator{
   416  		causetstore: causetstore,
   417  		dbID:        dbID,
   418  		// Sequence allocator is always signed.
   419  		isUnsigned:    false,
   420  		lastAllocTime: time.Now(),
   421  		allocType:     SequenceType,
   422  		sequence:      info,
   423  	}
   424  }
   425  
   426  // NewSlabPredictorsFromTblInfo creates an array of allocators of different types based on the information in perceptron.BlockInfo.
   427  func NewSlabPredictorsFromTblInfo(causetstore ekv.CausetStorage, schemaID int64, tblInfo *perceptron.BlockInfo) SlabPredictors {
   428  	var allocs []SlabPredictor
   429  	dbID := tblInfo.GetDBID(schemaID)
   430  	hasRowID := !tblInfo.PKIsHandle && !tblInfo.IsCommonHandle
   431  	hasAutoIncID := tblInfo.GetAutoIncrementDefCausInfo() != nil
   432  	if hasRowID || hasAutoIncID {
   433  		if tblInfo.AutoIdCache > 0 {
   434  			allocs = append(allocs, NewSlabPredictor(causetstore, dbID, tblInfo.IsAutoIncDefCausUnsigned(), RowIDAllocType, CustomAutoIncCacheOption(tblInfo.AutoIdCache)))
   435  		} else {
   436  			allocs = append(allocs, NewSlabPredictor(causetstore, dbID, tblInfo.IsAutoIncDefCausUnsigned(), RowIDAllocType))
   437  		}
   438  	}
   439  	if tblInfo.ContainsAutoRandomBits() {
   440  		allocs = append(allocs, NewSlabPredictor(causetstore, dbID, tblInfo.IsAutoRandomBitDefCausUnsigned(), AutoRandomType))
   441  	}
   442  	if tblInfo.IsSequence() {
   443  		allocs = append(allocs, NewSequenceSlabPredictor(causetstore, dbID, tblInfo.Sequence))
   444  	}
   445  	return NewSlabPredictors(allocs...)
   446  }
   447  
   448  // Alloc implements autoid.SlabPredictor Alloc interface.
   449  // For the autoIncrement allocator, the increment and offset should always be positive, in [1, 65535].
   450  // Attention:
   451  // When increment and offset are not the default value (1), the caller needs to calculate the correct start
   452  // position in the returned range (min, max] rather than simply adding 1 to min. The successive autoIDs are
   453  // then derived by adding increment to firstID (n-1) times.
   454  //
   455  // Example:
   456  // (6, 13] is returned, increment = 4, offset = 1, n = 2.
   457  // 6 is the last allocated value for some other autoID or handle, maybe with a different increment and step,
   458  // but actually we don't care about that; all we need is to calculate the new autoID corresponding to the
   459  // increment and offset at this time. Simplified, the rule is (ID - offset) % increment == 0,
   460  // so the first autoID should be 9, and adding the increment to it gives 13.
   461  func (alloc *allocator) Alloc(blockID int64, n uint64, increment, offset int64) (int64, int64, error) {
   462  	if blockID == 0 {
   463  		return 0, 0, errInvalidBlockID.GenWithStackByArgs("Invalid blockID")
   464  	}
   465  	if n == 0 {
   466  		return 0, 0, nil
   467  	}
   468  	if alloc.allocType == AutoIncrementType || alloc.allocType == RowIDAllocType {
   469  		if !validIncrementAndOffset(increment, offset) {
   470  			return 0, 0, errInvalidIncrementAndOffset.GenWithStackByArgs(increment, offset)
   471  		}
   472  	}
   473  	alloc.mu.Lock()
   474  	defer alloc.mu.Unlock()
   475  	if alloc.isUnsigned {
   476  		return alloc.alloc4Unsigned(blockID, n, increment, offset)
   477  	}
   478  	return alloc.alloc4Signed(blockID, n, increment, offset)
   479  }
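
        // deriveAllocatedIDs is a hypothetical helper, shown only as an illustrative sketch of the
        // caller-side derivation described above: given the (min, max] range returned by Alloc and the
        // increment/offset used, it enumerates the actual ids. For (6, 13] with increment=4 and offset=1
        // it yields 9 and 13.
        func deriveAllocatedIDs(min, max, increment, offset int64) []int64 {
        	ids := make([]int64, 0, (max-min)/increment+1)
        	// Seek to the first id satisfying (id - offset) % increment == 0, then step by increment.
        	for id := SeekToFirstAutoIDSigned(min, increment, offset); id <= max; id += increment {
        		ids = append(ids, id)
        	}
        	return ids
        }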
   480  
   481  func (alloc *allocator) AllocSeqCache(blockID int64) (int64, int64, int64, error) {
   482  	if blockID == 0 {
   483  		return 0, 0, 0, errInvalidBlockID.GenWithStackByArgs("Invalid blockID")
   484  	}
   485  	alloc.mu.Lock()
   486  	defer alloc.mu.Unlock()
   487  	return alloc.alloc4Sequence(blockID)
   488  }
   489  
   490  func validIncrementAndOffset(increment, offset int64) bool {
   491  	return (increment >= minIncrement && increment <= maxIncrement) && (offset >= minIncrement && offset <= maxIncrement)
   492  }
   493  
   494  // CalcNeededBatchSize is used to calculate the batch size for autoID allocation.
   495  // It first seeks to the first valid position based on increment and offset,
   496  // then adds the remaining length, which is (n-1) * increment.
   497  func CalcNeededBatchSize(base, n, increment, offset int64, isUnsigned bool) int64 {
   498  	if increment == 1 {
   499  		return n
   500  	}
   501  	if isUnsigned {
   502  		// SeekToFirstAutoIDUnSigned seeks to the next unsigned valid position.
   503  		nr := SeekToFirstAutoIDUnSigned(uint64(base), uint64(increment), uint64(offset))
   504  		// Calculate the total batch size needed.
   505  		nr += (uint64(n) - 1) * uint64(increment)
   506  		return int64(nr - uint64(base))
   507  	}
   508  	nr := SeekToFirstAutoIDSigned(base, increment, offset)
   509  	// Calculate the total batch size needed.
   510  	nr += (n - 1) * increment
   511  	return nr - base
   512  }
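
        // exampleNeededBatchSize is a hypothetical helper, shown only as an illustrative sketch of the
        // calculation above: with base=6, n=2, increment=4, offset=1 the first valid id is 9 and the
        // second is 13, so 13-6 = 7 ids must be reserved to return the range (6, 13].
        func exampleNeededBatchSize() int64 {
        	return CalcNeededBatchSize(6, 2, 4, 1, false) // expected to be 7
        }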
   513  
   514  // CalcSequenceBatchSize calculates the next sequence batch size.
   515  func CalcSequenceBatchSize(base, size, increment, offset, MIN, MAX int64) (int64, error) {
   516  	// The sequence grows in the positive direction.
   517  	if increment > 0 {
   518  		if increment == 1 {
   519  			// Sequence is already allocated to the end.
   520  			if base >= MAX {
   521  				return 0, ErrAutoincReadFailed
   522  			}
   523  			// The rest of sequence < cache size, return the rest.
   524  			if MAX-base < size {
   525  				return MAX - base, nil
   526  			}
   527  			// The rest of sequence is adequate.
   528  			return size, nil
   529  		}
   530  		nr, ok := SeekToFirstSequenceValue(base, increment, offset, MIN, MAX)
   531  		if !ok {
   532  			return 0, ErrAutoincReadFailed
   533  		}
   534  		// The rest of sequence < cache size, return the rest.
   535  		if MAX-nr < (size-1)*increment {
   536  			return MAX - base, nil
   537  		}
   538  		return (nr - base) + (size-1)*increment, nil
   539  	}
   540  	// The sequence grows in the negative direction.
   541  	if increment == -1 {
   542  		if base <= MIN {
   543  			return 0, ErrAutoincReadFailed
   544  		}
   545  		if base-MIN < size {
   546  			return base - MIN, nil
   547  		}
   548  		return size, nil
   549  	}
   550  	nr, ok := SeekToFirstSequenceValue(base, increment, offset, MIN, MAX)
   551  	if !ok {
   552  		return 0, ErrAutoincReadFailed
   553  	}
   554  	// The rest of sequence < cache size, return the rest.
   555  	if nr-MIN < (size-1)*(-increment) {
   556  		return base - MIN, nil
   557  	}
   558  	return (base - nr) + (size-1)*(-increment), nil
   559  }
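
        // exampleSequenceBatchSize is a hypothetical helper, shown only as an illustrative sketch of the
        // calculation above: with base=0, cache size=5, increment=3, offset=1, MIN=1 and MAX=100 the first
        // valid value is 1 and the batch must extend to 13 to cover the five values 1, 4, 7, 10, 13.
        func exampleSequenceBatchSize() (int64, error) {
        	return CalcSequenceBatchSize(0, 5, 3, 1, 1, 100) // expected to be (13, nil)
        }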
   560  
   561  // SeekToFirstSequenceValue seeks to the next valid value (which must be in the range [MIN, MAX]);
   562  // the bool indicates whether such a first value was found.
   563  // The seeking formula is described below:
   564  //  nr    := (base + increment - offset) / increment
   565  //  first := nr*increment + offset
   566  // Because the formula computation may overflow int64, we transfer it to uint64 for the distance computation.
   567  func SeekToFirstSequenceValue(base, increment, offset, MIN, MAX int64) (int64, bool) {
   568  	if increment > 0 {
   569  		// Sequence is already allocated to the end.
   570  		if base >= MAX {
   571  			return 0, false
   572  		}
   573  		uMax := EncodeIntToCmpUint(MAX)
   574  		uBase := EncodeIntToCmpUint(base)
   575  		uOffset := EncodeIntToCmpUint(offset)
   576  		uIncrement := uint64(increment)
   577  		if uMax-uBase < uIncrement {
   578  			// Enumerate the possible first values.
   579  			for i := uBase + 1; i <= uMax; i++ {
   580  				if (i-uOffset)%uIncrement == 0 {
   581  					return DecodeCmpUintToInt(i), true
   582  				}
   583  			}
   584  			return 0, false
   585  		}
   586  		nr := (uBase + uIncrement - uOffset) / uIncrement
   587  		nr = nr*uIncrement + uOffset
   588  		first := DecodeCmpUintToInt(nr)
   589  		return first, true
   590  	}
   591  	// Sequence is already allocated to the end.
   592  	if base <= MIN {
   593  		return 0, false
   594  	}
   595  	uMin := EncodeIntToCmpUint(MIN)
   596  	uBase := EncodeIntToCmpUint(base)
   597  	uOffset := EncodeIntToCmpUint(offset)
   598  	uIncrement := uint64(-increment)
   599  	if uBase-uMin < uIncrement {
   600  		// Enumerate the possible first values.
   601  		for i := uBase - 1; i >= uMin; i-- {
   602  			if (uOffset-i)%uIncrement == 0 {
   603  				return DecodeCmpUintToInt(i), true
   604  			}
   605  		}
   606  		return 0, false
   607  	}
   608  	nr := (uOffset - uBase + uIncrement) / uIncrement
   609  	nr = uOffset - nr*uIncrement
   610  	first := DecodeCmpUintToInt(nr)
   611  	return first, true
   612  }
   613  
   614  // SeekToFirstAutoIDSigned seeks to the next valid signed position.
   615  func SeekToFirstAutoIDSigned(base, increment, offset int64) int64 {
   616  	nr := (base + increment - offset) / increment
   617  	nr = nr*increment + offset
   618  	return nr
   619  }
   620  
   621  // SeekToFirstAutoIDUnSigned seeks to the next valid unsigned position.
   622  func SeekToFirstAutoIDUnSigned(base, increment, offset uint64) uint64 {
   623  	nr := (base + increment - offset) / increment
   624  	nr = nr*increment + offset
   625  	return nr
   626  }
   627  
   628  func (alloc *allocator) alloc4Signed(blockID int64, n uint64, increment, offset int64) (int64, int64, error) {
   629  	// Check offset rebase if necessary.
   630  	if offset-1 > alloc.base {
   631  		if err := alloc.rebase4Signed(blockID, offset-1, true); err != nil {
   632  			return 0, 0, err
   633  		}
   634  	}
   635  	// CalcNeededBatchSize calculates the total batch size needed.
   636  	n1 := CalcNeededBatchSize(alloc.base, int64(n), increment, offset, alloc.isUnsigned)
   637  
   638  	// The condition alloc.base+n1 > alloc.end would overflow when alloc.base + n1 > MaxInt64, so this check is needed.
   639  	if math.MaxInt64-alloc.base <= n1 {
   640  		return 0, 0, ErrAutoincReadFailed
   641  	}
   642  	// The local rest is not enough for this allocation; allocate a new batch from the storage.
   643  	if alloc.base+n1 > alloc.end {
   644  		var newBase, newEnd int64
   645  		startTime := time.Now()
   646  		nextStep := alloc.step
   647  		if !alloc.customStep {
   648  			// Although it may skip a segment here, we still think it is consumed.
   649  			consumeDur := startTime.Sub(alloc.lastAllocTime)
   650  			nextStep = NextStep(alloc.step, consumeDur)
   651  		}
   652  		err := ekv.RunInNewTxn(alloc.causetstore, true, func(txn ekv.Transaction) error {
   653  			m := spacetime.NewMeta(txn)
   654  			var err1 error
   655  			newBase, err1 = getAutoIDByAllocType(m, alloc.dbID, blockID, alloc.allocType)
   656  			if err1 != nil {
   657  				return err1
   658  			}
   659  			// CalcNeededBatchSize calculates the total batch size needed on global base.
   660  			n1 = CalcNeededBatchSize(newBase, int64(n), increment, offset, alloc.isUnsigned)
   661  			// Even if the step is customized by the user, we still need to make sure nextStep is big enough for the insert batch.
   662  			if nextStep < n1 {
   663  				nextStep = n1
   664  			}
   665  			tmpStep := mathutil.MinInt64(math.MaxInt64-newBase, nextStep)
   666  			// The global rest is not enough for alloc.
   667  			if tmpStep < n1 {
   668  				return ErrAutoincReadFailed
   669  			}
   670  			newEnd, err1 = generateAutoIDByAllocType(m, alloc.dbID, blockID, tmpStep, alloc.allocType)
   671  			return err1
   672  		})
   673  		metrics.AutoIDHistogram.WithLabelValues(metrics.BlockAutoIDAlloc, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds())
   674  		if err != nil {
   675  			return 0, 0, err
   676  		}
   677  		// Store the step for the non-customized-step allocator to calculate the next dynamic step.
   678  		if !alloc.customStep {
   679  			alloc.step = nextStep
   680  		}
   681  		alloc.lastAllocTime = time.Now()
   682  		if newBase == math.MaxInt64 {
   683  			return 0, 0, ErrAutoincReadFailed
   684  		}
   685  		alloc.base, alloc.end = newBase, newEnd
   686  	}
   687  	logutil.Logger(context.TODO()).Debug("alloc N signed ID",
   688  		zap.Uint64("from ID", uint64(alloc.base)),
   689  		zap.Uint64("to ID", uint64(alloc.base+n1)),
   690  		zap.Int64("causet ID", blockID),
   691  		zap.Int64("database ID", alloc.dbID))
   692  	min := alloc.base
   693  	alloc.base += n1
   694  	return min, alloc.base, nil
   695  }
   696  
   697  func (alloc *allocator) alloc4Unsigned(blockID int64, n uint64, increment, offset int64) (int64, int64, error) {
   698  	// Check offset rebase if necessary.
   699  	if uint64(offset-1) > uint64(alloc.base) {
   700  		if err := alloc.rebase4Unsigned(blockID, uint64(offset-1), true); err != nil {
   701  			return 0, 0, err
   702  		}
   703  	}
   704  	// CalcNeededBatchSize calculates the total batch size needed.
   705  	n1 := CalcNeededBatchSize(alloc.base, int64(n), increment, offset, alloc.isUnsigned)
   706  
   707  	// The condition alloc.base+n1 > alloc.end would overflow when alloc.base + n1 > MaxUint64, so this check is needed.
   708  	if math.MaxUint64-uint64(alloc.base) <= uint64(n1) {
   709  		return 0, 0, ErrAutoincReadFailed
   710  	}
   711  	// The local rest is not enough for this allocation; allocate a new batch from the storage.
   712  	if uint64(alloc.base)+uint64(n1) > uint64(alloc.end) {
   713  		var newBase, newEnd int64
   714  		startTime := time.Now()
   715  		nextStep := alloc.step
   716  		if !alloc.customStep {
   717  			// Although it may skip a segment here, we still treat it as consumed.
   718  			consumeDur := startTime.Sub(alloc.lastAllocTime)
   719  			nextStep = NextStep(alloc.step, consumeDur)
   720  		}
   721  		err := ekv.RunInNewTxn(alloc.causetstore, true, func(txn ekv.Transaction) error {
   722  			m := spacetime.NewMeta(txn)
   723  			var err1 error
   724  			newBase, err1 = getAutoIDByAllocType(m, alloc.dbID, blockID, alloc.allocType)
   725  			if err1 != nil {
   726  				return err1
   727  			}
   728  			// CalcNeededBatchSize calculates the total batch size needed on new base.
   729  			n1 = CalcNeededBatchSize(newBase, int64(n), increment, offset, alloc.isUnsigned)
   730  			// Even if the step is customized by the user, we still need to make sure nextStep is big enough for the insert batch.
   731  			if nextStep < n1 {
   732  				nextStep = n1
   733  			}
   734  			tmpStep := int64(mathutil.MinUint64(math.MaxUint64-uint64(newBase), uint64(nextStep)))
   735  			// The global rest is not enough for alloc.
   736  			if tmpStep < n1 {
   737  				return ErrAutoincReadFailed
   738  			}
   739  			newEnd, err1 = generateAutoIDByAllocType(m, alloc.dbID, blockID, tmpStep, alloc.allocType)
   740  			return err1
   741  		})
   742  		metrics.AutoIDHistogram.WithLabelValues(metrics.BlockAutoIDAlloc, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds())
   743  		if err != nil {
   744  			return 0, 0, err
   745  		}
   746  		// Store the step for the non-customized-step allocator to calculate the next dynamic step.
   747  		if !alloc.customStep {
   748  			alloc.step = nextStep
   749  		}
   750  		alloc.lastAllocTime = time.Now()
   751  		if uint64(newBase) == math.MaxUint64 {
   752  			return 0, 0, ErrAutoincReadFailed
   753  		}
   754  		alloc.base, alloc.end = newBase, newEnd
   755  	}
   756  	logutil.Logger(context.TODO()).Debug("alloc unsigned ID",
   757  		zap.Uint64("from ID", uint64(alloc.base)),
   758  		zap.Uint64("to ID", uint64(alloc.base+n1)),
   759  		zap.Int64("causet ID", blockID),
   760  		zap.Int64("database ID", alloc.dbID))
   761  	min := alloc.base
   762  	// Use uint64 n directly.
   763  	alloc.base = int64(uint64(alloc.base) + uint64(n1))
   764  	return min, alloc.base, nil
   765  }
   766  
   767  // alloc4Sequence is used to allocate values for a sequence; several aspects differ from the autoid logic.
   768  // 1: sequence allocation doesn't need to check rebase.
   769  // 2: sequence allocation doesn't need an auto step.
   770  // 3: sequence allocation may grow negatively.
   771  // 4: the allocated sequence batch may be shorter than requested.
   772  // 5: a sequence batch allocation is considered consumed immediately.
   773  func (alloc *allocator) alloc4Sequence(blockID int64) (min int64, max int64, round int64, err error) {
   774  	increment := alloc.sequence.Increment
   775  	offset := alloc.sequence.Start
   776  	minValue := alloc.sequence.MinValue
   777  	maxValue := alloc.sequence.MaxValue
   778  	cacheSize := alloc.sequence.CacheValue
   779  	if !alloc.sequence.Cache {
   780  		cacheSize = 1
   781  	}
   782  
   783  	var newBase, newEnd int64
   784  	startTime := time.Now()
   785  	err = ekv.RunInNewTxn(alloc.causetstore, true, func(txn ekv.Transaction) error {
   786  		m := spacetime.NewMeta(txn)
   787  		var (
   788  			err1    error
   789  			seqStep int64
   790  		)
   791  		// Get the real offset if the sequence is in cycle.
   792  		// round is used to count cycle times in sequence with cycle option.
   793  		if alloc.sequence.Cycle {
   794  			// GetSequenceCycle is used to get the flag `round`, which indicates whether the sequence is already in cycle.
   795  			round, err1 = m.GetSequenceCycle(alloc.dbID, blockID)
   796  			if err1 != nil {
   797  				return err1
   798  			}
   799  			if round > 0 {
   800  				if increment > 0 {
   801  					offset = alloc.sequence.MinValue
   802  				} else {
   803  					offset = alloc.sequence.MaxValue
   804  				}
   805  			}
   806  		}
   807  
   808  		// Get the global new base.
   809  		newBase, err1 = getAutoIDByAllocType(m, alloc.dbID, blockID, alloc.allocType)
   810  		if err1 != nil {
   811  			return err1
   812  		}
   813  
   814  		// CalcSequenceBatchSize calculates the next batch size for the sequence.
   815  		seqStep, err1 = CalcSequenceBatchSize(newBase, cacheSize, increment, offset, minValue, maxValue)
   816  
   817  		if err1 != nil && err1 == ErrAutoincReadFailed {
   818  			if !alloc.sequence.Cycle {
   819  				return err1
   820  			}
   821  			// Reset the sequence base and offset.
   822  			if alloc.sequence.Increment > 0 {
   823  				newBase = alloc.sequence.MinValue - 1
   824  				offset = alloc.sequence.MinValue
   825  			} else {
   826  				newBase = alloc.sequence.MaxValue + 1
   827  				offset = alloc.sequence.MaxValue
   828  			}
   829  			err1 = m.SetSequenceValue(alloc.dbID, blockID, newBase)
   830  			if err1 != nil {
   831  				return err1
   832  			}
   833  
   834  			// Reset sequence round state value.
   835  			round++
   836  			// SetSequenceCycle is used to store the flag `round`, which indicates whether the sequence is already in a cycle.
   837  			// round > 0 means the sequence is already in a cycle, so the offset should be minvalue / maxvalue rather than sequence.start.
   838  			// MilevaDB is a stateless node, so it should know whether the sequence is already in a cycle when it restarts.
   839  			err1 = m.SetSequenceCycle(alloc.dbID, blockID, round)
   840  			if err1 != nil {
   841  				return err1
   842  			}
   843  
   844  			// Recompute the sequence next batch size.
   845  			seqStep, err1 = CalcSequenceBatchSize(newBase, cacheSize, increment, offset, minValue, maxValue)
   846  			if err1 != nil {
   847  				return err1
   848  			}
   849  		}
   850  		var delta int64
   851  		if alloc.sequence.Increment > 0 {
   852  			delta = seqStep
   853  		} else {
   854  			delta = -seqStep
   855  		}
   856  		newEnd, err1 = generateAutoIDByAllocType(m, alloc.dbID, blockID, delta, alloc.allocType)
   857  		return err1
   858  	})
   859  
   860  	// TODO: sequence metrics
   861  	metrics.AutoIDHistogram.WithLabelValues(metrics.BlockAutoIDAlloc, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds())
   862  	if err != nil {
   863  		return 0, 0, 0, err
   864  	}
   865  	logutil.Logger(context.TODO()).Debug("alloc sequence value",
   866  		zap.Uint64("from value", uint64(newBase)),
   867  		zap.Uint64("to value", uint64(newEnd)),
   868  		zap.Int64("causet ID", blockID),
   869  		zap.Int64("database ID", alloc.dbID))
   870  	return newBase, newEnd, round, nil
   871  }
   872  
   873  func getAutoIDByAllocType(m *spacetime.Meta, dbID, blockID int64, allocType SlabPredictorType) (int64, error) {
   874  	switch allocType {
   875  	// Currently, the row id allocator and the auto-increment value allocator share the same key-value pair.
   876  	case RowIDAllocType, AutoIncrementType:
   877  		return m.GetAutoBlockID(dbID, blockID)
   878  	case AutoRandomType:
   879  		return m.GetAutoRandomID(dbID, blockID)
   880  	case SequenceType:
   881  		return m.GetSequenceValue(dbID, blockID)
   882  	default:
   883  		return 0, ErrInvalidSlabPredictorType.GenWithStackByArgs()
   884  	}
   885  }
   886  
   887  func generateAutoIDByAllocType(m *spacetime.Meta, dbID, blockID, step int64, allocType SlabPredictorType) (int64, error) {
   888  	switch allocType {
   889  	case RowIDAllocType, AutoIncrementType:
   890  		return m.GenAutoBlockID(dbID, blockID, step)
   891  	case AutoRandomType:
   892  		return m.GenAutoRandomID(dbID, blockID, step)
   893  	case SequenceType:
   894  		return m.GenSequenceValue(dbID, blockID, step)
   895  	default:
   896  		return 0, ErrInvalidSlabPredictorType.GenWithStackByArgs()
   897  	}
   898  }
   899  
   900  const signMask uint64 = 0x8000000000000000
   901  
   902  // EncodeIntToCmpUint converts the int v into a comparable uint value.
   903  func EncodeIntToCmpUint(v int64) uint64 {
   904  	return uint64(v) ^ signMask
   905  }
   906  
   907  // DecodeCmpUintToInt decodes u that was encoded by EncodeIntToCmpUint.
   908  func DecodeCmpUintToInt(u uint64) int64 {
   909  	return int64(u ^ signMask)
   910  }
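
        // cmpUintOrderPreserved is a hypothetical helper, shown only as an illustrative sketch of why the
        // sign-mask encoding above is used: XOR-ing with the sign bit maps int64 values to uint64 values
        // with the same ordering, so the unsigned distance computations in the sequence code stay correct.
        func cmpUintOrderPreserved() bool {
        	// -1 < 0 < 1 must still hold after encoding to uint64.
        	return EncodeIntToCmpUint(-1) < EncodeIntToCmpUint(0) &&
        		EncodeIntToCmpUint(0) < EncodeIntToCmpUint(1)
        }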
   911  
   912  // TestModifyBaseAndEndInjection is exported for tests to modify the base and end.
   913  func TestModifyBaseAndEndInjection(alloc SlabPredictor, base, end int64) {
   914  	alloc.(*allocator).mu.Lock()
   915  	alloc.(*allocator).base = base
   916  	alloc.(*allocator).end = end
   917  	alloc.(*allocator).mu.Unlock()
   918  }
   919  
   920  // AutoRandomIDLayout is used to calculate the bit lengths of the different sections in an auto_random id.
   921  // The primary key with auto_random can only be a `bigint` defCausumn; the total layout length of an auto_random id is 64 bits.
   922  // There are two types of layout:
   923  // 1. Signed bigint:
   924  //   | [sign_bit] | [shard_bits] | [incremental_bits] |
   925  //   sign_bit(1 fixed) + shard_bits(15 max) + incremental_bits(the rest) = total_layout_bits(64 fixed)
   926  // 2. Unsigned bigint:
   927  //   | [shard_bits] | [incremental_bits] |
   928  //   shard_bits(15 max) + incremental_bits(the rest) = total_layout_bits(64 fixed)
   929  // Please always use NewAutoRandomIDLayout() to instantiate one.
   930  type AutoRandomIDLayout struct {
   931  	FieldType *types.FieldType
   932  	ShardBits uint64
   933  	// Derived fields.
   934  	TypeBitsLength  uint64
   935  	IncrementalBits uint64
   936  	HasSignBit      bool
   937  }
   938  
   939  // NewAutoRandomIDLayout creates an instance of AutoRandomIDLayout.
   940  func NewAutoRandomIDLayout(fieldType *types.FieldType, shardBits uint64) *AutoRandomIDLayout {
   941  	typeBitsLength := uint64(allegrosql.DefaultLengthOfMysqlTypes[allegrosql.TypeLonglong] * 8)
   942  	incrementalBits := typeBitsLength - shardBits
   943  	hasSignBit := !allegrosql.HasUnsignedFlag(fieldType.Flag)
   944  	if hasSignBit {
   945  		incrementalBits -= 1
   946  	}
   947  	return &AutoRandomIDLayout{
   948  		FieldType:       fieldType,
   949  		ShardBits:       shardBits,
   950  		TypeBitsLength:  typeBitsLength,
   951  		IncrementalBits: incrementalBits,
   952  		HasSignBit:      hasSignBit,
   953  	}
   954  }
   955  
   956  // IncrementalBitsCapacity returns the max capacity of the incremental section of the current layout.
   957  func (l *AutoRandomIDLayout) IncrementalBitsCapacity() uint64 {
   958  	return uint64(math.Pow(2, float64(l.IncrementalBits))) - 1
   959  }
   960  
   961  // IncrementalMask returns 00..0[11..1], where [xxx] is the incremental section of the current layout.
   962  func (l *AutoRandomIDLayout) IncrementalMask() int64 {
   963  	return (1 << l.IncrementalBits) - 1
   964  }
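
        // splitAutoRandomID is a hypothetical helper, shown only as an illustrative sketch of how the
        // layout above partitions a 64-bit auto_random id: the incremental mask keeps the low
        // IncrementalBits bits, and the remaining high bits (below the sign bit, if any) hold the shard.
        func splitAutoRandomID(l *AutoRandomIDLayout, id int64) (shardPart, incrementalPart int64) {
        	incrementalPart = id & l.IncrementalMask()
        	shardPart = id &^ l.IncrementalMask()
        	return shardPart, incrementalPart
        }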