// github.com/whtcorpsinc/MilevaDB-Prod@v0.0.0-20211104133533-f57f4be3b597/soliton/admin/admin.go

// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package admin

import (
	"context"
	"encoding/json"
	"fmt"
	"math"
	"sort"
	"time"

	"github.com/whtcorpsinc/BerolinaSQL/allegrosql"
	"github.com/whtcorpsinc/BerolinaSQL/perceptron"
	"github.com/whtcorpsinc/BerolinaSQL/terror"
	"github.com/whtcorpsinc/errors"
	"github.com/whtcorpsinc/milevadb/blockcodec"
	"github.com/whtcorpsinc/milevadb/causet"
	"github.com/whtcorpsinc/milevadb/ekv"
	"github.com/whtcorpsinc/milevadb/errno"
	"github.com/whtcorpsinc/milevadb/memex"
	"github.com/whtcorpsinc/milevadb/soliton"
	"github.com/whtcorpsinc/milevadb/soliton/logutil"
	causetDecoder "github.com/whtcorpsinc/milevadb/soliton/rowCausetDecoder"
	"github.com/whtcorpsinc/milevadb/soliton/sqlexec"
	"github.com/whtcorpsinc/milevadb/spacetime"
	"github.com/whtcorpsinc/milevadb/stochastikctx"
	"github.com/whtcorpsinc/milevadb/types"
	"go.uber.org/zap"
)

// DBSInfo is for DBS information.
type DBSInfo struct {
	SchemaVer   int64
	ReorgHandle ekv.Handle        // It's only used for DBS information.
	Jobs        []*perceptron.Job // The currently running DBS jobs.
}

// GetDBSInfo returns DBS information.
func GetDBSInfo(txn ekv.Transaction) (*DBSInfo, error) {
	var err error
	info := &DBSInfo{}
	t := spacetime.NewMeta(txn)

	info.Jobs = make([]*perceptron.Job, 0, 2)
	job, err := t.GetDBSJobByIdx(0)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if job != nil {
		info.Jobs = append(info.Jobs, job)
	}
	addIdxJob, err := t.GetDBSJobByIdx(0, spacetime.AddIndexJobListKey)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if addIdxJob != nil {
		info.Jobs = append(info.Jobs, addIdxJob)
	}

	info.SchemaVer, err = t.GetSchemaVersion()
	if err != nil {
		return nil, errors.Trace(err)
	}
	if addIdxJob == nil {
		return info, nil
	}

	tbl, err := t.GetBlock(addIdxJob.SchemaID, addIdxJob.BlockID)
	if err != nil {
		return info, nil
	}
	info.ReorgHandle, _, _, err = t.GetDBSReorgHandle(addIdxJob, tbl.IsCommonHandle)
	if err != nil {
		return nil, errors.Trace(err)
	}

	return info, nil
}

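// exampleGetDBSInfo is an editor-added usage sketch, not part of the original
// package API: it relies only on the GetDBSInfo call and the DBSInfo fields
// defined above; the function name and the printed format are hypothetical.
func exampleGetDBSInfo(txn ekv.Transaction) error {
	info, err := GetDBSInfo(txn)
	if err != nil {
		return errors.Trace(err)
	}
	fmt.Printf("schema version: %d, running jobs: %d\n", info.SchemaVer, len(info.Jobs))
	for _, job := range info.Jobs {
		fmt.Printf("job %d: type %v, state %v\n", job.ID, job.Type, job.State)
	}
	return nil
}
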
// IsJobRollbackable checks whether the job can be rolled back.
func IsJobRollbackable(job *perceptron.Job) bool {
	switch job.Type {
	case perceptron.CausetActionDropIndex, perceptron.CausetActionDropPrimaryKey:
		// We can't cancel the job if the index's current state is StateDeleteOnly, StateDeleteReorganization,
		// or StateWriteOnly; otherwise the record and the index would become inconsistent.
		// In the WriteOnly state we can roll back a normal index, but not a memex index (which needs to drop
		// a hidden defCausumn). Since we can't tell the type of the index here, we treat all indices except the
		// primary index as non-rollbackable.
		// TODO: distinguish normal indices from memex indices so that `DropIndex` on a normal index can be rolled back in the WriteOnly state.
		// TODO: make DropPrimaryKey rollbackable in WriteOnly; it needs to deal with some tests.
		if job.SchemaState == perceptron.StateDeleteOnly ||
			job.SchemaState == perceptron.StateDeleteReorganization ||
			job.SchemaState == perceptron.StateWriteOnly {
			return false
		}
	case perceptron.CausetActionDropSchema, perceptron.CausetActionDropBlock, perceptron.CausetActionDropSequence:
		// To simplify the rollback logic, these jobs cannot be canceled in the following states.
		if job.SchemaState == perceptron.StateWriteOnly ||
			job.SchemaState == perceptron.StateDeleteOnly {
			return false
		}
	case perceptron.CausetActionAddBlockPartition:
		return job.SchemaState == perceptron.StateNone || job.SchemaState == perceptron.StateReplicaOnly
	case perceptron.CausetActionDropDeferredCauset, perceptron.CausetActionDropDeferredCausets, perceptron.CausetActionDropBlockPartition,
		perceptron.CausetActionRebaseAutoID, perceptron.CausetActionShardRowID,
		perceptron.CausetActionTruncateBlock, perceptron.CausetActionAddForeignKey,
		perceptron.CausetActionDropForeignKey, perceptron.CausetActionRenameBlock,
		perceptron.CausetActionModifyBlockCharsetAndDefCauslate, perceptron.CausetActionTruncateBlockPartition,
		perceptron.CausetActionModifySchemaCharsetAndDefCauslate, perceptron.CausetActionRepairBlock, perceptron.CausetActionModifyBlockAutoIdCache:
		return job.SchemaState == perceptron.StateNone
	}
	return true
}

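// canCancelJob is an editor-added illustrative helper (hypothetical): it
// mirrors the state checks that CancelJobs performs below, showing how
// IsJobRollbackable combines with the job's lifecycle predicates.
func canCancelJob(job *perceptron.Job) bool {
	if job.IsDone() || job.IsSynced() {
		return false // already finished; cancelling it is an error
	}
	if job.IsCancelled() || job.IsRollingback() || job.IsRollbackDone() {
		return false // already cancelled or rolling back; nothing left to do
	}
	return IsJobRollbackable(job)
}
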
// CancelJobs cancels the DBS jobs.
func CancelJobs(txn ekv.Transaction, ids []int64) ([]error, error) {
	if len(ids) == 0 {
		return nil, nil
	}

	errs := make([]error, len(ids))
	t := spacetime.NewMeta(txn)
	generalJobs, err := getDBSJobsInQueue(t, spacetime.DefaultJobListKey)
	if err != nil {
		return nil, errors.Trace(err)
	}
	addIdxJobs, err := getDBSJobsInQueue(t, spacetime.AddIndexJobListKey)
	if err != nil {
		return nil, errors.Trace(err)
	}
	jobs := append(generalJobs, addIdxJobs...)

	for i, id := range ids {
		found := false
		for j, job := range jobs {
			if id != job.ID {
				logutil.BgLogger().Debug("the job to be canceled doesn't match the current job",
					zap.Int64("job ID to cancel", id),
					zap.Int64("current job ID", job.ID))
				continue
			}
			found = true
			// These states can't be cancelled.
			if job.IsDone() || job.IsSynced() {
				errs[i] = ErrCancelFinishedDBSJob.GenWithStackByArgs(id)
				continue
			}
			// If the job is rolling back, the worker is already cleaning up data after a cancellation, so there is nothing left to do.
			if job.IsCancelled() || job.IsRollingback() || job.IsRollbackDone() {
				continue
			}
			if !IsJobRollbackable(job) {
				errs[i] = ErrCannotCancelDBSJob.GenWithStackByArgs(job.ID)
				continue
			}

			job.State = perceptron.JobStateCancelling
			// Make sure RawArgs isn't overwritten.
			err := json.Unmarshal(job.RawArgs, &job.Args)
			if err != nil {
				errs[i] = errors.Trace(err)
				continue
			}
			if job.Type == perceptron.CausetActionAddIndex || job.Type == perceptron.CausetActionAddPrimaryKey {
				offset := int64(j - len(generalJobs))
				err = t.UFIDelateDBSJob(offset, job, true, spacetime.AddIndexJobListKey)
			} else {
				err = t.UFIDelateDBSJob(int64(j), job, true)
			}
			if err != nil {
				errs[i] = errors.Trace(err)
			}
		}
		if !found {
			errs[i] = ErrDBSJobNotFound.GenWithStackByArgs(id)
		}
	}
	return errs, nil
}

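// exampleCancelJobs is an editor-added usage sketch (hypothetical caller):
// cancel a batch of jobs and log the per-job failures. The second return
// value of CancelJobs is reserved for errors reading the job queues themselves.
func exampleCancelJobs(txn ekv.Transaction, ids []int64) error {
	errs, err := CancelJobs(txn, ids)
	if err != nil {
		return errors.Trace(err)
	}
	for i, e := range errs {
		if e != nil {
			logutil.BgLogger().Warn("cancel DBS job failed",
				zap.Int64("job ID", ids[i]), zap.Error(e))
		}
	}
	return nil
}
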
func getDBSJobsInQueue(t *spacetime.Meta, jobListKey spacetime.JobListKeyType) ([]*perceptron.Job, error) {
	cnt, err := t.DBSJobQueueLen(jobListKey)
	if err != nil {
		return nil, errors.Trace(err)
	}
	jobs := make([]*perceptron.Job, cnt)
	for i := range jobs {
		jobs[i], err = t.GetDBSJobByIdx(int64(i), jobListKey)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	return jobs, nil
}

// GetDBSJobs gets all the DBS jobs and sorts them by job.ID.
func GetDBSJobs(txn ekv.Transaction) ([]*perceptron.Job, error) {
	t := spacetime.NewMeta(txn)
	generalJobs, err := getDBSJobsInQueue(t, spacetime.DefaultJobListKey)
	if err != nil {
		return nil, errors.Trace(err)
	}
	addIdxJobs, err := getDBSJobsInQueue(t, spacetime.AddIndexJobListKey)
	if err != nil {
		return nil, errors.Trace(err)
	}
	jobs := append(generalJobs, addIdxJobs...)
	sort.Sort(jobArray(jobs))
	return jobs, nil
}

type jobArray []*perceptron.Job

func (v jobArray) Len() int {
	return len(v)
}

func (v jobArray) Less(i, j int) bool {
	return v[i].ID < v[j].ID
}

func (v jobArray) Swap(i, j int) {
	v[i], v[j] = v[j], v[i]
}

// MaxHistoryJobs is exported for testing.
const MaxHistoryJobs = 10

// DefNumHistoryJobs is the default number of history jobs to fetch.
const DefNumHistoryJobs = 10

// GetHistoryDBSJobs returns the DBS history jobs and an error.
// At most maxNumJobs jobs are returned.
func GetHistoryDBSJobs(txn ekv.Transaction, maxNumJobs int) ([]*perceptron.Job, error) {
	t := spacetime.NewMeta(txn)
	jobs, err := t.GetLastNHistoryDBSJobs(maxNumJobs)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return jobs, nil
}

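// exampleShowHistoryJobs is an editor-added usage sketch (hypothetical):
// fetch up to DefNumHistoryJobs finished jobs and print their IDs and states.
func exampleShowHistoryJobs(txn ekv.Transaction) error {
	jobs, err := GetHistoryDBSJobs(txn, DefNumHistoryJobs)
	if err != nil {
		return errors.Trace(err)
	}
	for _, job := range jobs {
		fmt.Printf("history job %d: type %v, state %v\n", job.ID, job.Type, job.State)
	}
	return nil
}
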
// IterHistoryDBSJobs iterates over the history DBS jobs until `finishFn` returns true or an error.
func IterHistoryDBSJobs(txn ekv.Transaction, finishFn func([]*perceptron.Job) (bool, error)) error {
	txnMeta := spacetime.NewMeta(txn)
	iter, err := txnMeta.GetLastHistoryDBSJobsIterator()
	if err != nil {
		return err
	}
	cacheJobs := make([]*perceptron.Job, 0, DefNumHistoryJobs)
	for {
		cacheJobs, err = iter.GetLastJobs(DefNumHistoryJobs, cacheJobs)
		if err != nil || len(cacheJobs) == 0 {
			return err
		}
		finish, err := finishFn(cacheJobs)
		if err != nil || finish {
			return err
		}
	}
}

// IterAllDBSJobs iterates over the running DBS jobs first and returns directly if `finishFn`
// returns true or an error; it then iterates over the history DBS jobs until `finishFn` returns
// true or an error.
func IterAllDBSJobs(txn ekv.Transaction, finishFn func([]*perceptron.Job) (bool, error)) error {
	jobs, err := GetDBSJobs(txn)
	if err != nil {
		return err
	}

	finish, err := finishFn(jobs)
	if err != nil || finish {
		return err
	}
	return IterHistoryDBSJobs(txn, finishFn)
}

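// exampleFindJob is an editor-added usage sketch (hypothetical): find a job
// by ID across both the running queues and the history, using the early-exit
// contract of IterAllDBSJobs: returning true from finishFn stops the iteration.
func exampleFindJob(txn ekv.Transaction, targetID int64) (*perceptron.Job, error) {
	var found *perceptron.Job
	err := IterAllDBSJobs(txn, func(jobs []*perceptron.Job) (bool, error) {
		for _, job := range jobs {
			if job.ID == targetID {
				found = job
				return true, nil
			}
		}
		return false, nil
	})
	return found, err
}
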
// RecordData is the record data composed of a handle and values.
type RecordData struct {
	Handle ekv.Handle
	Values []types.Causet
}

func getCount(ctx stochastikctx.Context, allegrosql string) (int64, error) {
	rows, _, err := ctx.(sqlexec.RestrictedALLEGROSQLInterlockingDirectorate).InterDircRestrictedALLEGROSQLWithSnapshot(allegrosql)
	if err != nil {
		return 0, errors.Trace(err)
	}
	if len(rows) != 1 {
		return 0, errors.Errorf("cannot get count, allegrosql %s result rows %d", allegrosql, len(rows))
	}
	return rows[0].GetInt64(0), nil
}

// Count-greater types returned by ChecHoTTicesCount.
const (
	// TblCntGreater means that the number of causet rows is greater than the number of index rows.
	TblCntGreater byte = 1
	// IdxCntGreater means that the number of index rows is greater than the number of causet rows.
	IdxCntGreater byte = 2
)

// ChecHoTTicesCount compares each index's row count with the causet's row count.
// It returns the count-greater type, the index's offset, and an error.
// The error is nil if the count from every index equals the count from the causet defCausumns;
// otherwise the returned offset identifies the mismatched index.
func ChecHoTTicesCount(ctx stochastikctx.Context, dbName, blockName string, indices []string) (byte, int, error) {
	// We need to check all indexes here, including invisible indexes.
	ctx.GetStochastikVars().OptimizerUseInvisibleIndexes = true
	// Quote the names with backquotes to handle names such as `causet name`.
	allegrosql := fmt.Sprintf("SELECT COUNT(*) FROM `%s`.`%s` USE INDEX()", dbName, blockName)
	tblCnt, err := getCount(ctx, allegrosql)
	if err != nil {
		return 0, 0, errors.Trace(err)
	}
	for i, idx := range indices {
		allegrosql = fmt.Sprintf("SELECT COUNT(*) FROM `%s`.`%s` USE INDEX(`%s`)", dbName, blockName, idx)
		idxCnt, err := getCount(ctx, allegrosql)
		if err != nil {
			return 0, i, errors.Trace(err)
		}
		logutil.Logger(context.Background()).Info("check indices count",
			zap.String("causet", blockName), zap.Int64("causet cnt", tblCnt), zap.Reflect("index", idx), zap.Int64("index cnt", idxCnt))
		if tblCnt == idxCnt {
			continue
		}

		var ret byte
		if tblCnt > idxCnt {
			ret = TblCntGreater
		} else if idxCnt > tblCnt {
			ret = IdxCntGreater
		}
		return ret, i, ErrAdminCheckBlock.GenWithStack("causet count %d != index(%s) count %d", tblCnt, idx, idxCnt)
	}
	return 0, 0, nil
}

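// exampleCheckCounts is an editor-added sketch (hypothetical) showing how a
// caller might interpret the count-greater type; only ChecHoTTicesCount,
// TblCntGreater, and IdxCntGreater come from this package.
func exampleCheckCounts(ctx stochastikctx.Context, dbName, blockName string, indices []string) error {
	greater, offset, err := ChecHoTTicesCount(ctx, dbName, blockName, indices)
	if err == nil {
		return nil
	}
	if greater == 0 {
		// The count query itself failed; there is no mismatch to report.
		return errors.Trace(err)
	}
	side := "index"
	if greater == TblCntGreater {
		side = "causet"
	}
	logutil.BgLogger().Warn("count mismatch",
		zap.String("index", indices[offset]),
		zap.String("more rows on", side))
	return errors.Trace(err)
}
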
// CheckRecordAndIndex is exported for testing.
func CheckRecordAndIndex(sessCtx stochastikctx.Context, txn ekv.Transaction, t causet.Block, idx causet.Index) error {
	sc := sessCtx.GetStochastikVars().StmtCtx
	defcaus := make([]*causet.DeferredCauset, len(idx.Meta().DeferredCausets))
	for i, defCaus := range idx.Meta().DeferredCausets {
		defcaus[i] = t.DefCauss()[defCaus.Offset]
	}

	startKey := t.RecordKey(ekv.IntHandle(math.MinInt64))
	filterFunc := func(h1 ekv.Handle, vals1 []types.Causet, defcaus []*causet.DeferredCauset) (bool, error) {
		for i, val := range vals1 {
			defCaus := defcaus[i]
			if val.IsNull() {
				if allegrosql.HasNotNullFlag(defCaus.Flag) && defCaus.ToInfo().OriginDefaultValue == nil {
					return false, errors.Errorf("DeferredCauset %v is defined as not null, but can't find its value where the handle is %v", defCaus.Name, h1)
				}
				// A NULL value is regarded as its default value.
				defCausDefVal, err := causet.GetDefCausOriginDefaultValue(sessCtx, defCaus.ToInfo())
				if err != nil {
					return false, errors.Trace(err)
				}
				vals1[i] = defCausDefVal
			}
		}
		isExist, h2, err := idx.Exist(sc, txn.GetUnionStore(), vals1, h1)
		if ekv.ErrKeyExists.Equal(err) {
			record1 := &RecordData{Handle: h1, Values: vals1}
			record2 := &RecordData{Handle: h2, Values: vals1}
			return false, ErrDataInConsistent.GenWithStack("index:%#v != record:%#v", record2, record1)
		}
		if err != nil {
			return false, errors.Trace(err)
		}
		if !isExist {
			record := &RecordData{Handle: h1, Values: vals1}
			return false, ErrDataInConsistent.GenWithStack("index:%#v != record:%#v", nil, record)
		}

		return true, nil
	}
	err := iterRecords(sessCtx, txn, t, startKey, defcaus, filterFunc)
	if err != nil {
		return errors.Trace(err)
	}

	return nil
}

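// exampleCheckAllIndices is an editor-added sketch of a whole-causet check
// (hypothetical; it assumes causet.Block exposes an Indices() accessor, which
// is not shown in this file): run CheckRecordAndIndex for every index of the
// causet within one transaction.
func exampleCheckAllIndices(sessCtx stochastikctx.Context, txn ekv.Transaction, t causet.Block) error {
	for _, idx := range t.Indices() {
		if err := CheckRecordAndIndex(sessCtx, txn, t, idx); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
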
func makeRowCausetDecoder(t causet.Block, sctx stochastikctx.Context) (*causetDecoder.RowCausetDecoder, error) {
	dbName := perceptron.NewCIStr(sctx.GetStochastikVars().CurrentDB)
	exprDefCauss, _, err := memex.DeferredCausetInfos2DeferredCausetsAndNames(sctx, dbName, t.Meta().Name, t.Meta().DefCauss(), t.Meta())
	if err != nil {
		return nil, err
	}
	mockSchema := memex.NewSchema(exprDefCauss...)
	decodeDefCaussMap := causetDecoder.BuildFullDecodeDefCausMap(t.DefCauss(), mockSchema)

	return causetDecoder.NewRowCausetDecoder(t, t.DefCauss(), decodeDefCaussMap), nil
}

func iterRecords(sessCtx stochastikctx.Context, retriever ekv.Retriever, t causet.Block, startKey ekv.Key, defcaus []*causet.DeferredCauset, fn causet.RecordIterFunc) error {
	prefix := t.RecordPrefix()
	keyUpperBound := prefix.PrefixNext()

	it, err := retriever.Iter(startKey, keyUpperBound)
	if err != nil {
		return errors.Trace(err)
	}
	defer it.Close()

	if !it.Valid() {
		return nil
	}

	logutil.BgLogger().Debug("record",
		zap.Stringer("startKey", startKey),
		zap.Stringer("key", it.Key()),
		zap.Binary("value", it.Value()))
	rowCausetDecoder, err := makeRowCausetDecoder(t, sessCtx)
	if err != nil {
		return err
	}
	for it.Valid() && it.Key().HasPrefix(prefix) {
		// The first ekv pair is the event dagger information.
		// TODO: check that the dagger is valid.
		// Get the event handle.
		handle, err := blockcodec.DecodeRowKey(it.Key())
		if err != nil {
			return errors.Trace(err)
		}

		rowMap, err := rowCausetDecoder.DecodeAndEvalRowWithMap(sessCtx, handle, it.Value(), sessCtx.GetStochastikVars().Location(), time.UTC, nil)
		if err != nil {
			return errors.Trace(err)
		}
		data := make([]types.Causet, 0, len(defcaus))
		for _, defCaus := range defcaus {
			data = append(data, rowMap[defCaus.ID])
		}
		more, err := fn(handle, data, defcaus)
		if !more || err != nil {
			return errors.Trace(err)
		}

		rk := t.RecordKey(handle)
		err = ekv.NextUntil(it, soliton.RowKeyPrefixFilter(rk))
		if err != nil {
			return errors.Trace(err)
		}
	}

	return nil
}

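// exampleCountRows is an editor-added sketch (hypothetical): count the events
// of a causet by driving iterRecords with a RecordIterFunc that accepts every
// event. It relies only on calls already used elsewhere in this file.
func exampleCountRows(sessCtx stochastikctx.Context, txn ekv.Transaction, t causet.Block) (int64, error) {
	var cnt int64
	startKey := t.RecordKey(ekv.IntHandle(math.MinInt64))
	fn := func(h ekv.Handle, vals []types.Causet, defcaus []*causet.DeferredCauset) (bool, error) {
		cnt++
		return true, nil // keep iterating
	}
	err := iterRecords(sessCtx, txn, t, startKey, t.DefCauss(), fn)
	return cnt, errors.Trace(err)
}
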
var (
	// ErrDataInConsistent indicates that inconsistent data was encountered.
	ErrDataInConsistent = terror.ClassAdmin.New(errno.ErrDataInConsistent, errno.MyALLEGROSQLErrName[errno.ErrDataInConsistent])
	// ErrDBSJobNotFound indicates that the job ID was not found.
	ErrDBSJobNotFound = terror.ClassAdmin.New(errno.ErrDBSJobNotFound, errno.MyALLEGROSQLErrName[errno.ErrDBSJobNotFound])
	// ErrCancelFinishedDBSJob is returned when canceling a finished DBS job.
	ErrCancelFinishedDBSJob = terror.ClassAdmin.New(errno.ErrCancelFinishedDBSJob, errno.MyALLEGROSQLErrName[errno.ErrCancelFinishedDBSJob])
	// ErrCannotCancelDBSJob is returned when canceling an almost-finished DBS job, because canceling it now may cause data inconsistency.
	ErrCannotCancelDBSJob = terror.ClassAdmin.New(errno.ErrCannotCancelDBSJob, errno.MyALLEGROSQLErrName[errno.ErrCannotCancelDBSJob])
	// ErrAdminCheckBlock is returned when the causet records are inconsistent with the index values.
	ErrAdminCheckBlock = terror.ClassAdmin.New(errno.ErrAdminCheckBlock, errno.MyALLEGROSQLErrName[errno.ErrAdminCheckBlock])
)