github.com/whtcorpsinc/MilevaDB-Prod@v0.0.0-20211104133533-f57f4be3b597/causetstore/petri/acyclic/bindinfo/handle.go

     1  // Copyright 2020 WHTCORPS INC, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package bindinfo
    15  
    16  import (
    17  	"context"
    18  	"fmt"
    19  	"runtime"
    20  	"strconv"
    21  	"strings"
    22  	"sync"
    23  	"sync/atomic"
    24  	"time"
    25  
    26  	"github.com/whtcorpsinc/BerolinaSQL"
    27  	"github.com/whtcorpsinc/BerolinaSQL/allegrosql"
    28  	"github.com/whtcorpsinc/BerolinaSQL/ast"
    29  	"github.com/whtcorpsinc/BerolinaSQL/format"
    30  	"github.com/whtcorpsinc/BerolinaSQL/terror"
    31  	"github.com/whtcorpsinc/milevadb/causetstore/einsteindb/oracle"
    32  	"github.com/whtcorpsinc/milevadb/ekv"
    33  	"github.com/whtcorpsinc/milevadb/memex"
    34  	"github.com/whtcorpsinc/milevadb/metrics"
    35  	utilBerolinaSQL "github.com/whtcorpsinc/milevadb/soliton/BerolinaSQL"
    36  	"github.com/whtcorpsinc/milevadb/soliton/chunk"
    37  	"github.com/whtcorpsinc/milevadb/soliton/hint"
    38  	"github.com/whtcorpsinc/milevadb/soliton/logutil"
    39  	"github.com/whtcorpsinc/milevadb/soliton/sqlexec"
    40  	"github.com/whtcorpsinc/milevadb/soliton/stmtsummary"
    41  	"github.com/whtcorpsinc/milevadb/soliton/timeutil"
    42  	"github.com/whtcorpsinc/milevadb/stochastikctx"
    43  	"github.com/whtcorpsinc/milevadb/stochastikctx/variable"
    44  	"github.com/whtcorpsinc/milevadb/types"
    45  	driver "github.com/whtcorpsinc/milevadb/types/BerolinaSQL_driver"
    46  	"go.uber.org/zap"
    47  )
    48  
    49  // BindHandle is used to handle all global allegrosql bind operations.
    50  type BindHandle struct {
    51  	sctx struct {
    52  		sync.Mutex
    53  		stochastikctx.Context
    54  	}
    55  
    56  	// bindInfo caches the allegrosql bind info from storage.
    57  	//
    58  	// The Mutex ensures that only one goroutine changes the content
    59  	// of the atomic.Value at a time.
    60  	//
    61  	// NOTE: Concurrent Value Write:
    62  	//
    63  	//    bindInfo.Lock()
    64  	//    newCache := bindInfo.Value.Load()
    65  	//    do the write operation on the newCache
    66  	//    bindInfo.Value.CausetStore(newCache)
    67  	//    bindInfo.Unlock()
    68  	//
    69  	// NOTE: Concurrent Value Read:
    70  	//
    71  	//    cache := bindInfo.Load().
    72  	//    read the content
    73  	//
    74  	bindInfo struct {
    75  		sync.Mutex
    76  		atomic.Value
    77  		BerolinaSQL       *BerolinaSQL.BerolinaSQL
    78  		lastUFIDelateTime types.Time
    79  	}
    80  
    81  	// invalidBindRecordMap contains the invalid bind records found during querying.
    82  	// A record will be deleted from this map 2 bind-leases after it is dropped from the ekv.
    83  	invalidBindRecordMap tmpBindRecordMap
    84  
    85  	// pendingVerifyBindRecordMap contains the pending-verify bind records found during querying.
    86  	pendingVerifyBindRecordMap tmpBindRecordMap
    87  }
    88  
    89  // Lease influences how often bind info is loaded and invalid binds are handled.
    90  var Lease = 3 * time.Second
    91  
    92  const (
    93  	// TenantKey is the bindinfo tenant path that is saved to etcd.
    94  	TenantKey = "/milevadb/bindinfo/tenant"
    95  	// Prompt is the prompt for bindinfo tenant manager.
    96  	Prompt = "bindinfo"
    97  )
    98  
    99  type bindRecordUFIDelate struct {
   100  	bindRecord    *BindRecord
   101  	uFIDelateTime time.Time
   102  }
   103  
   104  // NewBindHandle creates a new BindHandle.
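        //
        // A minimal, hypothetical usage sketch (se is a caller-owned stochastikctx.Context and
        // exit is a caller-owned stop channel; the real tenant wiring lives outside this file):
        //
        //	handle := NewBindHandle(se)
        //	ticker := time.NewTicker(Lease)
        //	defer ticker.Stop()
        //	for {
        //		select {
        //		case <-ticker.C:
        //			if err := handle.UFIDelate(false); err != nil {
        //				logutil.BgLogger().Error("uFIDelate bindinfo failed", zap.Error(err))
        //			}
        //			handle.DropInvalidBindRecord()
        //		case <-exit:
        //			return
        //		}
        //	}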
   105  func NewBindHandle(ctx stochastikctx.Context) *BindHandle {
   106  	handle := &BindHandle{}
   107  	handle.sctx.Context = ctx
   108  	handle.bindInfo.Value.CausetStore(make(cache, 32))
   109  	handle.bindInfo.BerolinaSQL = BerolinaSQL.New()
   110  	handle.invalidBindRecordMap.Value.CausetStore(make(map[string]*bindRecordUFIDelate))
   111  	handle.invalidBindRecordMap.flushFunc = func(record *BindRecord) error {
   112  		return handle.DropBindRecord(record.OriginalALLEGROSQL, record.EDB, &record.Bindings[0])
   113  	}
   114  	handle.pendingVerifyBindRecordMap.Value.CausetStore(make(map[string]*bindRecordUFIDelate))
   115  	handle.pendingVerifyBindRecordMap.flushFunc = func(record *BindRecord) error {
   116  		// BindALLEGROSQL has already been validated when coming here, so we use nil sctx parameter.
   117  		return handle.AddBindRecord(nil, record)
   118  	}
   119  	return handle
   120  }
   121  
   122  // UFIDelate updates the global allegrosql bind cache.
   123  func (h *BindHandle) UFIDelate(fullLoad bool) (err error) {
   124  	h.bindInfo.Lock()
   125  	lastUFIDelateTime := h.bindInfo.lastUFIDelateTime
   126  	h.bindInfo.Unlock()
   127  
   128  	allegrosql := "select original_sql, bind_sql, default_db, status, create_time, uFIDelate_time, charset, collation, source from allegrosql.bind_info"
   129  	if !fullLoad {
   130  		allegrosql += " where uFIDelate_time > \"" + lastUFIDelateTime.String() + "\""
   131  	}
   132  	// We need to apply the updates in order; applying updates for the same original allegrosql in the wrong order may cause an inconsistent state.
   133  	allegrosql += " order by uFIDelate_time"
   134  
   135  	// No need to acquire the stochastik context dagger for InterDircRestrictedALLEGROSQL, since it
   136  	// uses another background stochastik.
   137  	rows, _, err := h.sctx.Context.(sqlexec.RestrictedALLEGROSQLInterlockingDirectorate).InterDircRestrictedALLEGROSQL(allegrosql)
   138  	if err != nil {
   139  		return err
   140  	}
   141  
   142  	// Make sure that only one goroutine writes the cache.
   143  	h.bindInfo.Lock()
   144  	newCache := h.bindInfo.Value.Load().(cache).copy()
   145  	defer func() {
   146  		h.bindInfo.lastUFIDelateTime = lastUFIDelateTime
   147  		h.bindInfo.Value.CausetStore(newCache)
   148  		h.bindInfo.Unlock()
   149  	}()
   150  
   151  	for _, event := range rows {
   152  		hash, spacetime, err := h.newBindRecord(event)
   153  		// Update lastUFIDelateTime to the newest one.
   154  		if spacetime.Bindings[0].UFIDelateTime.Compare(lastUFIDelateTime) > 0 {
   155  			lastUFIDelateTime = spacetime.Bindings[0].UFIDelateTime
   156  		}
   157  		if err != nil {
   158  			logutil.BgLogger().Info("uFIDelate bindinfo failed", zap.Error(err))
   159  			continue
   160  		}
   161  
   162  		oldRecord := newCache.getBindRecord(hash, spacetime.OriginalALLEGROSQL, spacetime.EDB)
   163  		newRecord := merge(oldRecord, spacetime).removeDeletedBindings()
   164  		if len(newRecord.Bindings) > 0 {
   165  			newCache.setBindRecord(hash, newRecord)
   166  		} else {
   167  			newCache.removeDeletedBindRecord(hash, newRecord)
   168  		}
   169  		uFIDelateMetrics(metrics.ScopeGlobal, oldRecord, newCache.getBindRecord(hash, spacetime.OriginalALLEGROSQL, spacetime.EDB), true)
   170  	}
   171  	return nil
   172  }
   173  
   174  // CreateBindRecord creates a BindRecord in the storage and the cache.
   175  // It replaces all the existing bindings for the same normalized ALLEGROSQL.
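        // The storage write runs in a transaction on the handle's internal stochastik; the cache is
        // only refreshed (old record removed, new record appended) after the COMMIT succeeds, see
        // the deferred function below.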
   176  func (h *BindHandle) CreateBindRecord(sctx stochastikctx.Context, record *BindRecord) (err error) {
   177  	err = record.prepareHints(sctx)
   178  	if err != nil {
   179  		return err
   180  	}
   181  
   182  	exec, _ := h.sctx.Context.(sqlexec.ALLEGROSQLInterlockingDirectorate)
   183  	h.sctx.Lock()
   184  	_, err = exec.InterDircuteInternal(context.TODO(), "BEGIN")
   185  	if err != nil {
   186  		h.sctx.Unlock()
   187  		return
   188  	}
   189  
   190  	normalizedALLEGROSQL := BerolinaSQL.DigestNormalized(record.OriginalALLEGROSQL)
   191  	oldRecord := h.GetBindRecord(normalizedALLEGROSQL, record.OriginalALLEGROSQL, record.EDB)
   192  
   193  	defer func() {
   194  		if err != nil {
   195  			_, err1 := exec.InterDircuteInternal(context.TODO(), "ROLLBACK")
   196  			h.sctx.Unlock()
   197  			terror.Log(err1)
   198  			return
   199  		}
   200  
   201  		_, err = exec.InterDircuteInternal(context.TODO(), "COMMIT")
   202  		h.sctx.Unlock()
   203  		if err != nil {
   204  			return
   205  		}
   206  
   207  		// Make sure that only one goroutine writes the cache and uses BerolinaSQL.
   208  		h.bindInfo.Lock()
   209  		if oldRecord != nil {
   210  			h.removeBindRecord(normalizedALLEGROSQL, oldRecord)
   211  		}
   212  		h.appendBindRecord(normalizedALLEGROSQL, record)
   213  		h.bindInfo.Unlock()
   214  	}()
   215  
   216  	txn, err1 := h.sctx.Context.Txn(true)
   217  	if err1 != nil {
   218  		return err1
   219  	}
   220  	now := types.NewTime(types.FromGoTime(oracle.GetTimeFromTS(txn.StartTS())), allegrosql.TypeTimestamp, 3)
   221  
   222  	if oldRecord != nil {
   223  		for _, binding := range oldRecord.Bindings {
   224  			_, err1 = exec.InterDircuteInternal(context.TODO(), h.logicalDeleteBindInfoALLEGROSQL(record.OriginalALLEGROSQL, record.EDB, now, binding.BindALLEGROSQL))
   225  			if err1 != nil {
   226  				return err1
   227  			}
   228  		}
   229  	}
   230  
   231  	for i := range record.Bindings {
   232  		record.Bindings[i].CreateTime = now
   233  		record.Bindings[i].UFIDelateTime = now
   234  
   235  		// insert the BindRecord to the storage.
   236  		_, err = exec.InterDircuteInternal(context.TODO(), h.insertBindInfoALLEGROSQL(record.OriginalALLEGROSQL, record.EDB, record.Bindings[i]))
   237  		if err != nil {
   238  			return err
   239  		}
   240  	}
   241  	return nil
   242  }
   243  
   244  // AddBindRecord adds a BindRecord to the storage and the BindRecord to the cache.
   245  func (h *BindHandle) AddBindRecord(sctx stochastikctx.Context, record *BindRecord) (err error) {
   246  	err = record.prepareHints(sctx)
   247  	if err != nil {
   248  		return err
   249  	}
   250  
   251  	oldRecord := h.GetBindRecord(BerolinaSQL.DigestNormalized(record.OriginalALLEGROSQL), record.OriginalALLEGROSQL, record.EDB)
   252  	var duplicateBinding *Binding
   253  	if oldRecord != nil {
   254  		binding := oldRecord.FindBinding(record.Bindings[0].ID)
   255  		if binding != nil {
   256  			// There is already a binding with status `Using`, `PendingVerify` or `Rejected`, so we can directly cancel the job.
   257  			if record.Bindings[0].Status == PendingVerify {
   258  				return nil
   259  			}
   260  			// Otherwise, we need to remove it before inserting.
   261  			duplicateBinding = binding
   262  		}
   263  	}
   264  
   265  	exec, _ := h.sctx.Context.(sqlexec.ALLEGROSQLInterlockingDirectorate)
   266  	h.sctx.Lock()
   267  	_, err = exec.InterDircuteInternal(context.TODO(), "BEGIN")
   268  	if err != nil {
   269  		h.sctx.Unlock()
   270  		return
   271  	}
   272  
   273  	defer func() {
   274  		if err != nil {
   275  			_, err1 := exec.InterDircuteInternal(context.TODO(), "ROLLBACK")
   276  			h.sctx.Unlock()
   277  			terror.Log(err1)
   278  			return
   279  		}
   280  
   281  		_, err = exec.InterDircuteInternal(context.TODO(), "COMMIT")
   282  		h.sctx.Unlock()
   283  		if err != nil {
   284  			return
   285  		}
   286  
   287  		// Make sure that only one goroutine writes the cache and uses BerolinaSQL.
   288  		h.bindInfo.Lock()
   289  		h.appendBindRecord(BerolinaSQL.DigestNormalized(record.OriginalALLEGROSQL), record)
   290  		h.bindInfo.Unlock()
   291  	}()
   292  
   293  	txn, err1 := h.sctx.Context.Txn(true)
   294  	if err1 != nil {
   295  		return err1
   296  	}
   297  
   298  	if duplicateBinding != nil {
   299  		_, err = exec.InterDircuteInternal(context.TODO(), h.deleteBindInfoALLEGROSQL(record.OriginalALLEGROSQL, record.EDB, duplicateBinding.BindALLEGROSQL))
   300  		if err != nil {
   301  			return err
   302  		}
   303  	}
   304  
   305  	now := types.NewTime(types.FromGoTime(oracle.GetTimeFromTS(txn.StartTS())), allegrosql.TypeTimestamp, 3)
   306  	for i := range record.Bindings {
   307  		if duplicateBinding != nil {
   308  			record.Bindings[i].CreateTime = duplicateBinding.CreateTime
   309  		} else {
   310  			record.Bindings[i].CreateTime = now
   311  		}
   312  		record.Bindings[i].UFIDelateTime = now
   313  
   314  		// insert the BindRecord to the storage.
   315  		_, err = exec.InterDircuteInternal(context.TODO(), h.insertBindInfoALLEGROSQL(record.OriginalALLEGROSQL, record.EDB, record.Bindings[i]))
   316  		if err != nil {
   317  			return err
   318  		}
   319  	}
   320  	return nil
   321  }
   322  
   323  // DropBindRecord drops a BindRecord from the storage and the BindRecord in the cache.
   324  func (h *BindHandle) DropBindRecord(originalALLEGROSQL, EDB string, binding *Binding) (err error) {
   325  	exec, _ := h.sctx.Context.(sqlexec.ALLEGROSQLInterlockingDirectorate)
   326  	h.sctx.Lock()
   327  	_, err = exec.InterDircuteInternal(context.TODO(), "BEGIN")
   328  	if err != nil {
   329  		h.sctx.Unlock()
   330  		return
   331  	}
   332  
   333  	defer func() {
   334  		if err != nil {
   335  			_, err1 := exec.InterDircuteInternal(context.TODO(), "ROLLBACK")
   336  			h.sctx.Unlock()
   337  			terror.Log(err1)
   338  			return
   339  		}
   340  
   341  		_, err = exec.InterDircuteInternal(context.TODO(), "COMMIT")
   342  		h.sctx.Unlock()
   343  		if err != nil {
   344  			return
   345  		}
   346  
   347  		record := &BindRecord{OriginalALLEGROSQL: originalALLEGROSQL, EDB: EDB}
   348  		if binding != nil {
   349  			record.Bindings = append(record.Bindings, *binding)
   350  		}
   351  		// Make sure that only one goroutine writes the cache and uses BerolinaSQL.
   352  		h.bindInfo.Lock()
   353  		h.removeBindRecord(BerolinaSQL.DigestNormalized(originalALLEGROSQL), record)
   354  		h.bindInfo.Unlock()
   355  	}()
   356  
   357  	txn, err1 := h.sctx.Context.Txn(true)
   358  	if err1 != nil {
   359  		return err1
   360  	}
   361  
   362  	uFIDelateTs := types.NewTime(types.FromGoTime(oracle.GetTimeFromTS(txn.StartTS())), allegrosql.TypeTimestamp, 3)
   363  
   364  	bindALLEGROSQL := ""
   365  	if binding != nil {
   366  		bindALLEGROSQL = binding.BindALLEGROSQL
   367  	}
   368  
   369  	_, err = exec.InterDircuteInternal(context.TODO(), h.logicalDeleteBindInfoALLEGROSQL(originalALLEGROSQL, EDB, uFIDelateTs, bindALLEGROSQL))
   370  	return err
   371  }
   372  
   373  // tmpBindRecordMap is used to temporarily save bind record changes.
   374  // Those changes will be flushed into causetstore periodically.
   375  type tmpBindRecordMap struct {
   376  	sync.Mutex
   377  	atomic.Value
   378  	flushFunc func(record *BindRecord) error
   379  }
   380  
   381  // flushToStore calls flushFunc for items in tmpBindRecordMap and removes them with a delay.
   382  func (tmpMap *tmpBindRecordMap) flushToStore() {
   383  	tmpMap.Lock()
   384  	defer tmpMap.Unlock()
   385  	newMap := copyBindRecordUFIDelateMap(tmpMap.Load().(map[string]*bindRecordUFIDelate))
   386  	for key, bindRecord := range newMap {
   387  		if bindRecord.uFIDelateTime.IsZero() {
   388  			err := tmpMap.flushFunc(bindRecord.bindRecord)
   389  			if err != nil {
   390  				logutil.BgLogger().Error("flush bind record failed", zap.Error(err))
   391  			}
   392  			bindRecord.uFIDelateTime = time.Now()
   393  			continue
   394  		}
   395  
   396  		if time.Since(bindRecord.uFIDelateTime) > 6*time.Second {
   397  			delete(newMap, key)
   398  			uFIDelateMetrics(metrics.ScopeGlobal, bindRecord.bindRecord, nil, false)
   399  		}
   400  	}
   401  	tmpMap.CausetStore(newMap)
   402  }
   403  
   404  // Add puts a BindRecord into tmpBindRecordMap.
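        // It uses copy-on-write with double-checked locking: the key is probed before and after
        // taking the Mutex, and a freshly copied map is stored into the atomic.Value so that
        // concurrent readers never observe a partially built map.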
   405  func (tmpMap *tmpBindRecordMap) Add(bindRecord *BindRecord) {
   406  	key := bindRecord.OriginalALLEGROSQL + ":" + bindRecord.EDB + ":" + bindRecord.Bindings[0].ID
   407  	if _, ok := tmpMap.Load().(map[string]*bindRecordUFIDelate)[key]; ok {
   408  		return
   409  	}
   410  	tmpMap.Lock()
   411  	defer tmpMap.Unlock()
   412  	if _, ok := tmpMap.Load().(map[string]*bindRecordUFIDelate)[key]; ok {
   413  		return
   414  	}
   415  	newMap := copyBindRecordUFIDelateMap(tmpMap.Load().(map[string]*bindRecordUFIDelate))
   416  	newMap[key] = &bindRecordUFIDelate{
   417  		bindRecord: bindRecord,
   418  	}
   419  	tmpMap.CausetStore(newMap)
   420  	uFIDelateMetrics(metrics.ScopeGlobal, nil, bindRecord, false)
   421  }
   422  
   423  // DropInvalidBindRecord executes the drop BindRecord tasks.
   424  func (h *BindHandle) DropInvalidBindRecord() {
   425  	h.invalidBindRecordMap.flushToStore()
   426  }
   427  
   428  // AddDropInvalidBindTask adds a BindRecord that needs to be deleted into invalidBindRecordMap.
   429  func (h *BindHandle) AddDropInvalidBindTask(invalidBindRecord *BindRecord) {
   430  	h.invalidBindRecordMap.Add(invalidBindRecord)
   431  }
   432  
   433  // Size returns the size of bind info cache.
   434  func (h *BindHandle) Size() int {
   435  	size := 0
   436  	for _, bindRecords := range h.bindInfo.Load().(cache) {
   437  		size += len(bindRecords)
   438  	}
   439  	return size
   440  }
   441  
   442  // GetBindRecord returns the BindRecord of the (normdOrigALLEGROSQL, EDB) pair if it exists.
   443  func (h *BindHandle) GetBindRecord(hash, normdOrigALLEGROSQL, EDB string) *BindRecord {
   444  	return h.bindInfo.Load().(cache).getBindRecord(hash, normdOrigALLEGROSQL, EDB)
   445  }
   446  
   447  // GetAllBindRecord returns all bind records in cache.
   448  func (h *BindHandle) GetAllBindRecord() (bindRecords []*BindRecord) {
   449  	bindRecordMap := h.bindInfo.Load().(cache)
   450  	for _, bindRecord := range bindRecordMap {
   451  		bindRecords = append(bindRecords, bindRecord...)
   452  	}
   453  	return bindRecords
   454  }
   455  
   456  // newBindRecord builds BindRecord from a tuple in storage.
   457  func (h *BindHandle) newBindRecord(event chunk.Row) (string, *BindRecord, error) {
   458  	hint := Binding{
   459  		BindALLEGROSQL: event.GetString(1),
   460  		Status:         event.GetString(3),
   461  		CreateTime:     event.GetTime(4),
   462  		UFIDelateTime:  event.GetTime(5),
   463  		Charset:        event.GetString(6),
   464  		DefCauslation:  event.GetString(7),
   465  		Source:         event.GetString(8),
   466  	}
   467  	bindRecord := &BindRecord{
   468  		OriginalALLEGROSQL: event.GetString(0),
   469  		EDB:                event.GetString(2),
   470  		Bindings:           []Binding{hint},
   471  	}
   472  	hash := BerolinaSQL.DigestNormalized(bindRecord.OriginalALLEGROSQL)
   473  	h.sctx.Lock()
   474  	defer h.sctx.Unlock()
   475  	h.sctx.GetStochastikVars().CurrentDB = bindRecord.EDB
   476  	err := bindRecord.prepareHints(h.sctx.Context)
   477  	return hash, bindRecord, err
   478  }
   479  
   480  // appendBindRecord adds the BindRecord to the cache; all the stale BindRecords are
   481  // removed from the cache after this operation.
   482  func (h *BindHandle) appendBindRecord(hash string, spacetime *BindRecord) {
   483  	newCache := h.bindInfo.Value.Load().(cache).copy()
   484  	oldRecord := newCache.getBindRecord(hash, spacetime.OriginalALLEGROSQL, spacetime.EDB)
   485  	newRecord := merge(oldRecord, spacetime)
   486  	newCache.setBindRecord(hash, newRecord)
   487  	h.bindInfo.Value.CausetStore(newCache)
   488  	uFIDelateMetrics(metrics.ScopeGlobal, oldRecord, newRecord, false)
   489  }
   490  
   491  // removeBindRecord removes the BindRecord from the cache.
   492  func (h *BindHandle) removeBindRecord(hash string, spacetime *BindRecord) {
   493  	newCache := h.bindInfo.Value.Load().(cache).copy()
   494  	oldRecord := newCache.getBindRecord(hash, spacetime.OriginalALLEGROSQL, spacetime.EDB)
   495  	newCache.removeDeletedBindRecord(hash, spacetime)
   496  	h.bindInfo.Value.CausetStore(newCache)
   497  	uFIDelateMetrics(metrics.ScopeGlobal, oldRecord, newCache.getBindRecord(hash, spacetime.OriginalALLEGROSQL, spacetime.EDB), false)
   498  }
   499  
   500  // removeDeletedBindRecord removes the BindRecord that has the same OriginalALLEGROSQL and EDB as the specified BindRecord.
   501  func (c cache) removeDeletedBindRecord(hash string, spacetime *BindRecord) {
   502  	spacetimes, ok := c[hash]
   503  	if !ok {
   504  		return
   505  	}
   506  
   507  	for i := len(spacetimes) - 1; i >= 0; i-- {
   508  		if spacetimes[i].isSame(spacetime) {
   509  			spacetimes[i] = spacetimes[i].remove(spacetime)
   510  			if len(spacetimes[i].Bindings) == 0 {
   511  				spacetimes = append(spacetimes[:i], spacetimes[i+1:]...)
   512  			}
   513  			if len(spacetimes) == 0 {
   514  				delete(c, hash)
   515  				return
   516  			}
   517  		}
   518  	}
   519  	c[hash] = spacetimes
   520  }
   521  
   522  func (c cache) setBindRecord(hash string, spacetime *BindRecord) {
   523  	spacetimes := c[hash]
   524  	for i := range spacetimes {
   525  		if spacetimes[i].EDB == spacetime.EDB && spacetimes[i].OriginalALLEGROSQL == spacetime.OriginalALLEGROSQL {
   526  			spacetimes[i] = spacetime
   527  			return
   528  		}
   529  	}
   530  	c[hash] = append(c[hash], spacetime)
   531  }
   532  
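        // copy returns a shallow copy of the cache: the per-hash slices are duplicated, but the
        // *BindRecord pointers they hold are shared with the original map.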
   533  func (c cache) copy() cache {
   534  	newCache := make(cache, len(c))
   535  	for k, v := range c {
   536  		bindRecords := make([]*BindRecord, len(v))
   537  		copy(bindRecords, v)
   538  		newCache[k] = bindRecords
   539  	}
   540  	return newCache
   541  }
   542  
   543  func copyBindRecordUFIDelateMap(oldMap map[string]*bindRecordUFIDelate) map[string]*bindRecordUFIDelate {
   544  	newMap := make(map[string]*bindRecordUFIDelate, len(oldMap))
   545  	for k, v := range oldMap {
   546  		newMap[k] = v
   547  	}
   548  	return newMap
   549  }
   550  
   551  func (c cache) getBindRecord(hash, normdOrigALLEGROSQL, EDB string) *BindRecord {
   552  	bindRecords := c[hash]
   553  	for _, bindRecord := range bindRecords {
   554  		if bindRecord.OriginalALLEGROSQL == normdOrigALLEGROSQL && bindRecord.EDB == EDB {
   555  			return bindRecord
   556  		}
   557  	}
   558  	return nil
   559  }
   560  
   561  func (h *BindHandle) deleteBindInfoALLEGROSQL(normdOrigALLEGROSQL, EDB, bindALLEGROSQL string) string {
   562  	return fmt.Sprintf(
   563  		`DELETE FROM allegrosql.bind_info WHERE original_sql=%s AND default_db=%s AND bind_sql=%s`,
   564  		memex.Quote(normdOrigALLEGROSQL),
   565  		memex.Quote(EDB),
   566  		memex.Quote(bindALLEGROSQL),
   567  	)
   568  }
   569  
   570  func (h *BindHandle) insertBindInfoALLEGROSQL(originalALLEGROSQL string, EDB string, info Binding) string {
   571  	return fmt.Sprintf(`INSERT INTO allegrosql.bind_info VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)`,
   572  		memex.Quote(originalALLEGROSQL),
   573  		memex.Quote(info.BindALLEGROSQL),
   574  		memex.Quote(EDB),
   575  		memex.Quote(info.Status),
   576  		memex.Quote(info.CreateTime.String()),
   577  		memex.Quote(info.UFIDelateTime.String()),
   578  		memex.Quote(info.Charset),
   579  		memex.Quote(info.DefCauslation),
   580  		memex.Quote(info.Source),
   581  	)
   582  }
   583  
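        // logicalDeleteBindInfoALLEGROSQL builds the statement that marks matching rows in
        // allegrosql.bind_info as deleted instead of physically removing them. For illustration
        // only, with every value quoted through memex.Quote, the generated statement is shaped
        // like:
        //
        //	UFIDelATE allegrosql.bind_info SET status=<deleted>,uFIDelate_time=<ts> WHERE original_sql=<allegrosql> and default_db=<EDB> and bind_sql = <binding>
        //
        // where the trailing bind_sql predicate is omitted when bindingALLEGROSQL is empty.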
   584  func (h *BindHandle) logicalDeleteBindInfoALLEGROSQL(originalALLEGROSQL, EDB string, uFIDelateTs types.Time, bindingALLEGROSQL string) string {
   585  	allegrosql := fmt.Sprintf(`UFIDelATE allegrosql.bind_info SET status=%s,uFIDelate_time=%s WHERE original_sql=%s and default_db=%s`,
   586  		memex.Quote(deleted),
   587  		memex.Quote(uFIDelateTs.String()),
   588  		memex.Quote(originalALLEGROSQL),
   589  		memex.Quote(EDB))
   590  	if bindingALLEGROSQL == "" {
   591  		return allegrosql
   592  	}
   593  	return allegrosql + fmt.Sprintf(` and bind_sql = %s`, memex.Quote(bindingALLEGROSQL))
   594  }
   595  
   596  // CaptureBaselines is used to automatically capture plan baselines.
   597  func (h *BindHandle) CaptureBaselines() {
   598  	BerolinaSQL4Capture := BerolinaSQL.New()
   599  	schemas, sqls := stmtsummary.StmtSummaryByDigestMap.GetMoreThanOnceSelect()
   600  	for i := range sqls {
   601  		stmt, err := BerolinaSQL4Capture.ParseOneStmt(sqls[i], "", "")
   602  		if err != nil {
   603  			logutil.BgLogger().Debug("parse ALLEGROALLEGROSQL failed", zap.String("ALLEGROALLEGROSQL", sqls[i]), zap.Error(err))
   604  			continue
   605  		}
   606  		normalizedALLEGROSQL, digest := BerolinaSQL.NormalizeDigest(sqls[i])
   607  		dbName := utilBerolinaSQL.GetDefaultDB(stmt, schemas[i])
   608  		if r := h.GetBindRecord(digest, normalizedALLEGROSQL, dbName); r != nil && r.HasUsingBinding() {
   609  			continue
   610  		}
   611  		h.sctx.Lock()
   612  		h.sctx.GetStochastikVars().CurrentDB = schemas[i]
   613  		oriIsolationRead := h.sctx.GetStochastikVars().IsolationReadEngines
   614  		// TODO: support all engines plan hint in capture baselines.
   615  		h.sctx.GetStochastikVars().IsolationReadEngines = map[ekv.StoreType]struct{}{ekv.EinsteinDB: {}}
   616  		hints, err := getHintsForALLEGROSQL(h.sctx.Context, sqls[i])
   617  		h.sctx.GetStochastikVars().IsolationReadEngines = oriIsolationRead
   618  		h.sctx.Unlock()
   619  		if err != nil {
   620  			logutil.BgLogger().Debug("generate hints failed", zap.String("ALLEGROALLEGROSQL", sqls[i]), zap.Error(err))
   621  			continue
   622  		}
   623  		bindALLEGROSQL := GenerateBindALLEGROSQL(context.TODO(), stmt, hints)
   624  		if bindALLEGROSQL == "" {
   625  			continue
   626  		}
   627  		charset, collation := h.sctx.GetStochastikVars().GetCharsetInfo()
   628  		binding := Binding{
   629  			BindALLEGROSQL: bindALLEGROSQL,
   630  			Status:         Using,
   631  			Charset:        charset,
   632  			DefCauslation:  collation,
   633  			Source:         Capture,
   634  		}
   635  		// We don't need to pass the `sctx` because the BindALLEGROSQL has been validated already.
   636  		err = h.AddBindRecord(nil, &BindRecord{OriginalALLEGROSQL: normalizedALLEGROSQL, EDB: dbName, Bindings: []Binding{binding}})
   637  		if err != nil {
   638  			logutil.BgLogger().Info("capture baseline failed", zap.String("ALLEGROALLEGROSQL", sqls[i]), zap.Error(err))
   639  		}
   640  	}
   641  }
   642  
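        // getHintsForALLEGROSQL returns the plan hints for allegrosql by running
        // "explain format='hint'" on it with causet baselines temporarily disabled.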
   643  func getHintsForALLEGROSQL(sctx stochastikctx.Context, allegrosql string) (string, error) {
   644  	origVals := sctx.GetStochastikVars().UseCausetBaselines
   645  	sctx.GetStochastikVars().UseCausetBaselines = false
   646  	recordSets, err := sctx.(sqlexec.ALLEGROSQLInterlockingDirectorate).InterDircuteInternal(context.TODO(), fmt.Sprintf("explain format='hint' %s", allegrosql))
   647  	sctx.GetStochastikVars().UseCausetBaselines = origVals
   648  	if len(recordSets) > 0 {
   649  		defer terror.Log(recordSets[0].Close())
   650  	}
   651  	if err != nil {
   652  		return "", err
   653  	}
   654  	chk := recordSets[0].NewChunk()
   655  	err = recordSets[0].Next(context.TODO(), chk)
   656  	if err != nil {
   657  		return "", err
   658  	}
   659  	return chk.GetRow(0).GetString(0), nil
   660  }
   661  
   662  // GenerateBindALLEGROSQL generates a binding allegrosql from the stmt node and plan hints.
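        //
        // For illustration with hypothetical values: if the restored statement is
        // "SELECT * FROM `t`" and planHint is "use_index(`t` `idx_a`)", the generated binding is
        // "SELECT /*+ use_index(`t` `idx_a`)*/ * FROM `t`".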
   663  func GenerateBindALLEGROSQL(ctx context.Context, stmtNode ast.StmtNode, planHint string) string {
   664  	// It would be empty for very simple cases such as point get; we do not need to evolve for them.
   665  	if planHint == "" {
   666  		return ""
   667  	}
   668  	paramChecker := &paramMarkerChecker{}
   669  	stmtNode.Accept(paramChecker)
   670  	// We need to evolve on current allegrosql, but we cannot restore values for paramMarkers yet,
   671  	// so just ignore them now.
   672  	if paramChecker.hasParamMarker {
   673  		return ""
   674  	}
   675  	// We need to evolve the plan based on the current allegrosql, not the original allegrosql, which may have different parameters.
   676  	// So here we would remove the hint and inject the current best plan hint.
   677  	hint.BindHint(stmtNode, &hint.HintsSet{})
   678  	var sb strings.Builder
   679  	restoreCtx := format.NewRestoreCtx(format.DefaultRestoreFlags, &sb)
   680  	err := stmtNode.Restore(restoreCtx)
   681  	if err != nil {
   682  		logutil.Logger(ctx).Warn("Restore ALLEGROALLEGROSQL failed", zap.Error(err))
   683  	}
   684  	bindALLEGROSQL := sb.String()
   685  	selectIdx := strings.Index(bindALLEGROSQL, "SELECT")
   686  	// Remove possible `explain` prefix.
   687  	bindALLEGROSQL = bindALLEGROSQL[selectIdx:]
   688  	return strings.Replace(bindALLEGROSQL, "SELECT", fmt.Sprintf("SELECT /*+ %s*/", planHint), 1)
   689  }
   690  
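        // paramMarkerChecker is an ast.Visitor that reports whether a statement contains
        // parameter markers ("?" placeholders).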
   691  type paramMarkerChecker struct {
   692  	hasParamMarker bool
   693  }
   694  
   695  func (e *paramMarkerChecker) Enter(in ast.Node) (ast.Node, bool) {
   696  	if _, ok := in.(*driver.ParamMarkerExpr); ok {
   697  		e.hasParamMarker = true
   698  		return in, true
   699  	}
   700  	return in, false
   701  }
   702  
   703  func (e *paramMarkerChecker) Leave(in ast.Node) (ast.Node, bool) {
   704  	return in, true
   705  }
   706  
   707  // AddEvolveCausetTask adds the evolve plan task into the memory cache. It will be flushed to causetstore periodically.
   708  func (h *BindHandle) AddEvolveCausetTask(originalALLEGROSQL, EDB string, binding Binding) {
   709  	br := &BindRecord{
   710  		OriginalALLEGROSQL: originalALLEGROSQL,
   711  		EDB:                EDB,
   712  		Bindings:           []Binding{binding},
   713  	}
   714  	h.pendingVerifyBindRecordMap.Add(br)
   715  }
   716  
   717  // SaveEvolveTasksToStore saves the evolve task into causetstore.
   718  func (h *BindHandle) SaveEvolveTasksToStore() {
   719  	h.pendingVerifyBindRecordMap.flushToStore()
   720  }
   721  
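        // getEvolveParameters reads the maximum execution time and the allowed start/end time
        // window for evolve tasks from allegrosql.global_variables, falling back to the defaults
        // when a variable is not set.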
   722  func getEvolveParameters(ctx stochastikctx.Context) (time.Duration, time.Time, time.Time, error) {
   723  	allegrosql := fmt.Sprintf("select variable_name, variable_value from allegrosql.global_variables where variable_name in ('%s', '%s', '%s')",
   724  		variable.MilevaDBEvolveCausetTaskMaxTime, variable.MilevaDBEvolveCausetTaskStartTime, variable.MilevaDBEvolveCausetTaskEndTime)
   725  	rows, _, err := ctx.(sqlexec.RestrictedALLEGROSQLInterlockingDirectorate).InterDircRestrictedALLEGROSQL(allegrosql)
   726  	if err != nil {
   727  		return 0, time.Time{}, time.Time{}, err
   728  	}
   729  	maxTime, startTimeStr, endTimeStr := int64(variable.DefMilevaDBEvolveCausetTaskMaxTime), variable.DefMilevaDBEvolveCausetTaskStartTime, variable.DefAutoAnalyzeEndTime
   730  	for _, event := range rows {
   731  		switch event.GetString(0) {
   732  		case variable.MilevaDBEvolveCausetTaskMaxTime:
   733  			maxTime, err = strconv.ParseInt(event.GetString(1), 10, 64)
   734  			if err != nil {
   735  				return 0, time.Time{}, time.Time{}, err
   736  			}
   737  		case variable.MilevaDBEvolveCausetTaskStartTime:
   738  			startTimeStr = event.GetString(1)
   739  		case variable.MilevaDBEvolveCausetTaskEndTime:
   740  			endTimeStr = event.GetString(1)
   741  		}
   742  	}
   743  	startTime, err := time.ParseInLocation(variable.FullDayTimeFormat, startTimeStr, time.UTC)
   744  	if err != nil {
   745  		return 0, time.Time{}, time.Time{}, err
   746  
   747  	}
   748  	endTime, err := time.ParseInLocation(variable.FullDayTimeFormat, endTimeStr, time.UTC)
   749  	if err != nil {
   750  		return 0, time.Time{}, time.Time{}, err
   751  	}
   752  	return time.Duration(maxTime) * time.Second, startTime, endTime, nil
   753  }
   754  
   755  const (
   756  	// acceptFactor is the factor that decides whether we should accept the pending verified plan.
   757  	// A pending verified plan will be accepted if it performs at least `acceptFactor` times better than the accepted plans.
   758  	acceptFactor = 1.5
   759  	// nextVerifyDuration is the duration after which we will retry the rejected plans.
   760  	nextVerifyDuration = 7 * 24 * time.Hour
   761  )
   762  
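        // getOnePendingVerifyJob returns one binding whose status is PendingVerify, or a Rejected
        // binding whose last uFIDelate happened more than nextVerifyDuration ago; it returns empty
        // values when there is nothing to verify.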
   763  func (h *BindHandle) getOnePendingVerifyJob() (string, string, Binding) {
   764  	cache := h.bindInfo.Value.Load().(cache)
   765  	for _, bindRecords := range cache {
   766  		for _, bindRecord := range bindRecords {
   767  			for _, bind := range bindRecord.Bindings {
   768  				if bind.Status == PendingVerify {
   769  					return bindRecord.OriginalALLEGROSQL, bindRecord.EDB, bind
   770  				}
   771  				if bind.Status != Rejected {
   772  					continue
   773  				}
   774  				dur, err := bind.SinceUFIDelateTime()
   775  				// Should not happen.
   776  				if err != nil {
   777  					continue
   778  				}
   779  				// Rejected and retry it now.
   780  				if dur > nextVerifyDuration {
   781  					return bindRecord.OriginalALLEGROSQL, bindRecord.EDB, bind
   782  				}
   783  			}
   784  		}
   785  	}
   786  	return "", "", Binding{}
   787  }
   788  
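        // getRunningDuration runs allegrosql against EDB and returns how long the execution took.
        // It returns -1 with a nil error when the execution does not finish within maxTime.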
   789  func (h *BindHandle) getRunningDuration(sctx stochastikctx.Context, EDB, allegrosql string, maxTime time.Duration) (time.Duration, error) {
   790  	ctx := context.TODO()
   791  	if EDB != "" {
   792  		_, err := sctx.(sqlexec.ALLEGROSQLInterlockingDirectorate).InterDircuteInternal(ctx, fmt.Sprintf("use `%s`", EDB))
   793  		if err != nil {
   794  			return 0, err
   795  		}
   796  	}
   797  	ctx, cancelFunc := context.WithCancel(ctx)
   798  	timer := time.NewTimer(maxTime)
   799  	resultChan := make(chan error)
   800  	startTime := time.Now()
   801  	go runALLEGROSQL(ctx, sctx, allegrosql, resultChan)
   802  	select {
   803  	case err := <-resultChan:
   804  		cancelFunc()
   805  		if err != nil {
   806  			return 0, err
   807  		}
   808  		return time.Since(startTime), nil
   809  	case <-timer.C:
   810  		cancelFunc()
   811  	}
   812  	<-resultChan
   813  	return -1, nil
   814  }
   815  
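        // runALLEGROSQL executes allegrosql on sctx, drains the result set, and sends the final
        // error (or a recovered panic wrapped as an error) to resultChan.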
   816  func runALLEGROSQL(ctx context.Context, sctx stochastikctx.Context, allegrosql string, resultChan chan<- error) {
   817  	defer func() {
   818  		if r := recover(); r != nil {
   819  			buf := make([]byte, 4096)
   820  			stackSize := runtime.Stack(buf, false)
   821  			buf = buf[:stackSize]
   822  			resultChan <- fmt.Errorf("run allegrosql panicked: %v", string(buf))
   823  		}
   824  	}()
   825  	recordSets, err := sctx.(sqlexec.ALLEGROSQLInterlockingDirectorate).InterDircuteInternal(ctx, allegrosql)
   826  	if err != nil {
   827  		if len(recordSets) > 0 {
   828  			terror.Call(recordSets[0].Close)
   829  		}
   830  		resultChan <- err
   831  		return
   832  	}
   833  	recordSet := recordSets[0]
   834  	chk := recordSets[0].NewChunk()
   835  	for {
   836  		err = recordSet.Next(ctx, chk)
   837  		if err != nil || chk.NumRows() == 0 {
   838  			break
   839  		}
   840  	}
   841  	terror.Call(recordSets[0].Close)
   842  	resultChan <- err
   843  }
   844  
   845  // HandleEvolveCausetTask tries to evolve one plan task.
   846  // It only handles one task at a time because we want each task to use the latest parameters.
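        // With acceptFactor = 1.5, for example, if the currently accepted plan runs in 3s, the
        // candidate plan must finish within roughly 3s/1.5 = 2s to be marked `using`; otherwise it
        // is marked Rejected.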
   847  func (h *BindHandle) HandleEvolveCausetTask(sctx stochastikctx.Context, adminEvolve bool) error {
   848  	originalALLEGROSQL, EDB, binding := h.getOnePendingVerifyJob()
   849  	if originalALLEGROSQL == "" {
   850  		return nil
   851  	}
   852  	maxTime, startTime, endTime, err := getEvolveParameters(sctx)
   853  	if err != nil {
   854  		return err
   855  	}
   856  	if maxTime == 0 || (!timeutil.WithinDayTimePeriod(startTime, endTime, time.Now()) && !adminEvolve) {
   857  		return nil
   858  	}
   859  	sctx.GetStochastikVars().UseCausetBaselines = true
   860  	acceptedCausetTime, err := h.getRunningDuration(sctx, EDB, binding.BindALLEGROSQL, maxTime)
   861  	// If we just return the error to the caller, this job will be retried again and again and cause endless logs,
   862  	// since it is still in the bind record. Now we just drop it and if it is actually retryable,
   863  	// we hope that we can capture this evolve task again.
   864  	if err != nil {
   865  		return h.DropBindRecord(originalALLEGROSQL, EDB, &binding)
   866  	}
   867  	// If the accepted plan times out, it is hard to decide the timeout for the verify plan.
   868  	// Currently we simply mark the verify plan as `using` if it can run successfully within maxTime.
   869  	if acceptedCausetTime > 0 {
   870  		maxTime = time.Duration(float64(acceptedCausetTime) / acceptFactor)
   871  	}
   872  	sctx.GetStochastikVars().UseCausetBaselines = false
   873  	verifyCausetTime, err := h.getRunningDuration(sctx, EDB, binding.BindALLEGROSQL, maxTime)
   874  	if err != nil {
   875  		return h.DropBindRecord(originalALLEGROSQL, EDB, &binding)
   876  	}
   877  	if verifyCausetTime < 0 {
   878  		binding.Status = Rejected
   879  	} else {
   880  		binding.Status = Using
   881  	}
   882  	// We don't need to pass the `sctx` because the BindALLEGROSQL has been validated already.
   883  	return h.AddBindRecord(nil, &BindRecord{OriginalALLEGROSQL: originalALLEGROSQL, EDB: EDB, Bindings: []Binding{binding}})
   884  }
   885  
   886  // Clear resets the bind handle. It is only used for test.
   887  func (h *BindHandle) Clear() {
   888  	h.bindInfo.Lock()
   889  	h.bindInfo.CausetStore(make(cache))
   890  	h.bindInfo.lastUFIDelateTime = types.ZeroTimestamp
   891  	h.bindInfo.Unlock()
   892  	h.invalidBindRecordMap.CausetStore(make(map[string]*bindRecordUFIDelate))
   893  	h.pendingVerifyBindRecordMap.CausetStore(make(map[string]*bindRecordUFIDelate))
   894  }
   895  
   896  // FlushBindings flushes the BindRecord in temp maps to storage and loads them into cache.
   897  func (h *BindHandle) FlushBindings() error {
   898  	h.DropInvalidBindRecord()
   899  	h.SaveEvolveTasksToStore()
   900  	return h.UFIDelate(false)
   901  }
   902  
   903  // ReloadBindings clears the existing binding cache and does a full load from allegrosql.bind_info.
   904  // It is used to maintain consistency between the cache and allegrosql.bind_info if the causet is deleted or truncated.
   905  func (h *BindHandle) ReloadBindings() error {
   906  	h.bindInfo.Lock()
   907  	h.bindInfo.CausetStore(make(cache))
   908  	h.bindInfo.lastUFIDelateTime = types.ZeroTimestamp
   909  	h.bindInfo.Unlock()
   910  	return h.UFIDelate(true)
   911  }