github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/dm/pkg/shardddl/optimism/keeper.go

     1  // Copyright 2020 PingCAP, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package optimism
    15  
    16  import (
    17  	"sort"
    18  	"sync"
    19  
    20  	"github.com/pingcap/tidb/pkg/util/schemacmp"
    21  	"github.com/pingcap/tiflow/dm/config/dbconfig"
    22  	"github.com/pingcap/tiflow/dm/pkg/log"
    23  	"github.com/pingcap/tiflow/dm/pkg/terror"
    24  	"github.com/pingcap/tiflow/dm/pkg/utils"
    25  	clientv3 "go.etcd.io/etcd/client/v3"
    26  	"go.uber.org/zap"
    27  )
    28  
    29  // DownstreamMeta is used to fetch table info from the downstream.
    30  type DownstreamMeta struct {
    31  	dbConfig *dbconfig.DBConfig
    32  	meta     string
    33  }
    34  
    35  // LockKeeper is used to keep and handle DDL locks conveniently.
    36  // The lock information does not need to be persistent and can be re-constructed from the shard DDL info,
    37  // but the drop columns should be persisted.
    38  type LockKeeper struct {
    39  	mu    sync.RWMutex
    40  	locks map[string]*Lock // lockID -> Lock
    41  
    42  	downstreamMetaMap     map[string]*DownstreamMeta
    43  	getDownstreamMetaFunc func(string) (*dbconfig.DBConfig, string)
    44  	// lockID -> column name -> source -> upSchema -> upTable -> int
    45  	dropColumns map[string]map[string]map[string]map[string]map[string]DropColumnStage
    46  }
    47  
    48  // NewLockKeeper creates a new LockKeeper instance.
    49  func NewLockKeeper(getDownstreamMetaFunc func(string) (*dbconfig.DBConfig, string)) *LockKeeper {
    50  	return &LockKeeper{
    51  		locks:                 make(map[string]*Lock),
    52  		downstreamMetaMap:     make(map[string]*DownstreamMeta),
    53  		getDownstreamMetaFunc: getDownstreamMetaFunc,
    54  	}
    55  }
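        // A minimal usage sketch, not part of the original file: the caller supplies a
        // callback that resolves a task name to its downstream DB config and meta
        // schema (the callback body below is illustrative only).
        //
        //	lk := NewLockKeeper(func(task string) (*dbconfig.DBConfig, string) {
        //		// Look up the task's downstream config here; returning a nil
        //		// *dbconfig.DBConfig makes getDownstreamMeta report
        //		// ErrMasterOptimisticDownstreamMetaNotFound for that task.
        //		return nil, ""
        //	})
        //	_ = lk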
    56  
    57  // SetDropColumns sets the drop columns for the lock keeper.
    58  func (lk *LockKeeper) SetDropColumns(dropColumns map[string]map[string]map[string]map[string]map[string]DropColumnStage) {
    59  	lk.dropColumns = dropColumns
    60  }
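        // For illustration, a hedged sketch of the nested map documented on the struct
        // (the lock ID, source ID, table names, and stage value below are made up; the
        // real lock ID format comes from utils.GenDDLLockID):
        //
        //	lk.SetDropColumns(map[string]map[string]map[string]map[string]map[string]DropColumnStage{
        //		"task-`db`.`tbl`": { // lockID
        //			"c1": { // dropped column name
        //				"mysql-replica-01": { // source ID
        //					"shard_db": {"shard_tbl": DropNotDone}, // upSchema -> upTable -> stage
        //				},
        //			},
        //		},
        //	})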
    61  
    62  // getDownstreamMeta gets and caches the downstream meta for a task.
    63  func (lk *LockKeeper) getDownstreamMeta(task string) (*DownstreamMeta, error) {
    64  	if downstreamMeta, ok := lk.downstreamMetaMap[task]; ok {
    65  		return downstreamMeta, nil
    66  	}
    67  
    68  	dbConfig, meta := lk.getDownstreamMetaFunc(task)
    69  	if dbConfig == nil {
    70  		return nil, terror.ErrMasterOptimisticDownstreamMetaNotFound.Generate(task)
    71  	}
    72  	downstreamMeta := &DownstreamMeta{dbConfig: dbConfig, meta: meta}
    73  	lk.downstreamMetaMap[task] = downstreamMeta
    74  	return downstreamMeta, nil
    75  }
    76  
    77  // RemoveDownstreamMeta removes the downstream meta by task.
    78  func (lk *LockKeeper) RemoveDownstreamMeta(task string) {
    79  	delete(lk.downstreamMetaMap, task)
    80  }
    81  
    82  // TrySync tries to sync the lock.
    83  func (lk *LockKeeper) TrySync(cli *clientv3.Client, info Info, tts []TargetTable) (string, []string, []string, error) {
    84  	var (
    85  		lockID = genDDLLockID(info)
    86  		l      *Lock
    87  		ok     bool
    88  	)
    89  
    90  	lk.mu.Lock()
    91  	defer lk.mu.Unlock()
    92  
    93  	if info.TableInfoBefore == nil {
    94  		return "", nil, nil, terror.ErrMasterOptimisticTableInfoBeforeNotExist.Generate(info.DDLs)
    95  	}
    96  
    97  	if l, ok = lk.locks[lockID]; !ok {
    98  		downstreamMeta, err := lk.getDownstreamMeta(info.Task)
    99  		if err != nil {
   100  			log.L().Error("get downstream meta", log.ShortError(err))
   101  		}
   102  
   103  		lk.locks[lockID] = NewLock(cli, lockID, info.Task, info.DownSchema, info.DownTable, schemacmp.Encode(info.TableInfoBefore), tts, downstreamMeta)
   104  		l = lk.locks[lockID]
   105  
   106  		// set drop columns; this only happens when recovering locks
   107  		if lk.dropColumns != nil {
   108  			if cols, ok := lk.dropColumns[lockID]; ok {
   109  				l.columns = cols
   110  			}
   111  		}
   112  	}
   113  
   114  	newDDLs, cols, err := l.TrySync(info, tts)
   115  	return lockID, newDDLs, cols, err
   116  }
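        // A hedged sketch of how a coordinator might drive TrySync when a shard DDL
        // info arrives (tableKeeper, lockKeeper, and etcdCli are illustrative names,
        // not defined in this file):
        //
        //	tts := tableKeeper.FindTables(info.Task, info.DownSchema, info.DownTable)
        //	lockID, newDDLs, cols, err := lockKeeper.TrySync(etcdCli, info, tts)
        //	if err != nil {
        //		// handle the error; the lock (if created) stays registered under lockID
        //	}
        //	_, _, _ = lockID, newDDLs, cols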
   117  
   118  // RemoveLock removes a lock.
   119  func (lk *LockKeeper) RemoveLock(lockID string) bool {
   120  	lk.mu.Lock()
   121  	defer lk.mu.Unlock()
   122  
   123  	_, ok := lk.locks[lockID]
   124  	delete(lk.locks, lockID)
   125  	return ok
   126  }
   127  
   128  // RemoveLockByInfo removes a lock by its shard DDL info.
   129  func (lk *LockKeeper) RemoveLockByInfo(info Info) bool {
   130  	lockID := genDDLLockID(info)
   131  	return lk.RemoveLock(lockID)
   132  }
   133  
   134  // FindLock finds a lock.
   135  func (lk *LockKeeper) FindLock(lockID string) *Lock {
   136  	lk.mu.RLock()
   137  	defer lk.mu.RUnlock()
   138  
   139  	return lk.locks[lockID]
   140  }
   141  
   142  // FindLocksByTask finds locks by task.
   143  func (lk *LockKeeper) FindLocksByTask(task string) []*Lock {
   144  	lk.mu.RLock()
   145  	defer lk.mu.RUnlock()
   146  
   147  	locks := make([]*Lock, 0)
   148  	for _, lock := range lk.locks {
   149  		if lock.Task == task {
   150  			locks = append(locks, lock)
   151  		}
   152  	}
   153  
   154  	return locks
   155  }
   156  
   157  // FindLockByInfo finds a lock with a shard DDL info.
   158  func (lk *LockKeeper) FindLockByInfo(info Info) *Lock {
   159  	return lk.FindLock(genDDLLockID(info))
   160  }
   161  
   162  // Locks returns a copy of all Locks.
   163  func (lk *LockKeeper) Locks() map[string]*Lock {
   164  	lk.mu.RLock()
   165  	defer lk.mu.RUnlock()
   166  
   167  	locks := make(map[string]*Lock, len(lk.locks))
   168  	for k, v := range lk.locks {
   169  		locks[k] = v
   170  	}
   171  	return locks
   172  }
   173  
   174  // Clear clears all Locks.
   175  func (lk *LockKeeper) Clear() {
   176  	lk.mu.Lock()
   177  	defer lk.mu.Unlock()
   178  
   179  	lk.locks = make(map[string]*Lock)
   180  	lk.downstreamMetaMap = make(map[string]*DownstreamMeta)
   181  }
   182  
   183  // genDDLLockID generates DDL lock ID from its info.
   184  func genDDLLockID(info Info) string {
   185  	return utils.GenDDLLockID(info.Task, info.DownSchema, info.DownTable)
   186  }
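        // For reference, utils.GenDDLLockID derives the ID from the task name and the
        // downstream table identity, so locks for different downstream tables of the
        // same task never collide; the exact string format is owned by
        // utils.GenDDLLockID rather than by this file.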
   187  
   188  // TableKeeper is used to keep the initial tables for a task in optimistic mode.
   189  type TableKeeper struct {
   190  	mu     sync.RWMutex
   191  	tables map[string]map[string]SourceTables // task-name -> source-ID -> tables.
   192  }
   193  
   194  // NewTableKeeper creates a new TableKeeper instance.
   195  func NewTableKeeper() *TableKeeper {
   196  	return &TableKeeper{
   197  		tables: make(map[string]map[string]SourceTables),
   198  	}
   199  }
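        // A minimal usage sketch (identifiers are illustrative): the keeper must be
        // seeded via Init or Update before AddTable succeeds, because AddTable only
        // adds tables for tasks it already knows about.
        //
        //	tk := NewTableKeeper()
        //	tk.Init(map[string]map[string]SourceTables{
        //		"task1": {"mysql-replica-01": NewSourceTables("task1", "mysql-replica-01")},
        //	})
        //	added := tk.AddTable("task1", "mysql-replica-01", "db1", "tbl1", "db", "tbl")
        //	_ = added // true on first insertion, false if the table already existed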
   200  
   201  // Init (re-)initializes the keeper with initial source tables.
   202  func (tk *TableKeeper) Init(stm map[string]map[string]SourceTables) {
   203  	tk.mu.Lock()
   204  	defer tk.mu.Unlock()
   205  
   206  	tk.tables = make(map[string]map[string]SourceTables)
   207  	for task, sts := range stm {
   208  		if _, ok := tk.tables[task]; !ok {
   209  			tk.tables[task] = make(map[string]SourceTables)
   210  		}
   211  		for source, st := range sts {
   212  			tk.tables[task][source] = st
   213  		}
   214  	}
   215  }
   216  
   217  // Update adds/updates tables into the keeper or removes tables from the keeper.
   218  // It returns the newly added and dropped tables.
   219  func (tk *TableKeeper) Update(st SourceTables) (map[RouteTable]struct{}, map[RouteTable]struct{}) {
   220  	var (
   221  		oldST SourceTables
   222  		newST SourceTables
   223  	)
   224  
   225  	tk.mu.Lock()
   226  	defer tk.mu.Unlock()
   227  
   228  	// delete source tables
   229  	// this often happens when stopping a task with a source
   230  	if st.IsDeleted {
   231  		log.L().Info("delete source tables", zap.Stringer("source table", st))
   232  		if _, ok := tk.tables[st.Task]; ok {
   233  			oldST = tk.tables[st.Task][st.Source]
   234  			delete(tk.tables[st.Task], st.Source)
   235  		}
   236  		return DiffSourceTables(oldST, newST)
   237  	}
   238  
   239  	// update source tables
   240  	// this often happens when creating/dropping a table
   241  	if _, ok := tk.tables[st.Task]; !ok {
   242  		tk.tables[st.Task] = make(map[string]SourceTables)
   243  	}
   244  	oldST = tk.tables[st.Task][st.Source]
   245  	newST = st
   246  	tk.tables[st.Task][st.Source] = st
   247  	log.L().Info("update source tables", zap.Stringer("old source table", oldST), zap.Stringer("new source table", newST))
   248  
   249  	return DiffSourceTables(oldST, newST)
   250  }
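        // A hedged usage sketch (names are illustrative): callers typically act on the
        // returned diff, e.g. cleaning up per-table state for dropped tables.
        //
        //	addedTables, droppedTables := tk.Update(st)
        //	for rt := range droppedTables {
        //		_ = rt // each RouteTable identifies one upstream->downstream table pair
        //	}
        //	_ = addedTables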
   251  
   252  // AddTable adds a table into the source tables.
   253  // It returns whether the table was added (did not exist before).
   254  // NOTE: we only add tables for existing tasks now.
   255  func (tk *TableKeeper) AddTable(task, source, upSchema, upTable, downSchema, downTable string) bool {
   256  	tk.mu.Lock()
   257  	defer tk.mu.Unlock()
   258  
   259  	if _, ok := tk.tables[task]; !ok {
   260  		return false
   261  	}
   262  	if _, ok := tk.tables[task][source]; !ok {
   263  		tk.tables[task][source] = NewSourceTables(task, source)
   264  	}
   265  	st := tk.tables[task][source]
   266  	added := st.AddTable(upSchema, upTable, downSchema, downTable)
   267  	tk.tables[task][source] = st // assign the modified SourceTables.
   268  	return added
   269  }
   270  
   271  // SourceTableExist checks whether a source table exists.
   272  func (tk *TableKeeper) SourceTableExist(task, source, upSchema, upTable, downSchema, downTable string) bool {
   273  	tk.mu.Lock()
   274  	defer tk.mu.Unlock()
   275  
   276  	if _, ok := tk.tables[task]; !ok {
   277  		return false
   278  	}
   279  	if _, ok := tk.tables[task][source]; !ok {
   280  		return false
   281  	}
   282  	st := tk.tables[task][source]
   283  	targetTable := st.TargetTable(downSchema, downTable)
   284  
   285  	if targetTable.UpTables != nil {
   286  		if tables, ok := targetTable.UpTables[upSchema]; ok {
   287  			if _, ok2 := tables[upTable]; ok2 {
   288  				return true
   289  			}
   290  		}
   291  	}
   292  	return false
   293  }
   294  
   295  // RemoveTable removes a table from the source tables.
   296  // It returns whether the table was removed (existed before).
   297  func (tk *TableKeeper) RemoveTable(task, source, upSchema, upTable, downSchema, downTable string) bool {
   298  	tk.mu.Lock()
   299  	defer tk.mu.Unlock()
   300  
   301  	if _, ok := tk.tables[task]; !ok {
   302  		return false
   303  	}
   304  	if _, ok := tk.tables[task][source]; !ok {
   305  		return false
   306  	}
   307  	st := tk.tables[task][source]
   308  	removed := st.RemoveTable(upSchema, upTable, downSchema, downTable)
   309  	tk.tables[task][source] = st // assign the modified SourceTables.
   310  	return removed
   311  }
   312  
   313  // RemoveTableByTask removes tables from the source tables by task name.
   314  // It returns whether any tables were removed (existed before).
   315  func (tk *TableKeeper) RemoveTableByTask(task string) bool {
   316  	tk.mu.Lock()
   317  	defer tk.mu.Unlock()
   318  
   319  	if _, ok := tk.tables[task]; !ok {
   320  		return false
   321  	}
   322  	delete(tk.tables, task)
   323  	return true
   324  }
   325  
   326  // RemoveTableByTaskAndSources removes tables from the source tables by task name and sources.
   327  func (tk *TableKeeper) RemoveTableByTaskAndSources(task string, sources []string) {
   328  	tk.mu.Lock()
   329  	defer tk.mu.Unlock()
   330  
   331  	if _, ok := tk.tables[task]; !ok {
   332  		return
   333  	}
   334  
   335  	for _, source := range sources {
   336  		delete(tk.tables[task], source)
   337  	}
   338  }
   339  
   340  // FindTables finds source tables by task name and downstream table name.
   341  func (tk *TableKeeper) FindTables(task, downSchema, downTable string) []TargetTable {
   342  	tk.mu.RLock()
   343  	defer tk.mu.RUnlock()
   344  
   345  	stm, ok := tk.tables[task]
   346  	if !ok || len(stm) == 0 {
   347  		return nil
   348  	}
   349  
   350  	ret := make([]TargetTable, 0, len(stm))
   351  	for _, st := range stm {
   352  		if tt := st.TargetTable(downSchema, downTable); !tt.IsEmpty() {
   353  			ret = append(ret, tt)
   354  		}
   355  	}
   356  
   357  	sort.Sort(TargetTableSlice(ret))
   358  	return ret
   359  }
   360  
   361  // TargetTablesForTask returns the TargetTable list for a specified task and downstream table.
   362  // stm: task name -> upstream source ID -> SourceTables.
   363  func TargetTablesForTask(task, downSchema, downTable string, stm map[string]map[string]SourceTables) []TargetTable {
   364  	sts, ok := stm[task]
   365  	if !ok || len(sts) == 0 {
   366  		return nil
   367  	}
   368  
   369  	ret := make([]TargetTable, 0, len(sts))
   370  	for _, st := range sts {
   371  		if tt := st.TargetTable(downSchema, downTable); !tt.IsEmpty() {
   372  			ret = append(ret, tt)
   373  		}
   374  	}
   375  
   376  	sort.Sort(TargetTableSlice(ret))
   377  	return ret
   378  }
   379  
   380  // TargetTableSlice attaches the methods of sort.Interface to []TargetTable,
   381  // sorting in increasing order according to the `Source` field.
   382  type TargetTableSlice []TargetTable
   383  
   384  // Len implements Sorter.Len.
   385  func (t TargetTableSlice) Len() int {
   386  	return len(t)
   387  }
   388  
   389  // Less implements Sorter.Less.
   390  func (t TargetTableSlice) Less(i, j int) bool {
   391  	return t[i].Source < t[j].Source
   392  }
   393  
   394  // Swap implements Sorter.Swap.
   395  func (t TargetTableSlice) Swap(i, j int) {
   396  	t[i], t[j] = t[j], t[i]
   397  }