github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/pkg/causality/txn_cache.go (about)

     1  // Copyright 2024 PingCAP, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package causality
    15  
    16  import (
    17  	"sync/atomic"
    18  
    19  	"github.com/pingcap/log"
    20  	"go.uber.org/zap"
    21  )
    22  
    23  const (
    24  	// BlockStrategyWaitAvailable means the cache will block until there is an available slot.
    25  	BlockStrategyWaitAvailable BlockStrategy = "waitAvailable"
    26  	// BlockStrategyWaitEmpty means the cache will block until all cached txns are consumed.
    27  	BlockStrategyWaitEmpty = "waitEmpty"
    28  	// TODO: maybe we can implement a strategy that can automatically adapt to different scenarios
    29  )
    30  
// BlockStrategy is the strategy to handle the situation when the cache is full.
// See BlockStrategyWaitAvailable and BlockStrategyWaitEmpty for the supported values.
type BlockStrategy string
    33  
// txnEvent is the minimal view of a transaction event required by the
// conflict detector: a completion hook plus the keys used for conflict
// detection.
type txnEvent interface {
	// OnConflictResolved is called when the event leaves ConflictDetector.
	OnConflictResolved()
	// ConflictKeys returns the keys that the event conflicts with.
	ConflictKeys() []uint64
}
    40  
// TxnWithNotifier is a wrapper of txnEvent with a PostTxnExecuted.
type TxnWithNotifier[Txn txnEvent] struct {
	// TxnEvent is the wrapped transaction event.
	TxnEvent Txn
	// The PostTxnExecuted will remove the txn related Node in the conflict detector's
	// dependency graph and resolve related dependencies for these transactions
	// which depend on this executed txn.
	//
	// NOTE: the PostTxnExecuted() must be called after the txn executed.
	PostTxnExecuted func()
}
    51  
// TxnCacheOption is the option for creating a cache for resolved txns.
type TxnCacheOption struct {
	// Count controls the number of caches, txns in different caches could be executed concurrently.
	Count int
	// Size controls the max number of txns a cache can hold.
	// Must be greater than 0; newTxnCache panics otherwise.
	Size int
	// BlockStrategy controls the strategy when the cache is full.
	BlockStrategy BlockStrategy
}
    61  
// txnCache is a bounded buffer sitting between the conflict detector
// (producer) and the executors (consumers).
// In current implementation, the conflict detector will push txn to the txnCache.
type txnCache[Txn txnEvent] interface {
	// add adds an event to the Cache. It returns false when the event
	// cannot be accepted right now (cache full / blocked).
	add(txn TxnWithNotifier[Txn]) bool
	// out returns a channel to receive events which are ready to be executed.
	out() <-chan TxnWithNotifier[Txn]
}
    69  
    70  func newTxnCache[Txn txnEvent](opt TxnCacheOption) txnCache[Txn] {
    71  	log.Info("create new worker cache in conflict detector",
    72  		zap.Int("cacheCount", opt.Count),
    73  		zap.Int("cacheSize", opt.Size), zap.String("BlockStrategy", string(opt.BlockStrategy)))
    74  	if opt.Size <= 0 {
    75  		log.Panic("WorkerOption.CacheSize should be greater than 0, please report a bug")
    76  	}
    77  
    78  	switch opt.BlockStrategy {
    79  	case BlockStrategyWaitAvailable:
    80  		return &boundedTxnCache[Txn]{ch: make(chan TxnWithNotifier[Txn], opt.Size)}
    81  	case BlockStrategyWaitEmpty:
    82  		return &boundedTxnCacheWithBlock[Txn]{ch: make(chan TxnWithNotifier[Txn], opt.Size)}
    83  	default:
    84  		return nil
    85  	}
    86  }
    87  
// boundedTxnCache is a cache which has a limit on the number of txns it can hold.
// It implements the BlockStrategyWaitAvailable strategy: add succeeds as
// soon as a slot is free.
//
//nolint:unused
type boundedTxnCache[Txn txnEvent] struct {
	// ch buffers the resolved txns; its capacity is TxnCacheOption.Size.
	ch chan TxnWithNotifier[Txn]
}
    94  
    95  //nolint:unused
    96  func (w *boundedTxnCache[Txn]) add(txn TxnWithNotifier[Txn]) bool {
    97  	select {
    98  	case w.ch <- txn:
    99  		return true
   100  	default:
   101  		return false
   102  	}
   103  }
   104  
// out exposes the buffered txns as a receive-only channel for consumers.
//
//nolint:unused
func (w *boundedTxnCache[Txn]) out() <-chan TxnWithNotifier[Txn] {
	return w.ch
}
   109  
// boundedTxnCacheWithBlock is a special boundedWorker. Once the cache
// is full, it will block until all cached txns are consumed.
// It implements the BlockStrategyWaitEmpty strategy.
type boundedTxnCacheWithBlock[Txn txnEvent] struct {
	// ch buffers the resolved txns; its capacity is TxnCacheOption.Size.
	ch chan TxnWithNotifier[Txn]
	// isBlocked records whether the cache is in the "drain first" state:
	// set when a send finds the channel full, cleared once it is empty.
	//nolint:unused
	isBlocked atomic.Bool
}
   117  
// add enqueues txn unless the cache is in the blocked state. When a send
// finds the channel full, the cache flips to blocked and rejects every
// subsequent txn until the consumer has fully drained the channel
// (wait-empty semantics); only then does it accept txns again.
//
// NOTE(review): the Load/Store/CompareAndSwap sequence on isBlocked is
// not atomic as a whole, so this looks correct only with a single
// concurrent adder — confirm against the conflict detector's usage.
//
//nolint:unused
func (w *boundedTxnCacheWithBlock[Txn]) add(txn TxnWithNotifier[Txn]) bool {
	// Leave the blocked state only after the consumer emptied the channel.
	if w.isBlocked.Load() && len(w.ch) <= 0 {
		w.isBlocked.Store(false)
	}

	if !w.isBlocked.Load() {
		select {
		case w.ch <- txn:
			return true
		default:
			// Channel is full: enter the blocked state. CAS avoids
			// clobbering a state change made between the Load above
			// and this point.
			w.isBlocked.CompareAndSwap(false, true)
		}
	}
	return false
}
   134  
// out exposes the buffered txns as a receive-only channel for consumers.
//
//nolint:unused
func (w *boundedTxnCacheWithBlock[Txn]) out() <-chan TxnWithNotifier[Txn] {
	return w.ch
}