github.com/matrixorigin/matrixone@v0.7.0/pkg/lockservice/lock_table.go

// Copyright 2023 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package lockservice

import (
	"bytes"
	"context"
	"fmt"
	"sync"
)

// a lockTable instance manages all the locks on a single table. The
// underlying LockStorage is protected by mu and must only be accessed
// while holding it.
type lockTable struct {
	tableID  uint64
	detector *detector

	mu struct {
		sync.RWMutex
		store LockStorage
	}
}

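// newLockTable creates the lock table for tableID, backed by the
// default btree-based LockStorage.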
func newLockTable(
	tableID uint64,
	detector *detector) *lockTable {
	l := &lockTable{tableID: tableID, detector: detector}
	l.mu.store = newBtreeBasedStorage()
	return l
}

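// lock acquires locks on the given rows for txn, blocking until the
// locks are granted or ctx is done. After every successful wait the
// acquisition is retried from scratch, because a returning wait only
// signals that the old conflict was resolved; another waiter may have
// taken a conflicting lock in the meantime.
//
// A minimal usage sketch (hypothetical values, assuming the caller
// already holds an *activeTxn from the lock service):
//
//	opts := LockOptions{granularity: Row}
//	if err := table.lock(ctx, txn, [][]byte{[]byte("row-1")}, opts); err != nil {
//		// ctx was cancelled or the wait was aborted; no lock is held.
//	}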
func (l *lockTable) lock(
	ctx context.Context,
	txn *activeTxn,
	rows [][]byte,
	options LockOptions) error {
	waiter := acquireWaiter(txn.txnID)
	for {
		if added := l.doAcquireLock(txn, waiter, rows, options); added {
			return nil
		}

		if err := waiter.wait(ctx); err != nil {
			return err
		}
		waiter.resetWait()
	}
}

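// unlock releases every lock recorded in ls. For each key that still
// holds a lock, the waiter attached to a row lock (or to the end key of
// a range lock) is closed so that any queued waiters can be notified,
// and the key is removed from storage.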
func (l *lockTable) unlock(ls *cowSlice) {
	locks := ls.slice()
	defer locks.unref()

	l.mu.Lock()
	defer l.mu.Unlock()
	locks.iter(func(key []byte) bool {
		if lock, ok := l.mu.store.Get(key); ok {
			if lock.isLockRow() || lock.isLockRangeEnd() {
				lock.waiter.close()
			}
			l.mu.store.Delete(key)
		}
		return true
	})
}

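// getLock returns the lock stored at exactly key, if any.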
func (l *lockTable) getLock(key []byte) (Lock, bool) {
	l.mu.RLock()
	defer l.mu.RUnlock()
	return l.mu.store.Get(key)
}

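// doAcquireLock makes a single non-blocking acquisition attempt under
// the table mutex. It returns true if the locks were added, or false if
// a conflict was found and waiter was queued behind the current holder.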
func (l *lockTable) doAcquireLock(
	txn *activeTxn,
	waiter *waiter,
	rows [][]byte,
	opts LockOptions) bool {
	l.mu.Lock()
	defer l.mu.Unlock()

	switch opts.granularity {
	case Row:
		return l.acquireRowLockLocked(txn, waiter, rows[0], opts.mode)
	case Range:
		if len(rows) != 2 {
			panic("invalid range lock")
		}
		return l.acquireRangeLockLocked(txn, waiter, rows[0], rows[1], opts.mode)
	default:
		panic(fmt.Sprintf("not supported lock granularity %d", opts.granularity))
	}
}

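// acquireRowLockLocked adds a row lock unless an existing lock covers
// row. Seek returns the first stored key >= row, so there are exactly
// two conflict cases: the key equals row (a lock already exists on this
// row), or the key is the end of a range lock, meaning row falls inside
// that range.
//
// A hypothetical example: with a range lock on [b, d] stored under the
// keys "b" and "d", Seek("c") returns "d", which isLockRangeEnd
// identifies as a range end, so locking row "c" conflicts.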
func (l *lockTable) acquireRowLockLocked(
	txn *activeTxn,
	w *waiter,
	row []byte,
	mode LockMode) bool {
	key, lock, ok := l.mu.store.Seek(row)
	if ok &&
		(bytes.Equal(key, row) ||
			lock.isLockRangeEnd()) {
		l.handleLockConflict(txn, w, lock)
		return false
	}
	l.addRowLockLocked(txn, row, w, mode)
	return true
}

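// acquireRangeLockLocked adds a [start, end] range lock if the range is
// free. Seek(start) returns the first stored key >= start, so any such
// key that is <= end proves an overlap with an existing row or range
// lock. start must compare strictly less than end.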
func (l *lockTable) acquireRangeLockLocked(
	txn *activeTxn,
	w *waiter,
	start, end []byte,
	mode LockMode) bool {
	if bytes.Compare(start, end) >= 0 {
		panic(fmt.Sprintf("lock error: start[%v] is greater than or equal to end[%v]",
			start, end))
	}
	key, lock, ok := l.mu.store.Seek(start)
	if ok &&
		bytes.Compare(key, end) <= 0 {
		l.handleLockConflict(txn, w, lock)
		return false
	}

	l.addRangeLockLocked(txn, start, end, w, mode)
	return true
}

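// addRowLockLocked records a new row lock owned by txn and guarded by
// waiter, registering it with the txn before inserting it into storage.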
func (l *lockTable) addRowLockLocked(
	txn *activeTxn,
	row []byte,
	waiter *waiter,
	mode LockMode) {
	lock := newRowLock(txn.txnID, mode)
	lock.waiter = waiter

	// the lock must be added to txn first so that it is already
	// visible when deadlock detection runs.
	txn.lockAdded(l.tableID, [][]byte{row})
	l.mu.store.Add(row, lock)
}

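// addRangeLockLocked records a [start, end] range lock as two storage
// entries that share a single waiter: one at start and one at end.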
func (l *lockTable) addRangeLockLocked(
	txn *activeTxn,
	start, end []byte,
	waiter *waiter,
	mode LockMode) {
	startLock, endLock := newRangeLock(txn.txnID, mode)
	startLock.waiter = waiter
	endLock.waiter = waiter

	// the lock must be added to txn first so that it is already
	// visible when deadlock detection runs.
	txn.lockAdded(l.tableID, [][]byte{start, end})
	l.mu.store.Add(start, startLock)
	l.mu.store.Add(end, endLock)
}

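// handleLockConflict queues w behind the holder of conflictWith and
// marks txn as blocked. The new wait edge may have closed a cycle in
// the wait-for graph, so an active deadlock check is triggered
// immediately.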
func (l *lockTable) handleLockConflict(txn *activeTxn, w *waiter, conflictWith Lock) {
	// a conflict was found: wait for the previous txn to complete.
	// a new waiter was added, so we need to run an active deadlock
	// check.
	txn.setBlocked(w)
	conflictWith.waiter.add(w)
	if err := l.detector.check(txn.txnID); err != nil {
		panic("BUG: active deadlock check can not fail")
	}
}