github.com/nspcc-dev/neo-go@v0.105.2-0.20240517133400-6be757af3eba/pkg/core/storage/memcached_store.go

     1  package storage
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"sort"
     7  	"strings"
     8  	"sync"
     9  )
    10  
    11  // MemCachedStore is a wrapper around a persistent store that caches all changes
    12  // made to it, so that they can later be flushed to the lower store in one batch.
    13  type MemCachedStore struct {
    14  	MemoryStore
    15  
    16  	private bool
    17  	// plock protects Persist from concurrent entry.
    18  	plock sync.Mutex
    19  	// Persistent Store.
    20  	ps Store
    21  }
    22  
    23  type (
    24  	// KeyValue represents key-value pair.
    25  	KeyValue struct {
    26  		Key   []byte
    27  		Value []byte
    28  	}
    29  
    30  	// KeyValueExists represents a key-value pair with an indicator of whether the
    31  	// item exists in the persistent storage.
    32  	KeyValueExists struct {
    33  		KeyValue
    34  
    35  		Exists bool
    36  	}
    37  
    38  	// MemBatch represents a changeset to be persisted.
    39  	MemBatch struct {
    40  		Put     []KeyValueExists
    41  		Deleted []KeyValueExists
    42  	}
    43  )
    44  
    45  // NewMemCachedStore creates a new MemCachedStore object.
    46  func NewMemCachedStore(lower Store) *MemCachedStore {
    47  	return &MemCachedStore{
    48  		MemoryStore: *NewMemoryStore(),
    49  		ps:          lower,
    50  	}
    51  }
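
// Illustrative usage sketch, not part of the original file: it assumes
// NewMemoryStore() as the lower layer and uses a hypothetical function name.
// Changes stay in the cache until Persist pushes them down in one batch.
func exampleMemCachedStoreUsage() {
	ps := NewMemoryStore()      // lower, "persistent" layer
	ts := NewMemCachedStore(ps) // caching layer on top of it

	ts.Put([]byte("key"), []byte("value")) // cached only, ps doesn't see it yet
	_, errLower := ps.Get([]byte("key"))   // ErrKeyNotFound until Persist is called
	_ = errLower

	if _, err := ts.Persist(); err == nil {
		// The pair is now stored in ps and dropped from the cache;
		// ts.Get still finds it by falling through to ps.
		_, _ = ts.Get([]byte("key"))
	}
}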
    52  
    53  // NewPrivateMemCachedStore creates a new private (unlocked) MemCachedStore object.
    54  // Private cached stores are not protected by locks and are closed after Persist.
    55  func NewPrivateMemCachedStore(lower Store) *MemCachedStore {
    56  	return &MemCachedStore{
    57  		MemoryStore: *NewMemoryStore(),
    58  		private:     true,
    59  		ps:          lower,
    60  	}
    61  }
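
// Illustrative sketch, not part of the original file (the function name is
// hypothetical): a private store is a short-lived, single-goroutine cache on
// top of a shared one; it is persisted once and not used afterwards.
func examplePrivateMemCachedStoreUsage() {
	shared := NewMemCachedStore(NewMemoryStore())
	private := NewPrivateMemCachedStore(shared) // no locking inside

	private.Put([]byte("key"), []byte("value"))
	// Persist pushes the accumulated changes into the shared store with a
	// single PutChangeSet call; the private store must not be reused after it.
	_, _ = private.Persist()
}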
    62  
    63  // lock write-locks non-private store.
    64  func (s *MemCachedStore) lock() {
    65  	if !s.private {
    66  		s.mut.Lock()
    67  	}
    68  }
    69  
    70  // unlock unlocks non-private store.
    71  func (s *MemCachedStore) unlock() {
    72  	if !s.private {
    73  		s.mut.Unlock()
    74  	}
    75  }
    76  
    77  // rlock read-locks non-private store.
    78  func (s *MemCachedStore) rlock() {
    79  	if !s.private {
    80  		s.mut.RLock()
    81  	}
    82  }
    83  
    84  // runlock drops read lock for non-private stores.
    85  func (s *MemCachedStore) runlock() {
    86  	if !s.private {
    87  		s.mut.RUnlock()
    88  	}
    89  }
    90  
    91  // Get implements the Store interface.
    92  func (s *MemCachedStore) Get(key []byte) ([]byte, error) {
    93  	s.rlock()
    94  	defer s.runlock()
    95  	m := s.chooseMap(key)
    96  	if val, ok := m[string(key)]; ok {
    97  		if val == nil {
    98  			return nil, ErrKeyNotFound
    99  		}
   100  		return val, nil
   101  	}
   102  	return s.ps.Get(key)
   103  }
   104  
   105  // Put puts new KV pair into the store.
   106  func (s *MemCachedStore) Put(key, value []byte) {
   107  	newKey := string(key)
   108  	vcopy := bytes.Clone(value)
   109  	s.lock()
   110  	put(s.chooseMap(key), newKey, vcopy)
   111  	s.unlock()
   112  }
   113  
   114  // Delete drops a KV pair from the store. It never fails.
   115  func (s *MemCachedStore) Delete(key []byte) {
   116  	newKey := string(key)
   117  	s.lock()
   118  	put(s.chooseMap(key), newKey, nil)
   119  	s.unlock()
   120  }
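
// Illustrative sketch, not part of the original file (the function name is
// hypothetical), of how the cache shadows the lower store: a cached nil value
// is a deletion marker, so Get reports ErrKeyNotFound even though the pair is
// still present in ps until the next Persist.
func exampleDeleteShadowsPersisted() {
	ps := NewMemoryStore()
	ts := NewMemCachedStore(ps)

	ts.Put([]byte("key"), []byte("value"))
	_, _ = ts.Persist() // the pair now lives in ps

	ts.Delete([]byte("key"))        // cached as key -> nil, ps is untouched
	_, err := ts.Get([]byte("key")) // returns ErrKeyNotFound: the marker wins
	_ = err

	_, _ = ts.Persist() // propagates the deletion to ps
}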
   121  
   122  // GetBatch returns currently accumulated changeset.
   123  func (s *MemCachedStore) GetBatch() *MemBatch {
   124  	s.rlock()
   125  	defer s.runlock()
   126  	var b MemBatch
   127  
   128  	b.Put = make([]KeyValueExists, 0, len(s.mem)+len(s.stor))
   129  	b.Deleted = make([]KeyValueExists, 0)
   130  	for _, m := range []map[string][]byte{s.mem, s.stor} {
   131  		for k, v := range m {
   132  			key := []byte(k)
   133  			_, err := s.ps.Get(key)
   134  			if v == nil {
   135  				b.Deleted = append(b.Deleted, KeyValueExists{KeyValue: KeyValue{Key: key}, Exists: err == nil})
   136  			} else {
   137  				b.Put = append(b.Put, KeyValueExists{KeyValue: KeyValue{Key: key, Value: v}, Exists: err == nil})
   138  			}
   139  		}
   140  	}
   141  	return &b
   142  }
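
// Illustrative sketch, not part of the original file (the function name is
// hypothetical): GetBatch exposes the accumulated changeset, with Exists
// telling whether each key is already present in the persistent store.
func exampleGetBatchUsage() {
	ts := NewMemCachedStore(NewMemoryStore())
	ts.Put([]byte("new"), []byte("value"))
	ts.Delete([]byte("missing"))

	b := ts.GetBatch()
	for _, kv := range b.Put {
		_ = kv.Exists // false here: "new" is not yet in the lower store
	}
	for _, kv := range b.Deleted {
		_ = kv.Exists // false here: "missing" was never persisted
	}
}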
   143  
   144  // PutChangeSet implements the Store interface. Never returns an error.
   145  func (s *MemCachedStore) PutChangeSet(puts map[string][]byte, stores map[string][]byte) error {
   146  	s.lock()
   147  	s.MemoryStore.putChangeSet(puts, stores)
   148  	s.unlock()
   149  	return nil
   150  }
   151  
   152  // Seek implements the Store interface.
   153  func (s *MemCachedStore) Seek(rng SeekRange, f func(k, v []byte) bool) {
   154  	ps, memRes := s.prepareSeekMemSnapshot(rng)
   155  	performSeek(context.Background(), ps, memRes, rng, false, f)
   156  }
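
// Illustrative sketch, not part of the original file (the function name is
// hypothetical): Seek walks cached and persisted items matching the prefix in
// key order, with the callback returning false to stop the iteration.
func exampleSeekUsage() {
	ts := NewMemCachedStore(NewMemoryStore())
	ts.Put([]byte("ab"), []byte("1"))
	ts.Put([]byte("ac"), []byte("2"))

	var keys [][]byte
	ts.Seek(SeekRange{Prefix: []byte("a")}, func(k, v []byte) bool {
		keys = append(keys, bytes.Clone(k)) // "ab", then "ac"
		return true                         // continue iterating
	})
	_ = keys
}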
   157  
   158  // GetStorageChanges returns all current storage changes. It can only be done for private
   159  // MemCachedStore.
   160  func (s *MemCachedStore) GetStorageChanges() map[string][]byte {
   161  	if !s.private {
   162  		panic("GetStorageChanges called on shared MemCachedStore")
   163  	}
   164  	return s.stor
   165  }
   166  
   167  // SeekAsync returns a non-buffered channel with matching KeyValue pairs. Key and
   168  // value slices may not be copied and may be modified. SeekAsync can guarantee
   169  // that key-value items are sorted by key in ascending order.
   170  func (s *MemCachedStore) SeekAsync(ctx context.Context, rng SeekRange, cutPrefix bool) chan KeyValue {
   171  	res := make(chan KeyValue)
   172  	ps, memRes := s.prepareSeekMemSnapshot(rng)
   173  	go func() {
   174  		performSeek(ctx, ps, memRes, rng, cutPrefix, func(k, v []byte) bool {
   175  			select {
   176  			case <-ctx.Done():
   177  				return false
   178  			case res <- KeyValue{Key: k, Value: v}:
   179  				return true
   180  			}
   181  		})
   182  		close(res)
   183  	}()
   184  
   185  	return res
   186  }
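
// Illustrative sketch, not part of the original file (the function name is
// hypothetical): SeekAsync streams the same merged results over a channel and
// stops early once the given context is cancelled.
func exampleSeekAsyncUsage(ctx context.Context, ts *MemCachedStore) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	for kv := range ts.SeekAsync(ctx, SeekRange{Prefix: []byte("a")}, true) {
		// With cutPrefix set to true the "a" prefix is already stripped from kv.Key.
		if len(kv.Value) > 1024 {
			cancel() // early stop: the producer notices ctx and closes the channel
		}
	}
}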
   187  
   188  // prepareSeekMemSnapshot prepares a memory store snapshot of `stor`/`mem` so that
   189  // the lock over MemCachedStore is not held throughout the whole Seek operation.
   190  // The results of prepareSeekMemSnapshot can be safely used as performSeek arguments.
   191  func (s *MemCachedStore) prepareSeekMemSnapshot(rng SeekRange) (Store, []KeyValueExists) {
   192  	var memRes []KeyValueExists
   193  	sPrefix := string(rng.Prefix)
   194  	lPrefix := len(sPrefix)
   195  	sStart := string(rng.Start)
   196  	lStart := len(sStart)
   197  	isKeyOK := func(key string) bool {
   198  		return strings.HasPrefix(key, sPrefix) && (lStart == 0 || strings.Compare(key[lPrefix:], sStart) >= 0)
   199  	}
   200  	if rng.Backwards {
   201  		isKeyOK = func(key string) bool {
   202  			return strings.HasPrefix(key, sPrefix) && (lStart == 0 || strings.Compare(key[lPrefix:], sStart) <= 0)
   203  		}
   204  	}
   205  	s.rlock()
   206  	m := s.MemoryStore.chooseMap(rng.Prefix)
   207  	for k, v := range m {
   208  		if isKeyOK(k) {
   209  			memRes = append(memRes, KeyValueExists{
   210  				KeyValue: KeyValue{
   211  					Key:   []byte(k),
   212  					Value: v,
   213  				},
   214  				Exists: v != nil,
   215  			})
   216  		}
   217  	}
   218  	ps := s.ps
   219  	s.runlock()
   220  	return ps, memRes
   221  }
   222  
   223  // performSeek is the internal implementation of Seek* capable of seeking for the given
   224  // key and supporting early stop using the provided context. `ps` is a captured underlying
   225  // persistent storage. `memRes` is a snapshot of suitable cached items prepared
   226  // by prepareSeekMemSnapshot.
   227  //
   228  // `cutPrefix` denotes whether the provided prefix needs to be cut off the resulting keys.
   229  // `rng` specifies the prefix items must match and the point to start seeking from. Backwards
   230  // seeking from some point is supported with the corresponding `rng` field set.
   231  func performSeek(ctx context.Context, ps Store, memRes []KeyValueExists, rng SeekRange, cutPrefix bool, f func(k, v []byte) bool) {
   232  	lPrefix := len(string(rng.Prefix))
   233  	less := func(k1, k2 []byte) bool {
   234  		res := bytes.Compare(k1, k2)
   235  		return res != 0 && rng.Backwards == (res > 0)
   236  	}
   237  	// Sort memRes items for further comparison with ps items.
   238  	sort.Slice(memRes, func(i, j int) bool {
   239  		return less(memRes[i].Key, memRes[j].Key)
   240  	})
   241  
   242  	var (
   243  		done    bool
   244  		iMem    int
   245  		kvMem   KeyValueExists
   246  		haveMem bool
   247  	)
   248  	if iMem < len(memRes) {
   249  		kvMem = memRes[iMem]
   250  		haveMem = true
   251  		iMem++
   252  	}
   253  	// Merge results of seek operations in the order required by rng. It returns whether
   254  	// iterating should be continued.
   255  	mergeFunc := func(k, v []byte) bool {
   256  		if done {
   257  			return false
   258  		}
   259  		kvPs := KeyValue{
   260  			Key:   bytes.Clone(k),
   261  			Value: bytes.Clone(v),
   262  		}
   263  		for {
   264  			select {
   265  			case <-ctx.Done():
   266  				done = true
   267  				return false
   268  			default:
   269  				var isMem = haveMem && less(kvMem.Key, kvPs.Key)
   270  				if isMem {
   271  					if kvMem.Exists {
   272  						if cutPrefix {
   273  							kvMem.Key = kvMem.Key[lPrefix:]
   274  						}
   275  						if !f(kvMem.Key, kvMem.Value) {
   276  							done = true
   277  							return false
   278  						}
   279  					}
   280  					if iMem < len(memRes) {
   281  						kvMem = memRes[iMem]
   282  						haveMem = true
   283  						iMem++
   284  					} else {
   285  						haveMem = false
   286  					}
   287  				} else {
   288  					if !bytes.Equal(kvMem.Key, kvPs.Key) {
   289  						if cutPrefix {
   290  							kvPs.Key = kvPs.Key[lPrefix:]
   291  						}
   292  						if !f(kvPs.Key, kvPs.Value) {
   293  							done = true
   294  							return false
   295  						}
   296  					}
   297  					return true
   298  				}
   299  			}
   300  		}
   301  	}
   302  	if rng.SearchDepth == 0 || rng.SearchDepth > 1 {
   303  		if rng.SearchDepth > 1 {
   304  			rng.SearchDepth--
   305  		}
   306  		ps.Seek(rng, mergeFunc)
   307  	}
   308  
   309  	if !done && haveMem {
   310  	loop:
   311  		for i := iMem - 1; i < len(memRes); i++ {
   312  			select {
   313  			case <-ctx.Done():
   314  				break loop
   315  			default:
   316  				kvMem = memRes[i]
   317  				if kvMem.Exists {
   318  					if cutPrefix {
   319  						kvMem.Key = kvMem.Key[lPrefix:]
   320  					}
   321  					if !f(kvMem.Key, kvMem.Value) {
   322  						break loop
   323  					}
   324  				}
   325  			}
   326  		}
   327  	}
   328  }
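
// Illustrative sketch, not part of the original file (the function name is
// hypothetical), of the merge performed above as seen through the public Seek:
// persisted and cached items are interleaved in key order, and a cached
// deletion marker hides the corresponding persisted value.
func exampleMergedSeek() {
	ts := NewMemCachedStore(NewMemoryStore())
	ts.Put([]byte("a1"), []byte("old"))
	ts.Put([]byte("a2"), []byte("old"))
	_, _ = ts.Persist() // both pairs are in the lower store now

	ts.Delete([]byte("a1"))             // cached marker, hides persisted "a1"
	ts.Put([]byte("a3"), []byte("new")) // cached only

	var got []string
	ts.Seek(SeekRange{Prefix: []byte("a")}, func(k, v []byte) bool {
		got = append(got, string(k)) // "a2" (from ps), then "a3" (from the cache)
		return true
	})
	_ = got
}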
   329  
   330  // Persist flushes all the MemoryStore contents into the (supposedly) persistent
   331  // store ps. MemCachedStore remains accessible for most of this action
   332  // (any new changes will be cached in memory).
   333  func (s *MemCachedStore) Persist() (int, error) {
   334  	return s.persist(false)
   335  }
   336  
   337  // PersistSync flushes all the MemoryStore contents into the (supposedly) persistent
   338  // store ps. It's different from Persist in that it blocks MemCachedStore completely
   339  // while flushing things from memory to persistent store.
   340  func (s *MemCachedStore) PersistSync() (int, error) {
   341  	return s.persist(true)
   342  }
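
// Illustrative sketch, not part of the original file (the function name is
// hypothetical): Persist reports how many keys were flushed and leaves the
// store usable; changes made while it runs are cached for the next run.
func examplePersistUsage(ts *MemCachedStore) {
	ts.Put([]byte("key"), []byte("value"))

	keys, err := ts.Persist()
	if err != nil {
		// The changes are kept in memory and can be persisted later.
		return
	}
	_ = keys // number of key-value pairs (puts and deletes) flushed
}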
   343  
   344  func (s *MemCachedStore) persist(isSync bool) (int, error) {
   345  	var err error
   346  	var keys int
   347  
   348  	if s.private {
   349  		keys = len(s.mem) + len(s.stor)
   350  		if keys == 0 {
   351  			return 0, nil
   352  		}
   353  		err = s.ps.PutChangeSet(s.mem, s.stor)
   354  		if err != nil {
   355  			return 0, err
   356  		}
   357  		s.mem = nil
   358  		s.stor = nil
   359  		return keys, nil
   360  	}
   361  
   362  	s.plock.Lock()
   363  	defer s.plock.Unlock()
   364  	s.mut.Lock()
   365  
   366  	keys = len(s.mem) + len(s.stor)
   367  	if keys == 0 {
   368  		s.mut.Unlock()
   369  		return 0, nil
   370  	}
   371  
   372  	// tempstore technically copies the current s into the lower layer while the
   373  	// real s starts using fresh new maps. This tempstore is only known here and
   374  	// nothing ever changes it, therefore accesses to it (reads) can go
   375  	// unprotected while writes are handled by s proper.
   376  	var tempstore = &MemCachedStore{MemoryStore: MemoryStore{mem: s.mem, stor: s.stor}, ps: s.ps}
   377  	s.ps = tempstore
   378  	s.mem = make(map[string][]byte, len(s.mem))
   379  	s.stor = make(map[string][]byte, len(s.stor))
   380  	if !isSync {
   381  		s.mut.Unlock()
   382  	}
   383  	err = tempstore.ps.PutChangeSet(tempstore.mem, tempstore.stor)
   384  
   385  	if !isSync {
   386  		s.mut.Lock()
   387  	}
   388  	if err == nil {
   389  		// tempstore.mem and tempstore.stor are completely flushed now
   390  		// to tempstore.ps, so all KV pairs are the same and this
   391  		// substitution has no visible effects.
   392  		s.ps = tempstore.ps
   393  	} else {
   394  		// We're toast. We'll try to still keep proper state, but OOM
   395  		// killer will get to us eventually.
   396  		for k := range s.mem {
   397  			put(tempstore.mem, k, s.mem[k])
   398  		}
   399  		for k := range s.stor {
   400  			put(tempstore.stor, k, s.stor[k])
   401  		}
   402  		s.ps = tempstore.ps
   403  		s.mem = tempstore.mem
   404  		s.stor = tempstore.stor
   405  	}
   406  	s.mut.Unlock()
   407  	return keys, err
   408  }
   409  
   410  // Close implements the Store interface; it clears up memory and closes the lower
   411  // layer Store.
   412  func (s *MemCachedStore) Close() error {
   413  	// It's always successful.
   414  	_ = s.MemoryStore.Close()
   415  	return s.ps.Close()
   416  }