github.com/ethereum-optimism/optimism@v1.7.2/op-node/p2p/store/records_book.go

package store

import (
	"context"
	"encoding"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/ethereum-optimism/optimism/op-service/clock"
	"github.com/ethereum/go-ethereum/log"
	lru "github.com/hashicorp/golang-lru/v2"
	ds "github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/query"
)

const (
	maxPruneBatchSize = 20
)

type record interface {
	SetLastUpdated(time.Time)
	LastUpdated() time.Time
	encoding.BinaryMarshaler
	encoding.BinaryUnmarshaler
}

type recordDiff[V record] interface {
	Apply(v V)
}

var UnknownRecordErr = errors.New("unknown record")

// recordsBook is a generic K-V store to embed in the extended-peerstore.
// It prunes old entries to keep the store small.
// The recordsBook can be wrapped to customize typing more.
type recordsBook[K ~string, V record] struct {
	ctx          context.Context
	cancelFn     context.CancelFunc
	clock        clock.Clock
	log          log.Logger
	bgTasks      sync.WaitGroup
	store        ds.Batching
	cache        *lru.Cache[K, V]
	newRecord    func() V
	dsBaseKey    ds.Key
	dsEntryKey   func(K) ds.Key
	recordExpiry time.Duration // pruning is disabled if this is 0
	sync.RWMutex
}

func newRecordsBook[K ~string, V record](ctx context.Context, logger log.Logger, clock clock.Clock, store ds.Batching, cacheSize int, recordExpiry time.Duration,
	dsBaseKey ds.Key, newRecord func() V, dsEntryKey func(K) ds.Key) (*recordsBook[K, V], error) {
	cache, err := lru.New[K, V](cacheSize)
	if err != nil {
		return nil, fmt.Errorf("failed to create records cache: %w", err)
	}

	ctx, cancelFn := context.WithCancel(ctx)
	book := &recordsBook[K, V]{
		ctx:          ctx,
		cancelFn:     cancelFn,
		clock:        clock,
		log:          logger,
		store:        store,
		cache:        cache,
		newRecord:    newRecord,
		dsBaseKey:    dsBaseKey,
		dsEntryKey:   dsEntryKey,
		recordExpiry: recordExpiry,
	}
	return book, nil
}

func (d *recordsBook[K, V]) startGC() {
	if d.recordExpiry == 0 {
		return
	}
	startGc(d.ctx, d.log, d.clock, &d.bgTasks, d.prune)
}

func (d *recordsBook[K, V]) GetRecord(key K) (V, error) {
	d.RLock()
	defer d.RUnlock()
	rec, err := d.getRecord(key)
	return rec, err
}

func (d *recordsBook[K, V]) dsKey(key K) ds.Key {
	return d.dsBaseKey.Child(d.dsEntryKey(key))
}

func (d *recordsBook[K, V]) deleteRecord(key K) error {
	d.cache.Remove(key)
	err := d.store.Delete(d.ctx, d.dsKey(key))
	if err == nil || errors.Is(err, ds.ErrNotFound) {
		return nil
	}
	return fmt.Errorf("failed to delete entry with key %v: %w", key, err)
}

func (d *recordsBook[K, V]) getRecord(key K) (v V, err error) {
	if val, ok := d.cache.Get(key); ok {
		if d.hasExpired(val) {
			return v, UnknownRecordErr
		}
		return val, nil
	}
	data, err := d.store.Get(d.ctx, d.dsKey(key))
	if errors.Is(err, ds.ErrNotFound) {
		return v, UnknownRecordErr
	} else if err != nil {
		return v, fmt.Errorf("failed to load value of key %v: %w", key, err)
	}
	v = d.newRecord()
	if err := v.UnmarshalBinary(data); err != nil {
		return v, fmt.Errorf("invalid value for key %v: %w", key, err)
	}
	if d.hasExpired(v) {
		return v, UnknownRecordErr
	}
	d.cache.Add(key, v)
	return v, nil
}

func (d *recordsBook[K, V]) SetRecord(key K, diff recordDiff[V]) (V, error) {
	d.Lock()
	defer d.Unlock()
	rec, err := d.getRecord(key)
	if err == UnknownRecordErr { // instantiate new record if it does not exist yet
		rec = d.newRecord()
	} else if err != nil {
		return d.newRecord(), err
	}
	rec.SetLastUpdated(d.clock.Now())
	diff.Apply(rec)
	data, err := rec.MarshalBinary()
	if err != nil {
		return d.newRecord(), fmt.Errorf("failed to encode record for key %v: %w", key, err)
	}
	err = d.store.Put(d.ctx, d.dsKey(key), data)
	if err != nil {
		return d.newRecord(), fmt.Errorf("storing updated record for key %v: %w", key, err)
	}
	d.cache.Add(key, rec)
	return rec, nil
}

// prune deletes entries from the store that are older than the configured prune expiration.
// Entries that are eligible for deletion may still be present either because the prune function hasn't yet run or
// because they are still preserved in the in-memory cache after having been deleted from the database.
// Such expired entries are filtered out in getRecord.
func (d *recordsBook[K, V]) prune() error {
	results, err := d.store.Query(d.ctx, query.Query{
		Prefix: d.dsBaseKey.String(),
	})
	if err != nil {
		return err
	}
	pending := 0
	batch, err := d.store.Batch(d.ctx)
	if err != nil {
		return err
	}
	for result := range results.Next() {
		// Bail out if the context is done
		select {
		case <-d.ctx.Done():
			return d.ctx.Err()
		default:
		}
		v := d.newRecord()
		if err := v.UnmarshalBinary(result.Value); err != nil {
			return err
		}
		if d.hasExpired(v) {
			if pending > maxPruneBatchSize {
				if err := batch.Commit(d.ctx); err != nil {
					return err
				}
				batch, err = d.store.Batch(d.ctx)
				if err != nil {
					return err
				}
				pending = 0
			}
			pending++
			if err := batch.Delete(d.ctx, ds.NewKey(result.Key)); err != nil {
				return err
			}
		}
	}
	if err := batch.Commit(d.ctx); err != nil {
		return err
	}
	return nil
}

func (d *recordsBook[K, V]) hasExpired(v V) bool {
	return v.LastUpdated().Add(d.recordExpiry).Before(d.clock.Now())
}

func (d *recordsBook[K, V]) Close() {
	d.cancelFn()
	d.bgTasks.Wait()
}
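Below is a minimal sketch of how the unexported recordsBook might be wrapped into a typed book inside the same package, following the "can be wrapped to customize typing" note in its doc comment. It is illustrative only and not part of the upstream file: noteRecord, incDiff, notesBook, newNotesBook, the "/notes" base key, and the base32 key encoding are all assumed names; peer.ID is chosen as the key type simply because its underlying type is string, which satisfies the K ~string constraint.

// Hypothetical sketch, not upstream code: a typed wrapper around recordsBook.
package store

import (
	"context"
	"encoding/base32"
	"encoding/binary"
	"errors"
	"time"

	"github.com/ethereum-optimism/optimism/op-service/clock"
	"github.com/ethereum/go-ethereum/log"
	ds "github.com/ipfs/go-datastore"
	"github.com/libp2p/go-libp2p/core/peer"
)

// noteRecord is a minimal record implementation: a single counter plus the
// lastUpdated timestamp required by the record interface.
type noteRecord struct {
	lastUpdated time.Time
	count       uint64
}

func (r *noteRecord) SetLastUpdated(t time.Time) { r.lastUpdated = t }
func (r *noteRecord) LastUpdated() time.Time     { return r.lastUpdated }

// MarshalBinary encodes the record as 8 bytes of unix-millisecond timestamp
// followed by 8 bytes of counter.
func (r *noteRecord) MarshalBinary() ([]byte, error) {
	data := make([]byte, 16)
	binary.BigEndian.PutUint64(data[:8], uint64(r.lastUpdated.UnixMilli()))
	binary.BigEndian.PutUint64(data[8:], r.count)
	return data, nil
}

func (r *noteRecord) UnmarshalBinary(data []byte) error {
	if len(data) != 16 {
		return errors.New("invalid noteRecord length")
	}
	r.lastUpdated = time.UnixMilli(int64(binary.BigEndian.Uint64(data[:8])))
	r.count = binary.BigEndian.Uint64(data[8:])
	return nil
}

// incDiff bumps the counter; Apply satisfies recordDiff[*noteRecord].
type incDiff struct{}

func (incDiff) Apply(r *noteRecord) { r.count++ }

// notesBook wraps recordsBook to expose a typed API keyed by peer ID.
type notesBook struct {
	book *recordsBook[peer.ID, *noteRecord]
}

func newNotesBook(ctx context.Context, logger log.Logger, cl clock.Clock, store ds.Batching) (*notesBook, error) {
	book, err := newRecordsBook[peer.ID, *noteRecord](
		ctx, logger, cl, store,
		100,          // cache size
		24*time.Hour, // record expiry; 0 would disable pruning
		ds.NewKey("/notes"),
		func() *noteRecord { return new(noteRecord) },
		// Encode the binary peer ID so it forms a safe datastore key component.
		func(id peer.ID) ds.Key { return ds.NewKey(base32.RawStdEncoding.EncodeToString([]byte(id))) },
	)
	if err != nil {
		return nil, err
	}
	book.startGC() // start background pruning of expired entries
	return &notesBook{book: book}, nil
}

// Increment applies incDiff to the peer's record, creating it if needed.
func (n *notesBook) Increment(id peer.ID) (*noteRecord, error) {
	return n.book.SetRecord(id, incDiff{})
}

func (n *notesBook) Get(id peer.ID) (*noteRecord, error) {
	return n.book.GetRecord(id)
}

func (n *notesBook) Close() {
	n.book.Close()
}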