github.com/ethersphere/bee/v2@v2.2.0/pkg/storage/storage.go

// Copyright 2022 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package storage

import (
	"context"
	"errors"
	"fmt"
	"io"

	"github.com/ethersphere/bee/v2/pkg/cac"
	"github.com/ethersphere/bee/v2/pkg/sharky"
	"github.com/ethersphere/bee/v2/pkg/soc"
	"github.com/ethersphere/bee/v2/pkg/swarm"
)

var (
	ErrOverwriteNewerChunk = errors.New("overwriting chunk with newer timestamp")
)

// Result represents the item returned by a read operation. Depending on the
// query, it carries the whole Item, or only the ID and/or Size when the whole
// Item is not needed.
type Result struct {
	// ID is the Key.ID of the result Item.
	ID string

	// Size is the size of the result Item.
	Size int

	// Entry is the result Item.
	Entry Item
}

// IterateFn iterates through the Items of the store in the Key.Namespace.
// The function returns a boolean to indicate whether the iteration should stop.
type IterateFn func(Result) (bool, error)

// Filter subtracts entries from the iteration. A Filter does not construct the
// Item from the serialized bytes; instead, callers can inspect the entries
// directly in byte form, or partially or fully unmarshal the data and check.
type Filter func(string, []byte) bool

// QueryItemProperty tells the Query which Item
// property should be loaded from the store into the result.
type QueryItemProperty int

const (
	// QueryItem indicates interest in the whole Item.
	QueryItem QueryItemProperty = iota

	// QueryItemID indicates interest in the Result.ID.
	// No data will be unmarshaled.
	QueryItemID

	// QueryItemSize indicates interest in the Result.Size.
	// No data will be unmarshaled.
	QueryItemSize
)

// Order represents the order of the iteration.
type Order int

const (
	// KeyAscendingOrder indicates a forward iteration based on the ordering of keys.
	KeyAscendingOrder Order = iota

	// KeyDescendingOrder indicates a backward iteration based on the ordering of keys.
	KeyDescendingOrder
)

// ErrInvalidQuery indicates that the query is not a valid query.
var (
	ErrInvalidQuery    = errors.New("storage: invalid query")
	ErrNotFound        = errors.New("storage: not found")
	ErrReferenceLength = errors.New("storage: invalid reference length")
	ErrInvalidChunk    = errors.New("storage: invalid chunk")
)

// Query denotes the iteration attributes.
type Query struct {
	// Factory is a constructor passed by the client
	// to construct a new object for each result.
	Factory func() Item

	// Prefix indicates interest in an item
	// that contains this prefix in its ID.
	Prefix string

	// PrefixAtStart indicates that the
	// iteration should start at the prefix.
	PrefixAtStart bool

	// SkipFirst skips the first element in the iteration.
	SkipFirst bool

	// ItemProperty indicates a specific interest in an Item property.
	ItemProperty QueryItemProperty

	// Order denotes the order of iteration.
	Order Order

	// Filters represent further constraints on the iteration.
	Filters []Filter
}
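
// The helper below is an illustrative sketch only and is not part of the
// upstream package: it shows how a caller might combine Query with
// Reader.Iterate (declared further down in this file) to collect the IDs of
// all items in a namespace without unmarshaling them. The itemFactory
// parameter is a hypothetical constructor for the namespace's concrete Item
// type.
func collectItemIDs(r Reader, itemFactory func() Item) ([]string, error) {
	q := Query{
		Factory:      itemFactory,
		ItemProperty: QueryItemID, // only IDs are of interest; no data is unmarshaled
		Order:        KeyAscendingOrder,
	}
	if err := q.Validate(); err != nil {
		return nil, err
	}
	var ids []string
	err := r.Iterate(q, func(res Result) (bool, error) {
		ids = append(ids, res.ID)
		return false, nil // returning false continues the iteration
	})
	return ids, err
}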

// Validate checks if the query is a valid query.
func (q Query) Validate() error {
	if q.ItemProperty == QueryItem && q.Factory == nil {
		return fmt.Errorf("missing Factory: %w", ErrInvalidQuery)
	}
	return nil
}

// Key represents the item identifiers.
type Key interface {
	// ID is the unique identifier of the Item.
	ID() string

	// Namespace is used to separate similar items.
	// E.g.: it can be seen as a table construct.
	Namespace() string
}

// Marshaler is the interface implemented by types
// that can marshal themselves into a valid serialized representation of an Item.
type Marshaler interface {
	Marshal() ([]byte, error)
}

// Unmarshaler is the interface implemented by types
// that can unmarshal a serialized description of themselves.
// The input can be assumed to be a valid encoding of
// an Item value.
type Unmarshaler interface {
	Unmarshal([]byte) error
}

// Cloner makes a deep copy of the Item.
type Cloner interface {
	Clone() Item
}

// Item represents an item which can be used in the Store.
type Item interface {
	Key
	Marshaler
	Unmarshaler
	Cloner
	fmt.Stringer
}

// Store contains the methods required for the Data Abstraction Layer.
type Store interface {
	io.Closer

	Reader
	Writer
}

// Reader groups methods that read from the store.
type Reader interface {
	// Get unmarshals the object with the given Item.Key.ID into the given Item.
	Get(Item) error

	// Has reports whether the Item with the given Key.ID exists in the store.
	Has(Key) (bool, error)

	// GetSize returns the size of the Item with the given Key.ID.
	GetSize(Key) (int, error)

	// Iterate executes the given IterateFn on this store.
	// The Result of the iteration will be affected by the given Query.
	Iterate(Query, IterateFn) error

	// Count returns the count of items in the
	// store that are in the same Key.Namespace.
	Count(Key) (int, error)
}

// Writer groups methods that change the state of the store.
type Writer interface {
	// Put inserts or updates the given Item identified by its Key.ID.
	Put(Item) error

	// Delete removes the given Item from the store.
	// It does not return an error if the key does not exist.
	Delete(Item) error
}

// BatchStore is a store that supports batching of Writer method calls.
type BatchStore interface {
	Store
	Batcher
}

// Recoverer allows a store to recover from a failure when
// a transaction was not committed or rolled back.
type Recoverer interface {
	Recover() error
}

// IndexStore groups the Reader and Writer methods of a store.
type IndexStore interface {
	Reader
	Writer
}

// Sharky groups the blob-store operations for reading, writing and
// releasing data at sharky locations.
type Sharky interface {
	Read(context.Context, sharky.Location, []byte) error
	Write(context.Context, []byte) (sharky.Location, error)
	Release(context.Context, sharky.Location) error
}

// SizeReporter reports the current size and the total capacity of a store.
type SizeReporter interface {
	Size() (uint64, error)
	Capacity() uint64
}
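
// The type below is an illustrative sketch and is not part of the upstream
// package: a minimal Item implementation backed by a raw byte payload, showing
// what the Key, Marshaler, Unmarshaler, Cloner and fmt.Stringer requirements
// look like in practice. The "example" namespace and the field names are
// hypothetical.
type exampleItem struct {
	id   string
	data []byte
}

func (e *exampleItem) ID() string        { return e.id }
func (e *exampleItem) Namespace() string { return "example" }

// Marshal returns a copy of the raw payload.
func (e *exampleItem) Marshal() ([]byte, error) {
	return append([]byte(nil), e.data...), nil
}

// Unmarshal stores a copy of the given bytes as the payload.
func (e *exampleItem) Unmarshal(b []byte) error {
	e.data = append([]byte(nil), b...)
	return nil
}

// Clone makes a deep copy of the item.
func (e *exampleItem) Clone() Item {
	if e == nil {
		return nil
	}
	return &exampleItem{id: e.id, data: append([]byte(nil), e.data...)}
}

func (e *exampleItem) String() string {
	return fmt.Sprintf("exampleItem { id: %s, size: %d }", e.id, len(e.data))
}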

// Descriptor holds the information required for pull syncing. This struct
// is provided by subscribing to the pull index.
type Descriptor struct {
	Address swarm.Address
	BinID   uint64
}

func (d *Descriptor) String() string {
	if d == nil {
		return ""
	}
	return fmt.Sprintf("%s bin id %v", d.Address, d.BinID)
}

// PullSubscriber provides a subscription to chunk descriptors for pull syncing.
type PullSubscriber interface {
	SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan Descriptor, closed <-chan struct{}, stop func())
}

// PushSubscriber provides a subscription to chunks that are to be pushed to the network.
type PushSubscriber interface {
	SubscribePush(ctx context.Context) (c <-chan swarm.Chunk, stop func())
}

// ChunkState describes the state of a chunk as reported by the pusher component.
type ChunkState = int

const (
	// ChunkSent is used by the pusher component to notify about a successful push of a
	// chunk from the node. A chunk could be retried on failure, so this sent count is
	// maintained to understand how many attempts were made by the node while pushing.
	// Attempts are registered only when an actual request was sent from this node.
	ChunkSent ChunkState = iota
	// ChunkStored is used by the pusher component to notify that the uploader node is
	// the closest node and has stored the chunk.
	ChunkStored
	// ChunkSynced is used by the pusher component to notify that the chunk is synced to the
	// network. This is reported when a valid receipt was received after the chunk was
	// pushed.
	ChunkSynced
	// ChunkCouldNotSync is used by the pusher component to notify that the chunk could
	// not be synced to the network.
	ChunkCouldNotSync
)

// PushReporter is used to report chunk state.
type PushReporter interface {
	Report(context.Context, swarm.Chunk, ChunkState) error
}

// ErrBatchCommitted is returned by a Batch.Commit
// call when the batch has already been committed.
var ErrBatchCommitted = errors.New("storage: batch has already been committed")

// Batch provides a set of operations that are batched.
type Batch interface {
	// Put adds a new item to the batch.
	Put(Item) error

	// Delete adds a new delete operation to the batch.
	Delete(Item) error

	// Commit commits the batch.
	Commit() error
}

// Batcher specifies a constructor for creating new batches.
type Batcher interface {
	// Batch returns a new Batch.
	Batch(context.Context) Batch
}

// ChunkType determines the type of the given chunk: content addressed,
// single owner, or unspecified.
func ChunkType(ch swarm.Chunk) swarm.ChunkType {
	if cac.Valid(ch) {
		return swarm.ChunkTypeContentAddressed
	} else if soc.Valid(ch) {
		return swarm.ChunkTypeSingleOwner
	}
	return swarm.ChunkTypeUnspecified
}
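
// The helper below is an illustrative sketch and is not part of the upstream
// package: it shows how a caller might group several writes into a single
// Batch obtained from a Batcher and commit them together. The items argument
// and the function name are hypothetical.
func putAllBatched(ctx context.Context, b Batcher, items []Item) error {
	batch := b.Batch(ctx)
	for _, it := range items {
		if err := batch.Put(it); err != nil {
			return fmt.Errorf("batch put %s: %w", it.ID(), err)
		}
	}
	// Commit returns ErrBatchCommitted if the batch was already committed.
	return batch.Commit()
}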