github.com/jincm/wesharechain@v0.0.0-20210122032815-1537409ce26a/chain/swarm/storage/mock/db/db.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package db implements a mock store that keeps all chunk data in a LevelDB database.
package db

import (
	"archive/tar"
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"sync"
	"time"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage/mock"
)

// GlobalStore contains the LevelDB database that is storing
// chunk data for all swarm nodes.
// Closing the GlobalStore with the Close method is required to
// release resources used by the database.
type GlobalStore struct {
	db *leveldb.DB
	// protects nodes and keys indexes
	// in Put and Delete methods
	nodesLocks sync.Map
	keysLocks  sync.Map
}

// NewGlobalStore creates a new instance of GlobalStore.
func NewGlobalStore(path string) (s *GlobalStore, err error) {
	db, err := leveldb.OpenFile(path, nil)
	if err != nil {
		return nil, err
	}
	return &GlobalStore{
		db: db,
	}, nil
}

// Close releases the resources used by the underlying LevelDB.
func (s *GlobalStore) Close() error {
	return s.db.Close()
}

// NewNodeStore returns a new instance of NodeStore that retrieves and stores
// chunk data only for a node with address addr.
func (s *GlobalStore) NewNodeStore(addr common.Address) *mock.NodeStore {
	return mock.NewNodeStore(addr, s)
}

// Get returns chunk data if the chunk with key exists for node
// on address addr.
func (s *GlobalStore) Get(addr common.Address, key []byte) (data []byte, err error) {
	has, err := s.db.Has(indexForHashesPerNode(addr, key), nil)
	if err != nil {
		return nil, mock.ErrNotFound
	}
	if !has {
		return nil, mock.ErrNotFound
	}
	data, err = s.db.Get(indexDataKey(key), nil)
	if err == leveldb.ErrNotFound {
		err = mock.ErrNotFound
	}
	return
}

// Put saves the chunk data for node with address addr.
func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error {
	unlock, err := s.lock(addr, key)
	if err != nil {
		return err
	}
	defer unlock()

	batch := new(leveldb.Batch)
	batch.Put(indexForHashesPerNode(addr, key), nil)
	batch.Put(indexForNodesWithHash(key, addr), nil)
	batch.Put(indexForNodes(addr), nil)
	batch.Put(indexForHashes(key), nil)
	batch.Put(indexDataKey(key), data)
	return s.db.Write(batch, nil)
}
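
// exampleGlobalStoreUsage is a minimal usage sketch of the GlobalStore API; it
// is illustrative only, and the database path, node address and chunk values
// below are assumed placeholders.
func exampleGlobalStoreUsage() error {
	// Open (or create) the LevelDB-backed global store.
	store, err := NewGlobalStore("/tmp/mock-global-store")
	if err != nil {
		return err
	}
	defer store.Close()

	// Save a chunk for a particular node address.
	addr := common.HexToAddress("0x1111111111111111111111111111111111111111")
	key := []byte("chunk-key")
	if err := store.Put(addr, key, []byte("chunk-data")); err != nil {
		return err
	}

	// Read the chunk back through the same node address.
	data, err := store.Get(addr, key)
	if err != nil {
		return err
	}
	fmt.Printf("retrieved %d bytes\n", len(data))
	return nil
}
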
// Delete removes the chunk reference to node with address addr.
func (s *GlobalStore) Delete(addr common.Address, key []byte) error {
	unlock, err := s.lock(addr, key)
	if err != nil {
		return err
	}
	defer unlock()

	batch := new(leveldb.Batch)
	batch.Delete(indexForHashesPerNode(addr, key))
	batch.Delete(indexForNodesWithHash(key, addr))

	// check if this node contains any keys, and if not
	// remove it from the index of nodes
	x := indexForHashesPerNodePrefix(addr)
	if k, _ := s.db.Get(x, nil); !bytes.HasPrefix(k, x) {
		batch.Delete(indexForNodes(addr))
	}

	// likewise, remove the key from the index of hashes
	// if no node refers to it anymore
	x = indexForNodesWithHashPrefix(key)
	if k, _ := s.db.Get(x, nil); !bytes.HasPrefix(k, x) {
		batch.Delete(indexForHashes(key))
	}
	return s.db.Write(batch, nil)
}

// HasKey returns whether a node with addr contains the key.
func (s *GlobalStore) HasKey(addr common.Address, key []byte) bool {
	has, err := s.db.Has(indexForHashesPerNode(addr, key), nil)
	if err != nil {
		has = false
	}
	return has
}

// Keys returns a paginated list of keys on all nodes.
func (s *GlobalStore) Keys(startKey []byte, limit int) (keys mock.Keys, err error) {
	return s.keys(nil, startKey, limit)
}

// Nodes returns a paginated list of all known nodes.
func (s *GlobalStore) Nodes(startAddr *common.Address, limit int) (nodes mock.Nodes, err error) {
	return s.nodes(nil, startAddr, limit)
}

// NodeKeys returns a paginated list of keys on a node with provided address.
func (s *GlobalStore) NodeKeys(addr common.Address, startKey []byte, limit int) (keys mock.Keys, err error) {
	return s.keys(&addr, startKey, limit)
}

// KeyNodes returns a paginated list of nodes that contain a particular key.
func (s *GlobalStore) KeyNodes(key []byte, startAddr *common.Address, limit int) (nodes mock.Nodes, err error) {
	return s.nodes(key, startAddr, limit)
}

// keys returns a paginated list of keys. If addr is not nil, only keys on that
// node will be returned.
func (s *GlobalStore) keys(addr *common.Address, startKey []byte, limit int) (keys mock.Keys, err error) {
	iter := s.db.NewIterator(nil, nil)
	defer iter.Release()

	if limit <= 0 {
		limit = mock.DefaultLimit
	}

	prefix := []byte{indexForHashesPrefix}
	if addr != nil {
		prefix = indexForHashesPerNodePrefix(*addr)
	}
	if startKey != nil {
		if addr != nil {
			startKey = indexForHashesPerNode(*addr, startKey)
		} else {
			startKey = indexForHashes(startKey)
		}
	} else {
		startKey = prefix
	}

	ok := iter.Seek(startKey)
	if !ok {
		return keys, iter.Error()
	}
	for ; ok; ok = iter.Next() {
		k := iter.Key()
		if !bytes.HasPrefix(k, prefix) {
			break
		}
		key := append([]byte(nil), bytes.TrimPrefix(k, prefix)...)

		if len(keys.Keys) >= limit {
			keys.Next = key
			break
		}

		keys.Keys = append(keys.Keys, key)
	}
	return keys, iter.Error()
}
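
// exampleIterateNodeKeys sketches how the paginated NodeKeys listing is
// consumed; the page size of 100 is an arbitrary assumption.
func exampleIterateNodeKeys(store *GlobalStore, addr common.Address) error {
	var startKey []byte
	for {
		page, err := store.NodeKeys(addr, startKey, 100)
		if err != nil {
			return err
		}
		for _, key := range page.Keys {
			fmt.Printf("node %s has key %x\n", addr.Hex(), key)
		}
		// Next is nil on the last page; otherwise it is the first key of
		// the following page and is passed as the next startKey.
		if page.Next == nil {
			break
		}
		startKey = page.Next
	}
	return nil
}
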
// nodes returns a paginated list of node addresses. If key is not nil,
// only nodes that contain that key will be returned.
func (s *GlobalStore) nodes(key []byte, startAddr *common.Address, limit int) (nodes mock.Nodes, err error) {
	iter := s.db.NewIterator(nil, nil)
	defer iter.Release()

	if limit <= 0 {
		limit = mock.DefaultLimit
	}

	prefix := []byte{indexForNodesPrefix}
	if key != nil {
		prefix = indexForNodesWithHashPrefix(key)
	}
	startKey := prefix
	if startAddr != nil {
		if key != nil {
			startKey = indexForNodesWithHash(key, *startAddr)
		} else {
			startKey = indexForNodes(*startAddr)
		}
	}

	ok := iter.Seek(startKey)
	if !ok {
		return nodes, iter.Error()
	}
	for ; ok; ok = iter.Next() {
		k := iter.Key()
		if !bytes.HasPrefix(k, prefix) {
			break
		}
		addr := common.BytesToAddress(append([]byte(nil), bytes.TrimPrefix(k, prefix)...))

		if len(nodes.Addrs) >= limit {
			nodes.Next = &addr
			break
		}

		nodes.Addrs = append(nodes.Addrs, addr)
	}
	return nodes, iter.Error()
}

// Import reads a tar archive from a reader that contains exported chunk data.
// It returns the number of chunks imported and an error.
func (s *GlobalStore) Import(r io.Reader) (n int, err error) {
	tr := tar.NewReader(r)

	for {
		hdr, err := tr.Next()
		if err != nil {
			if err == io.EOF {
				break
			}
			return n, err
		}

		data, err := ioutil.ReadAll(tr)
		if err != nil {
			return n, err
		}

		var c mock.ExportedChunk
		if err = json.Unmarshal(data, &c); err != nil {
			return n, err
		}

		key := common.Hex2Bytes(hdr.Name)

		batch := new(leveldb.Batch)
		for _, addr := range c.Addrs {
			batch.Put(indexForHashesPerNode(addr, key), nil)
			batch.Put(indexForNodesWithHash(key, addr), nil)
			batch.Put(indexForNodes(addr), nil)
		}

		batch.Put(indexForHashes(key), nil)
		batch.Put(indexDataKey(key), c.Data)

		if err = s.db.Write(batch, nil); err != nil {
			return n, err
		}

		n++
	}
	return n, err
}
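
// exampleExportImport sketches a round trip through Export and Import using an
// in-memory buffer; both parameters are assumed to be already opened
// GlobalStore instances.
func exampleExportImport(src, dst *GlobalStore) error {
	var buf bytes.Buffer

	// Write all chunks of src as a tar archive into the buffer.
	exported, err := src.Export(&buf)
	if err != nil {
		return err
	}

	// Read the same archive back into dst.
	imported, err := dst.Import(&buf)
	if err != nil {
		return err
	}
	fmt.Printf("exported %d chunks, imported %d chunks\n", exported, imported)
	return nil
}
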
// Export writes to a writer a tar archive with all chunk data from
// the store. It returns the number of chunks exported and an error.
func (s *GlobalStore) Export(w io.Writer) (n int, err error) {
	tw := tar.NewWriter(w)
	defer tw.Close()

	buf := bytes.NewBuffer(make([]byte, 0, 1024))
	encoder := json.NewEncoder(buf)

	snap, err := s.db.GetSnapshot()
	if err != nil {
		return 0, err
	}

	iter := snap.NewIterator(util.BytesPrefix([]byte{indexForHashesByNodePrefix}), nil)
	defer iter.Release()

	var currentKey string
	var addrs []common.Address

	saveChunk := func() error {
		hexKey := currentKey

		data, err := snap.Get(indexDataKey(common.Hex2Bytes(hexKey)), nil)
		if err != nil {
			return fmt.Errorf("get data %s: %v", hexKey, err)
		}

		buf.Reset()
		if err = encoder.Encode(mock.ExportedChunk{
			Addrs: addrs,
			Data:  data,
		}); err != nil {
			return err
		}

		d := buf.Bytes()
		hdr := &tar.Header{
			Name: hexKey,
			Mode: 0644,
			Size: int64(len(d)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := tw.Write(d); err != nil {
			return err
		}
		n++
		return nil
	}

	for iter.Next() {
		k := bytes.TrimPrefix(iter.Key(), []byte{indexForHashesByNodePrefix})
		i := bytes.Index(k, []byte{keyTermByte})
		if i < 0 {
			continue
		}
		hexKey := string(k[:i])

		if currentKey == "" {
			currentKey = hexKey
		}

		if hexKey != currentKey {
			if err = saveChunk(); err != nil {
				return n, err
			}

			addrs = addrs[:0]
		}

		currentKey = hexKey
		addrs = append(addrs, common.BytesToAddress(k[i+1:]))
	}

	if len(addrs) > 0 {
		if err = saveChunk(); err != nil {
			return n, err
		}
	}

	return n, iter.Error()
}

var (
	// maximal time for lock to wait until it returns an error
	lockTimeout = 3 * time.Second
	// duration between two lock checks
	lockCheckDelay = 30 * time.Microsecond
	// error returned by the lock method when the lock timeout is reached
	errLockTimeout = errors.New("lock timeout")
)

// lock protects parallel writes in Put and Delete methods for both
// node with provided address and for data with provided key.
func (s *GlobalStore) lock(addr common.Address, key []byte) (unlock func(), err error) {
	start := time.Now()
	nodeLockKey := addr.Hex()
	for {
		_, loaded := s.nodesLocks.LoadOrStore(nodeLockKey, struct{}{})
		if !loaded {
			break
		}
		time.Sleep(lockCheckDelay)
		if time.Since(start) > lockTimeout {
			return nil, errLockTimeout
		}
	}
	start = time.Now()
	keyLockKey := common.Bytes2Hex(key)
	for {
		_, loaded := s.keysLocks.LoadOrStore(keyLockKey, struct{}{})
		if !loaded {
			break
		}
		time.Sleep(lockCheckDelay)
		if time.Since(start) > lockTimeout {
			// release the node lock acquired above, so that a timeout on the
			// key lock does not leave the node address permanently locked
			s.nodesLocks.Delete(nodeLockKey)
			return nil, errLockTimeout
		}
	}
	return func() {
		s.nodesLocks.Delete(nodeLockKey)
		s.keysLocks.Delete(keyLockKey)
	}, nil
}
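
// exampleConcurrentPuts sketches concurrent Put calls from several goroutines;
// the per-node and per-key locks acquired inside Put serialize writes that
// touch the same node address or chunk key. The addresses and payloads below
// are assumed placeholders.
func exampleConcurrentPuts(store *GlobalStore) error {
	var wg sync.WaitGroup
	errc := make(chan error, 4)
	for i := byte(0); i < 4; i++ {
		wg.Add(1)
		go func(i byte) {
			defer wg.Done()
			addr := common.BytesToAddress([]byte{i})
			errc <- store.Put(addr, []byte{i}, []byte("chunk-data"))
		}(i)
	}
	wg.Wait()
	close(errc)
	for err := range errc {
		if err != nil {
			return err
		}
	}
	return nil
}
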
const (
	// prefixes for different indexes
	indexDataPrefix               = 0
	indexForNodesWithHashesPrefix = 1
	indexForHashesByNodePrefix    = 2
	indexForNodesPrefix           = 3
	indexForHashesPrefix          = 4

	// keyTermByte splits keys and node addresses
	// in database keys
	keyTermByte = 0xff
)

// indexForHashesPerNode constructs a database key to store keys used in
// NodeKeys method.
func indexForHashesPerNode(addr common.Address, key []byte) []byte {
	return append(indexForHashesPerNodePrefix(addr), key...)
}

// indexForHashesPerNodePrefix returns a prefix containing a node address used in
// NodeKeys method. Node address is hex encoded to be able to use keyTermByte
// for splitting node address and key.
func indexForHashesPerNodePrefix(addr common.Address) []byte {
	return append([]byte{indexForNodesWithHashesPrefix}, append([]byte(addr.Hex()), keyTermByte)...)
}

// indexForNodesWithHash constructs a database key to store keys used in
// KeyNodes method.
func indexForNodesWithHash(key []byte, addr common.Address) []byte {
	return append(indexForNodesWithHashPrefix(key), addr[:]...)
}

// indexForNodesWithHashPrefix returns a prefix containing a key used in
// KeyNodes method. Key is hex encoded to be able to use keyTermByte
// for splitting key and node address.
func indexForNodesWithHashPrefix(key []byte) []byte {
	return append([]byte{indexForHashesByNodePrefix}, append([]byte(common.Bytes2Hex(key)), keyTermByte)...)
}

// indexForNodes constructs a database key to store keys used in
// Nodes method.
func indexForNodes(addr common.Address) []byte {
	return append([]byte{indexForNodesPrefix}, addr[:]...)
}

// indexForHashes constructs a database key to store keys used in
// Keys method.
func indexForHashes(key []byte) []byte {
	return append([]byte{indexForHashesPrefix}, key...)
}

// indexDataKey constructs a database key for key/data storage.
func indexDataKey(key []byte) []byte {
	return append([]byte{indexDataPrefix}, key...)
}
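
// exampleIndexKeyLayout sketches how the composed database keys look for a
// single chunk stored on a single node; the address and chunk key values are
// assumed placeholders.
func exampleIndexKeyLayout() {
	addr := common.HexToAddress("0x2222222222222222222222222222222222222222")
	chunkKey := []byte{0xaa, 0xbb}

	// prefix byte 1 + hex-encoded address + keyTermByte + raw chunk key
	fmt.Printf("hashes per node: %x\n", indexForHashesPerNode(addr, chunkKey))
	// prefix byte 2 + hex-encoded chunk key + keyTermByte + raw address bytes
	fmt.Printf("nodes with hash: %x\n", indexForNodesWithHash(chunkKey, addr))
	// prefix byte 0 + raw chunk key
	fmt.Printf("data:            %x\n", indexDataKey(chunkKey))
}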