github.com/onflow/atree@v0.6.0/storage.go

/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright 2021-2022 Dapper Labs, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"sort"
	"strings"
	"sync"

	"github.com/fxamacker/cbor/v2"
)

const LedgerBaseStorageSlabPrefix = "$"

type (
	Address      [8]byte
	StorageIndex [8]byte

	StorageID struct {
		Address Address
		Index   StorageIndex
	}
)

var (
	AddressUndefined      = Address{}
	StorageIndexUndefined = StorageIndex{}
	StorageIDUndefined    = StorageID{}
)

// Next returns new StorageIndex with index+1 value.
// The caller is responsible for preventing overflow
// by checking if the index value is valid before
// calling this function.
func (index StorageIndex) Next() StorageIndex {
	i := binary.BigEndian.Uint64(index[:])

	var next StorageIndex
	binary.BigEndian.PutUint64(next[:], i+1)

	return next
}

func NewStorageID(address Address, index StorageIndex) StorageID {
	return StorageID{address, index}
}

func NewStorageIDFromRawBytes(b []byte) (StorageID, error) {
	if len(b) < storageIDSize {
		return StorageID{}, NewStorageIDErrorf("incorrect storage id buffer length %d", len(b))
	}

	var address Address
	copy(address[:], b)

	var index StorageIndex
	copy(index[:], b[8:])

	return StorageID{address, index}, nil
}

func (id StorageID) ToRawBytes(b []byte) (int, error) {
	if len(b) < storageIDSize {
		return 0, NewStorageIDErrorf("incorrect storage id buffer length %d", len(b))
	}
	copy(b, id.Address[:])
	copy(b[8:], id.Index[:])
	return storageIDSize, nil
}

func (id StorageID) String() string {
	return fmt.Sprintf(
		"0x%x.%d",
		binary.BigEndian.Uint64(id.Address[:]),
		binary.BigEndian.Uint64(id.Index[:]),
	)
}

func (id StorageID) AddressAsUint64() uint64 {
	return binary.BigEndian.Uint64(id.Address[:])
}

func (id StorageID) IndexAsUint64() uint64 {
	return binary.BigEndian.Uint64(id.Index[:])
}

func (id StorageID) Valid() error {
	if id == StorageIDUndefined {
		return NewStorageIDError("undefined storage id")
	}
	if id.Index == StorageIndexUndefined {
		return NewStorageIDError("undefined storage index")
	}
	return nil
}

func (id StorageID) Compare(other StorageID) int {
	result := bytes.Compare(id.Address[:], other.Address[:])
	if result == 0 {
		return bytes.Compare(id.Index[:], other.Index[:])
	}
	return result
}

type BaseStorageUsageReporter interface {
	BytesRetrieved() int
	BytesStored() int
	SegmentsReturned() int
	SegmentsUpdated() int
	SegmentsTouched() int
	ResetReporter()
}

type BaseStorage interface {
	Store(StorageID, []byte) error
	Retrieve(StorageID) ([]byte, bool, error)
	Remove(StorageID) error
	GenerateStorageID(Address) (StorageID, error)
	SegmentCounts() int // number of segments stored in the storage
	Size() int          // total byte size stored
	BaseStorageUsageReporter
}
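
// Illustrative sketch (not part of the original file): encoding a StorageID to
// raw bytes and decoding it back. storageIDSize is the package-level constant
// for the encoded length (8-byte address + 8-byte index, defined elsewhere in
// the package); the address and index values below are made up for the example.
//
//	var address Address
//	binary.BigEndian.PutUint64(address[:], 0x1)
//
//	var index StorageIndex
//	binary.BigEndian.PutUint64(index[:], 42)
//
//	id := NewStorageID(address, index)
//
//	buf := make([]byte, storageIDSize)
//	if _, err := id.ToRawBytes(buf); err != nil {
//		// handle error
//	}
//
//	decoded, err := NewStorageIDFromRawBytes(buf)
//	if err != nil {
//		// handle error
//	}
//	_ = decoded.Compare(id) // 0: the round trip preserves the ID
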
type Ledger interface {
	// GetValue gets a value for the given key in the storage, owned by the given account.
	GetValue(owner, key []byte) (value []byte, err error)
	// SetValue sets a value for the given key in the storage, owned by the given account.
	SetValue(owner, key, value []byte) (err error)
	// ValueExists returns true if the given key exists in the storage, owned by the given account.
	ValueExists(owner, key []byte) (exists bool, err error)
	// AllocateStorageIndex allocates a new storage index under the given account.
	AllocateStorageIndex(owner []byte) (StorageIndex, error)
}

type LedgerBaseStorage struct {
	ledger         Ledger
	bytesRetrieved int
	bytesStored    int
}

var _ BaseStorage = &LedgerBaseStorage{}

func NewLedgerBaseStorage(ledger Ledger) *LedgerBaseStorage {
	return &LedgerBaseStorage{
		ledger:         ledger,
		bytesRetrieved: 0,
		bytesStored:    0,
	}
}

func (s *LedgerBaseStorage) Retrieve(id StorageID) ([]byte, bool, error) {
	v, err := s.ledger.GetValue(id.Address[:], SlabIndexToLedgerKey(id.Index))
	s.bytesRetrieved += len(v)

	if err != nil {
		// Wrap err as external error (if needed) because err is returned by Ledger interface.
		return nil, false, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", id))
	}

	return v, len(v) > 0, nil
}

func (s *LedgerBaseStorage) Store(id StorageID, data []byte) error {
	s.bytesStored += len(data)
	err := s.ledger.SetValue(id.Address[:], SlabIndexToLedgerKey(id.Index), data)

	if err != nil {
		// Wrap err as external error (if needed) because err is returned by Ledger interface.
		return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", id))
	}

	return nil
}

func (s *LedgerBaseStorage) Remove(id StorageID) error {
	err := s.ledger.SetValue(id.Address[:], SlabIndexToLedgerKey(id.Index), nil)

	if err != nil {
		// Wrap err as external error (if needed) because err is returned by Ledger interface.
		return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", id))
	}

	return nil
}

func (s *LedgerBaseStorage) GenerateStorageID(address Address) (StorageID, error) {
	idx, err := s.ledger.AllocateStorageIndex(address[:])

	if err != nil {
		// Wrap err as external error (if needed) because err is returned by Ledger interface.
		return StorageID{},
			wrapErrorfAsExternalErrorIfNeeded(
				err,
				fmt.Sprintf("failed to generate storage ID with address 0x%x", address),
			)
	}

	return NewStorageID(address, idx), nil
}
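
// Illustrative sketch (assumption, not part of the original file): a minimal
// in-memory Ledger that can back LedgerBaseStorage, e.g. in tests. The type
// name inMemoryLedger and its key scheme are made up for the example.
//
//	type inMemoryLedger struct {
//		values  map[string][]byte
//		indexes map[string]StorageIndex
//	}
//
//	func (l *inMemoryLedger) GetValue(owner, key []byte) ([]byte, error) {
//		return l.values[string(owner)+"|"+string(key)], nil
//	}
//
//	func (l *inMemoryLedger) SetValue(owner, key, value []byte) error {
//		l.values[string(owner)+"|"+string(key)] = value
//		return nil
//	}
//
//	func (l *inMemoryLedger) ValueExists(owner, key []byte) (bool, error) {
//		v, err := l.GetValue(owner, key)
//		return len(v) > 0, err
//	}
//
//	func (l *inMemoryLedger) AllocateStorageIndex(owner []byte) (StorageIndex, error) {
//		next := l.indexes[string(owner)].Next()
//		l.indexes[string(owner)] = next
//		return next, nil
//	}
//
//	baseStorage := NewLedgerBaseStorage(&inMemoryLedger{
//		values:  map[string][]byte{},
//		indexes: map[string]StorageIndex{},
//	})
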
func SlabIndexToLedgerKey(ind StorageIndex) []byte {
	return []byte(LedgerBaseStorageSlabPrefix + string(ind[:]))
}

func LedgerKeyIsSlabKey(key string) bool {
	return strings.HasPrefix(key, LedgerBaseStorageSlabPrefix)
}

func (s *LedgerBaseStorage) BytesRetrieved() int {
	return s.bytesRetrieved
}

func (s *LedgerBaseStorage) BytesStored() int {
	return s.bytesStored
}

func (s *LedgerBaseStorage) SegmentCounts() int {
	// TODO
	return 0
}

func (s *LedgerBaseStorage) Size() int {
	// TODO
	return 0
}

func (s *LedgerBaseStorage) SegmentsReturned() int {
	// TODO
	return 0
}

func (s *LedgerBaseStorage) SegmentsUpdated() int {
	// TODO
	return 0
}

func (s *LedgerBaseStorage) SegmentsTouched() int {
	// TODO
	return 0
}

func (s *LedgerBaseStorage) ResetReporter() {
	s.bytesStored = 0
	s.bytesRetrieved = 0
}

type SlabIterator func() (StorageID, Slab)

type SlabStorage interface {
	Store(StorageID, Slab) error
	Retrieve(StorageID) (Slab, bool, error)
	Remove(StorageID) error
	GenerateStorageID(address Address) (StorageID, error)
	Count() int
	SlabIterator() (SlabIterator, error)
}

type BasicSlabStorage struct {
	Slabs          map[StorageID]Slab
	storageIndex   map[Address]StorageIndex
	DecodeStorable StorableDecoder
	DecodeTypeInfo TypeInfoDecoder
	cborEncMode    cbor.EncMode
	cborDecMode    cbor.DecMode
}

var _ SlabStorage = &BasicSlabStorage{}

func NewBasicSlabStorage(
	cborEncMode cbor.EncMode,
	cborDecMode cbor.DecMode,
	decodeStorable StorableDecoder,
	decodeTypeInfo TypeInfoDecoder,
) *BasicSlabStorage {
	return &BasicSlabStorage{
		Slabs:          make(map[StorageID]Slab),
		storageIndex:   make(map[Address]StorageIndex),
		cborEncMode:    cborEncMode,
		cborDecMode:    cborDecMode,
		DecodeStorable: decodeStorable,
		DecodeTypeInfo: decodeTypeInfo,
	}
}

func (s *BasicSlabStorage) GenerateStorageID(address Address) (StorageID, error) {
	index := s.storageIndex[address]
	nextIndex := index.Next()

	s.storageIndex[address] = nextIndex
	return NewStorageID(address, nextIndex), nil
}

func (s *BasicSlabStorage) Retrieve(id StorageID) (Slab, bool, error) {
	slab, ok := s.Slabs[id]
	return slab, ok, nil
}

func (s *BasicSlabStorage) Store(id StorageID, slab Slab) error {
	s.Slabs[id] = slab
	return nil
}

func (s *BasicSlabStorage) Remove(id StorageID) error {
	delete(s.Slabs, id)
	return nil
}

func (s *BasicSlabStorage) Count() int {
	return len(s.Slabs)
}

func (s *BasicSlabStorage) StorageIDs() []StorageID {
	result := make([]StorageID, 0, len(s.Slabs))
	for storageID := range s.Slabs {
		result = append(result, storageID)
	}
	return result
}
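
// Illustrative sketch (assumption, not part of the original file): constructing
// a BasicSlabStorage for tests. decodeStorable and decodeTypeInfo stand in for
// application-provided decoders; the cbor options shown are just one reasonable
// choice, not the modes atree itself mandates.
//
//	encMode, err := cbor.CanonicalEncOptions().EncMode()
//	if err != nil {
//		// handle error
//	}
//	decMode, err := cbor.DecOptions{}.DecMode()
//	if err != nil {
//		// handle error
//	}
//
//	storage := NewBasicSlabStorage(encMode, decMode, decodeStorable, decodeTypeInfo)
//
//	// Each call to GenerateStorageID increments the per-address index.
//	id, err := storage.GenerateStorageID(Address{0, 0, 0, 0, 0, 0, 0, 1})
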
// Encode returns serialized slabs in storage.
// This is currently used for testing.
func (s *BasicSlabStorage) Encode() (map[StorageID][]byte, error) {
	m := make(map[StorageID][]byte)
	for id, slab := range s.Slabs {
		b, err := Encode(slab, s.cborEncMode)
		if err != nil {
			// err is already categorized by Encode().
			return nil, err
		}
		m[id] = b
	}
	return m, nil
}

func (s *BasicSlabStorage) SlabIterator() (SlabIterator, error) {
	var slabs []struct {
		StorageID
		Slab
	}

	for id, slab := range s.Slabs {
		slabs = append(slabs, struct {
			StorageID
			Slab
		}{
			StorageID: id,
			Slab:      slab,
		})
	}

	var i int

	return func() (StorageID, Slab) {
		if i >= len(slabs) {
			return StorageIDUndefined, nil
		}
		slabEntry := slabs[i]
		i++
		return slabEntry.StorageID, slabEntry.Slab
	}, nil
}
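
// Illustrative sketch (not part of the original file): draining a SlabIterator.
// The iterator signals exhaustion by returning StorageIDUndefined, so a plain
// loop with a break is enough.
//
//	iterator, err := storage.SlabIterator()
//	if err != nil {
//		// handle error
//	}
//	for {
//		id, slab := iterator()
//		if id == StorageIDUndefined {
//			break
//		}
//		fmt.Println(id, slab)
//	}
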
// CheckStorageHealth checks the health of slab storage.
// It traverses the slabs and checks these factors:
//   - Each non-root slab has only a single parent reference (no double referencing).
//   - Every child of a parent shares the same ownership (childStorageID.Address == parentStorageID.Address).
//   - The number of root slabs is equal to the expected number (skipped if expectedNumberOfRootSlabs is -1).
//
// This should be used for testing purposes only, as it might be slow to process.
func CheckStorageHealth(storage SlabStorage, expectedNumberOfRootSlabs int) (map[StorageID]struct{}, error) {
	parentOf := make(map[StorageID]StorageID)
	leaves := make([]StorageID, 0)

	slabIterator, err := storage.SlabIterator()
	if err != nil {
		// Wrap err as external error (if needed) because err is returned by SlabStorage interface.
		return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to create slab iterator")
	}

	slabs := map[StorageID]Slab{}

	for {
		id, slab := slabIterator()
		if id == StorageIDUndefined {
			break
		}

		if _, ok := slabs[id]; ok {
			return nil, NewFatalError(fmt.Errorf("duplicate slab %s", id))
		}
		slabs[id] = slab

		atLeastOneExternalSlab := false
		childStorables := slab.ChildStorables()

		for len(childStorables) > 0 {

			var next []Storable

			for _, s := range childStorables {

				if sids, ok := s.(StorageIDStorable); ok {
					sid := StorageID(sids)
					if _, found := parentOf[sid]; found {
						return nil, NewFatalError(fmt.Errorf("two parents are captured for the slab %s", sid))
					}
					parentOf[sid] = id
					atLeastOneExternalSlab = true
				}

				next = append(next, s.ChildStorables()...)
			}

			childStorables = next
		}

		if !atLeastOneExternalSlab {
			leaves = append(leaves, id)
		}
	}

	rootsMap := make(map[StorageID]struct{})
	visited := make(map[StorageID]struct{})
	var id StorageID
	for _, leaf := range leaves {
		id = leaf
		if _, ok := visited[id]; ok {
			return nil, NewFatalError(fmt.Errorf("at least two references found to the leaf slab %s", id))
		}
		visited[id] = struct{}{}
		for {
			parentID, found := parentOf[id]
			if !found {
				// we reached the root
				rootsMap[id] = struct{}{}
				break
			}
			visited[parentID] = struct{}{}

			childSlab, ok, err := storage.Retrieve(id)
			if !ok {
				return nil, NewSlabNotFoundErrorf(id, "failed to get child slab")
			}
			if err != nil {
				// Wrap err as external error (if needed) because err is returned by SlabStorage interface.
				return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve child slab %s", id))
			}

			parentSlab, ok, err := storage.Retrieve(parentID)
			if !ok {
				return nil, NewSlabNotFoundErrorf(id, "failed to get parent slab")
			}
			if err != nil {
				// Wrap err as external error (if needed) because err is returned by SlabStorage interface.
				return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve parent slab %s", parentID))
			}

			childOwner := childSlab.ID().Address
			parentOwner := parentSlab.ID().Address

			if childOwner != parentOwner {
				return nil, NewFatalError(
					fmt.Errorf(
						"parent and child are not owned by the same account: child.owner %s, parent.owner %s",
						childOwner,
						parentOwner,
					))
			}
			id = parentID
		}
	}

	if len(visited) != len(slabs) {

		var unreachableID StorageID
		var unreachableSlab Slab

		for id, slab := range slabs {
			if _, ok := visited[id]; !ok {
				unreachableID = id
				unreachableSlab = slab
				break
			}
		}

		return nil, NewFatalError(
			fmt.Errorf(
				"slab was not reachable from leaves: %s: %s",
				unreachableID,
				unreachableSlab,
			))
	}

	if (expectedNumberOfRootSlabs >= 0) && (len(rootsMap) != expectedNumberOfRootSlabs) {
		return nil, NewFatalError(
			fmt.Errorf(
				"number of root slabs doesn't match: expected %d, got %d",
				expectedNumberOfRootSlabs,
				len(rootsMap),
			))
	}

	return rootsMap, nil
}
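
// Illustrative sketch (not part of the original file): using CheckStorageHealth
// in a test after building an array or map. Pass -1 to skip the root-slab count
// check; the *testing.T handling is an assumption for the example.
//
//	rootSlabIDs, err := CheckStorageHealth(storage, -1)
//	if err != nil {
//		t.Fatalf("slab storage is unhealthy: %s", err)
//	}
//	_ = rootSlabIDs // set of root slab IDs reachable from the leaves
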
type PersistentSlabStorage struct {
	baseStorage      BaseStorage
	cache            map[StorageID]Slab
	deltas           map[StorageID]Slab
	tempStorageIndex uint64
	DecodeStorable   StorableDecoder
	DecodeTypeInfo   TypeInfoDecoder
	cborEncMode      cbor.EncMode
	cborDecMode      cbor.DecMode
}

var _ SlabStorage = &PersistentSlabStorage{}

func (s *PersistentSlabStorage) SlabIterator() (SlabIterator, error) {

	var slabs []struct {
		StorageID
		Slab
	}

	appendChildStorables := func(slab Slab) error {
		childStorables := slab.ChildStorables()

		for len(childStorables) > 0 {

			var nextChildStorables []Storable

			for _, childStorable := range childStorables {

				storageIDStorable, ok := childStorable.(StorageIDStorable)
				if !ok {
					continue
				}

				id := StorageID(storageIDStorable)

				if _, ok := s.deltas[id]; ok {
					continue
				}

				if _, ok := s.cache[id]; ok {
					continue
				}

				var err error
				slab, ok, err = s.RetrieveIgnoringDeltas(id)
				if !ok {
					return NewSlabNotFoundErrorf(id, "slab not found during slab iteration")
				}
				if err != nil {
					return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", id))
				}

				slabs = append(slabs, struct {
					StorageID
					Slab
				}{
					StorageID: id,
					Slab:      slab,
				})

				nextChildStorables = append(
					nextChildStorables,
					slab.ChildStorables()...,
				)
			}

			childStorables = nextChildStorables
		}

		return nil
	}

	appendSlab := func(id StorageID, slab Slab) error {
		slabs = append(slabs, struct {
			StorageID
			Slab
		}{
			StorageID: id,
			Slab:      slab,
		})

		return appendChildStorables(slab)
	}

	for id, slab := range s.deltas {
		if slab == nil {
			continue
		}

		err := appendSlab(id, slab)
		if err != nil {
			return nil, err
		}
	}

	// Create a temporary copy of all the cached IDs,
	// as s.cache will get mutated inside the for-loop

	var cached []StorageID
	for id := range s.cache {
		cached = append(cached, id)
	}

	for _, id := range cached {
		slab := s.cache[id]

		if slab == nil {
			continue
		}

		if _, ok := s.deltas[id]; ok {
			continue
		}

		err := appendSlab(id, slab)
		if err != nil {
			return nil, err
		}
	}

	var i int

	return func() (StorageID, Slab) {
		if i >= len(slabs) {
			return StorageIDUndefined, nil
		}
		slabEntry := slabs[i]
		i++
		return slabEntry.StorageID, slabEntry.Slab
	}, nil
}

type StorageOption func(st *PersistentSlabStorage) *PersistentSlabStorage

func NewPersistentSlabStorage(
	base BaseStorage,
	cborEncMode cbor.EncMode,
	cborDecMode cbor.DecMode,
	decodeStorable StorableDecoder,
	decodeTypeInfo TypeInfoDecoder,
	opts ...StorageOption,
) *PersistentSlabStorage {
	storage := &PersistentSlabStorage{
		baseStorage:    base,
		cache:          make(map[StorageID]Slab),
		deltas:         make(map[StorageID]Slab),
		cborEncMode:    cborEncMode,
		cborDecMode:    cborDecMode,
		DecodeStorable: decodeStorable,
		DecodeTypeInfo: decodeTypeInfo,
	}

	for _, applyOption := range opts {
		storage = applyOption(storage)
	}

	return storage
}

func (s *PersistentSlabStorage) GenerateStorageID(address Address) (StorageID, error) {
	if address == AddressUndefined {
		var idx StorageIndex
		s.tempStorageIndex++
		binary.BigEndian.PutUint64(idx[:], s.tempStorageIndex)
		return NewStorageID(address, idx), nil
	}
	id, err := s.baseStorage.GenerateStorageID(address)
	if err != nil {
		// Wrap err as external error (if needed) because err is returned by BaseStorage interface.
		return StorageID{}, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to generate storage ID for address 0x%x", address))
	}
	return id, nil
}

func (s *PersistentSlabStorage) sortedOwnedDeltaKeys() []StorageID {
	keysWithOwners := make([]StorageID, 0, len(s.deltas))
	for k := range s.deltas {
		// ignore the ones that are not owned by accounts
		if k.Address != AddressUndefined {
			keysWithOwners = append(keysWithOwners, k)
		}
	}

	sort.Slice(keysWithOwners, func(i, j int) bool {
		a := keysWithOwners[i]
		b := keysWithOwners[j]
		if a.Address == b.Address {
			return a.IndexAsUint64() < b.IndexAsUint64()
		}
		return a.AddressAsUint64() < b.AddressAsUint64()
	})
	return keysWithOwners
}
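
// Illustrative sketch (assumption, not part of the original file): building a
// PersistentSlabStorage on top of a ledger-backed BaseStorage. The no-op option
// only shows the StorageOption shape; baseStorage, encMode, decMode and the
// decoders are names carried over from the earlier sketches.
//
//	noop := func(st *PersistentSlabStorage) *PersistentSlabStorage { return st }
//
//	persistentStorage := NewPersistentSlabStorage(
//		baseStorage, // e.g. NewLedgerBaseStorage(ledger)
//		encMode,
//		decMode,
//		decodeStorable,
//		decodeTypeInfo,
//		noop,
//	)
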
func (s *PersistentSlabStorage) Commit() error {
	var err error

	// this part ensures the keys are sorted so the commit operation is deterministic
	keysWithOwners := s.sortedOwnedDeltaKeys()

	for _, id := range keysWithOwners {
		slab := s.deltas[id]

		// deleted slabs
		if slab == nil {
			err = s.baseStorage.Remove(id)
			if err != nil {
				// Wrap err as external error (if needed) because err is returned by BaseStorage interface.
				return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", id))
			}
			// Deleted slabs are removed from deltas and added to read cache so that:
			// 1. next read is from in-memory read cache
			// 2. deleted slabs are not re-committed in next commit
			s.cache[id] = nil
			delete(s.deltas, id)
			continue
		}

		// serialize
		data, err := Encode(slab, s.cborEncMode)
		if err != nil {
			// err is categorized already by Encode()
			return err
		}

		// store
		err = s.baseStorage.Store(id, data)
		if err != nil {
			// Wrap err as external error (if needed) because err is returned by BaseStorage interface.
			return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", id))
		}

		// add to read cache
		s.cache[id] = slab
		// It's safe to remove slab from deltas because
		// iteration is on non-temp slabs and temp slabs
		// are still in deltas.
		delete(s.deltas, id)
	}

	// Do NOT reset deltas because slabs with empty address are not saved.

	return nil
}
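
// Illustrative sketch (not part of the original file): a typical write path.
// Store only records the slab in the in-memory deltas; nothing reaches the
// BaseStorage until Commit (or FastCommit) is called. ownerAddress and slab
// are assumed to come from the caller.
//
//	id, err := persistentStorage.GenerateStorageID(ownerAddress)
//	if err != nil {
//		// handle error
//	}
//	if err := persistentStorage.Store(id, slab); err != nil {
//		// handle error
//	}
//	if err := persistentStorage.Commit(); err != nil {
//		// handle error
//	}
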
func (s *PersistentSlabStorage) FastCommit(numWorkers int) error {

	// this part ensures the keys are sorted so the commit operation is deterministic
	keysWithOwners := s.sortedOwnedDeltaKeys()

	if len(keysWithOwners) == 0 {
		return nil
	}

	// limit the number of workers to the number of keys
	if numWorkers > len(keysWithOwners) {
		numWorkers = len(keysWithOwners)
	}

	// construct job queue
	jobs := make(chan StorageID, len(keysWithOwners))
	for _, id := range keysWithOwners {
		jobs <- id
	}
	close(jobs)

	type encodedSlabs struct {
		storageID StorageID
		data      []byte
		err       error
	}

	// construct result queue
	results := make(chan *encodedSlabs, len(keysWithOwners))

	// define encoders (workers) and launch them
	// encoders encode slabs in parallel
	encoder := func(wg *sync.WaitGroup, done <-chan struct{}, jobs <-chan StorageID, results chan<- *encodedSlabs) {
		defer wg.Done()

		for id := range jobs {
			// Check if goroutine is signaled to stop before proceeding.
			select {
			case <-done:
				return
			default:
			}

			slab := s.deltas[id]
			if slab == nil {
				results <- &encodedSlabs{
					storageID: id,
					data:      nil,
					err:       nil,
				}
				continue
			}
			// serialize
			data, err := Encode(slab, s.cborEncMode)
			results <- &encodedSlabs{
				storageID: id,
				data:      data,
				err:       err,
			}
		}
	}

	done := make(chan struct{})

	var wg sync.WaitGroup
	wg.Add(numWorkers)

	for i := 0; i < numWorkers; i++ {
		go encoder(&wg, done, jobs, results)
	}

	defer func() {
		// This ensures that all goroutines are stopped before output channel is closed.

		// Wait for all goroutines to finish
		wg.Wait()

		// Close output channel
		close(results)
	}()

	// process the results while encoders are working
	// we need to capture them inside a map
	// again so we can apply them in order of keys
	encSlabByID := make(map[StorageID][]byte)
	for i := 0; i < len(keysWithOwners); i++ {
		result := <-results
		// if any error, return
		if result.err != nil {
			// Closing done channel signals goroutines to stop.
			close(done)
			// result.err is already categorized by Encode().
			return result.err
		}
		encSlabByID[result.storageID] = result.data
	}

	// at this stage all results have been processed
	// and are ready to be passed to the base storage layer
	for _, id := range keysWithOwners {
		data := encSlabByID[id]

		var err error
		// deleted slabs
		if data == nil {
			err = s.baseStorage.Remove(id)
			if err != nil {
				// Wrap err as external error (if needed) because err is returned by BaseStorage interface.
				return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", id))
			}
			// Deleted slabs are removed from deltas and added to read cache so that:
			// 1. next read is from in-memory read cache
			// 2. deleted slabs are not re-committed in next commit
			s.cache[id] = nil
			delete(s.deltas, id)
			continue
		}

		// store
		err = s.baseStorage.Store(id, data)
		if err != nil {
			// Wrap err as external error (if needed) because err is returned by BaseStorage interface.
			return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", id))
		}

		s.cache[id] = s.deltas[id]
		// It's safe to remove slab from deltas because
		// iteration is on non-temp slabs and temp slabs
		// are still in deltas.
		delete(s.deltas, id)
	}

	// Do NOT reset deltas because slabs with empty address are not saved.

	return nil
}

func (s *PersistentSlabStorage) DropDeltas() {
	s.deltas = make(map[StorageID]Slab)
}

func (s *PersistentSlabStorage) DropCache() {
	s.cache = make(map[StorageID]Slab)
}

func (s *PersistentSlabStorage) RetrieveIgnoringDeltas(id StorageID) (Slab, bool, error) {

	// check the read cache next
	if slab, ok := s.cache[id]; ok {
		return slab, slab != nil, nil
	}

	// fetch from base storage last
	data, ok, err := s.baseStorage.Retrieve(id)
	if err != nil {
		// Wrap err as external error (if needed) because err is returned by BaseStorage interface.
		return nil, ok, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", id))
	}
	if !ok {
		return nil, ok, nil
	}

	slab, err := DecodeSlab(id, data, s.cborDecMode, s.DecodeStorable, s.DecodeTypeInfo)
	if err != nil {
		// err is already categorized by DecodeSlab().
		return nil, ok, err
	}

	// save decoded slab to cache
	s.cache[id] = slab

	return slab, ok, nil
}

func (s *PersistentSlabStorage) Retrieve(id StorageID) (Slab, bool, error) {
	// check deltas first
	if slab, ok := s.deltas[id]; ok {
		return slab, slab != nil, nil
	}

	// Don't need to wrap error as external error because err is already categorized by PersistentSlabStorage.RetrieveIgnoringDeltas().
	return s.RetrieveIgnoringDeltas(id)
}

func (s *PersistentSlabStorage) Store(id StorageID, slab Slab) error {
	// add to deltas
	s.deltas[id] = slab
	return nil
}

func (s *PersistentSlabStorage) Remove(id StorageID) error {
	// add a nil entry to deltas under that id
	s.deltas[id] = nil
	return nil
}

// Warning: Count doesn't consider new segments in the deltas and only returns committed values.
func (s *PersistentSlabStorage) Count() int {
	return s.baseStorage.SegmentCounts()
}
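
// Illustrative sketch (not part of the original file): Remove only records a
// nil delta, so a subsequent Retrieve reports the slab as missing even though
// the base storage still holds it until the next Commit.
//
//	if err := persistentStorage.Remove(id); err != nil {
//		// handle error
//	}
//	_, found, err := persistentStorage.Retrieve(id)
//	// found == false, err == nil: the nil delta shadows the committed slab
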
// Deltas returns number of uncommitted slabs, including slabs with temp addresses.
func (s *PersistentSlabStorage) Deltas() uint {
	return uint(len(s.deltas))
}

// DeltasWithoutTempAddresses returns number of uncommitted slabs, excluding slabs with temp addresses.
func (s *PersistentSlabStorage) DeltasWithoutTempAddresses() uint {
	deltas := uint(0)
	for k := range s.deltas {
		// exclude the ones that are not owned by accounts
		if k.Address != AddressUndefined {
			deltas++
		}
	}
	return deltas
}

// DeltasSizeWithoutTempAddresses returns total size of uncommitted slabs (in bytes), excluding slabs with temp addresses.
func (s *PersistentSlabStorage) DeltasSizeWithoutTempAddresses() uint64 {
	size := uint64(0)
	for k, slab := range s.deltas {
		// Exclude slabs that are not owned by accounts.
		if k.Address == AddressUndefined || slab == nil {
			continue
		}

		size += uint64(slab.ByteSize())
	}
	return size
}