github.com/daeglee/go-ethereum@v0.0.0-20190504220456-cad3e8d18e9b/swarm/storage/ldbstore_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
)

type testDbStore struct {
	*LDBStore
	dir string
}

func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
	dir, err := ioutil.TempDir("", "bzz-storage-test")
	if err != nil {
		return nil, func() {}, err
	}

	var db *LDBStore
	storeparams := NewDefaultStoreParams()
	params := NewLDBStoreParams(storeparams, dir)
	params.Po = testPoFunc

	if mock {
		globalStore := mem.NewGlobalStore()
		addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
		mockStore := globalStore.NewNodeStore(addr)

		db, err = NewMockDbStore(params, mockStore)
	} else {
		db, err = NewLDBStore(params)
	}

	cleanup := func() {
		if db != nil {
			db.Close()
		}
		err = os.RemoveAll(dir)
		if err != nil {
			panic(fmt.Sprintf("db cleanup failed: %v", err))
		}
	}

	return &testDbStore{db, dir}, cleanup, err
}

func testPoFunc(k Address) (ret uint8) {
	basekey := make([]byte, 32)
	return uint8(Proximity(basekey, k[:]))
}

func testDbStoreRandom(n int, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreRandom(db, n, t)
}

func testDbStoreCorrect(n int, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreCorrect(db, n, t)
}

func TestMarkAccessed(t *testing.T) {
	db, cleanup, err := newTestDbStore(false, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	h := GenerateRandomChunk(chunk.DefaultSize)

	db.Put(context.Background(), h)

	var index dpaDBIndex
	addr := h.Address()
	idxk := getIndexKey(addr)

	idata, err := db.db.Get(idxk)
	if err != nil {
		t.Fatal(err)
	}
	decodeIndex(idata, &index)

	if index.Access != 0 {
		t.Fatalf("Expected the access index to be %d, but it is %d", 0, index.Access)
	}

	db.MarkAccessed(addr)
	db.writeCurrentBatch()

	idata, err = db.db.Get(idxk)
	if err != nil {
		t.Fatal(err)
	}
	decodeIndex(idata, &index)

	if index.Access != 1 {
		t.Fatalf("Expected the access index to be %d, but it is %d", 1, index.Access)
	}
}
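
// The fetch-decode-inspect pattern used twice in TestMarkAccessed could be
// factored into a helper. The function below is a sketch added here for
// illustration only (it is not part of the original file); it assumes
// getIndexKey, decodeIndex and dpaDBIndex behave exactly as used above.
func getAccessCnt(t *testing.T, db *testDbStore, addr Address) uint64 {
	t.Helper()
	// fetch the raw index entry for the chunk address
	idata, err := db.db.Get(getIndexKey(addr))
	if err != nil {
		t.Fatal(err)
	}
	// decode it and return the access counter
	var index dpaDBIndex
	decodeIndex(idata, &index)
	return index.Access
}
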
func TestDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, false, t)
}

func TestDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, false, t)
}

func TestDbStoreRandom_1k(t *testing.T) {
	testDbStoreRandom(1000, false, t)
}

func TestDbStoreCorrect_1k(t *testing.T) {
	testDbStoreCorrect(1000, false, t)
}

func TestMockDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, true, t)
}

func TestMockDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, true, t)
}

func TestMockDbStoreRandom_1k(t *testing.T) {
	testDbStoreRandom(1000, true, t)
}

func TestMockDbStoreCorrect_1k(t *testing.T) {
	testDbStoreCorrect(1000, true, t)
}

func testDbStoreNotFound(t *testing.T, mock bool) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	_, err = db.Get(context.TODO(), ZeroAddr)
	if err != ErrChunkNotFound {
		t.Errorf("Expected ErrChunkNotFound, got %v", err)
	}
}

func TestDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, false)
}
func TestMockDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, true)
}

func testIterator(t *testing.T, mock bool) {
	var i int
	var poc uint
	chunkcount := 32
	chunkkeys := NewAddressCollection(chunkcount)
	chunkkeysResults := NewAddressCollection(chunkcount)

	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	chunks := GenerateRandomChunks(chunk.DefaultSize, chunkcount)

	for i = 0; i < len(chunks); i++ {
		chunkkeys[i] = chunks[i].Address()
		err := db.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatalf("dbStore.Put failed: %v", err)
		}
	}

	for i = 0; i < len(chunkkeys); i++ {
		log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
	}
	i = 0
	for poc = 0; poc <= 255; poc++ {
		err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
			log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
			chunkkeysResults[n] = k
			i++
			return true
		})
		if err != nil {
			t.Fatalf("Iterator call failed: %v", err)
		}
	}

	for i = 0; i < chunkcount; i++ {
		if !bytes.Equal(chunkkeys[i], chunkkeysResults[i]) {
			t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeysResults[i])
		}
	}
}

func TestIterator(t *testing.T) {
	testIterator(t, false)
}
func TestMockIterator(t *testing.T) {
	testIterator(t, true)
}

func benchmarkDbStorePut(n int, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStorePut(db, n, b)
}

func benchmarkDbStoreGet(n int, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStoreGet(db, n, b)
}

func BenchmarkDbStorePut_500(b *testing.B) {
	benchmarkDbStorePut(500, false, b)
}

func BenchmarkDbStoreGet_500(b *testing.B) {
	benchmarkDbStoreGet(500, false, b)
}

func BenchmarkMockDbStorePut_500(b *testing.B) {
	benchmarkDbStorePut(500, true, b)
}

func BenchmarkMockDbStoreGet_500(b *testing.B) {
	benchmarkDbStoreGet(500, true, b)
}
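
// The benchmarks above can be run in isolation with the standard go tooling,
// for example (command shown for illustration; adjust the package path to
// your checkout):
//
//	go test -run NONE -bench 'DbStore(Put|Get)_500' ./swarm/storage
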
// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store, and
// retrieve them, provided we don't hit the garbage collection
func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
	capacity := 50
	n := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n)
	if err != nil {
		t.Fatal(err.Error())
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for _, ch := range chunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
	}

	if ldb.entryCnt != uint64(n) {
		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt)
	}

	if ldb.accessCnt != uint64(2*n) {
		t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt)
	}
}

// TestLDBStoreCollectGarbage tests that we can put more chunks than LevelDB's capacity, and
// retrieve only some of them, because garbage collection must have partially cleared the store.
// It also tests that we can delete chunks and that we can trigger garbage collection.
func TestLDBStoreCollectGarbage(t *testing.T) {
	// below max round
	initialCap := defaultMaxGCRound / 100
	cap := initialCap / 2
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// at max round
	cap = initialCap
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// more than max round, not on threshold
	cap = initialCap + 500
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
}

func testLDBStoreCollectGarbage(t *testing.T) {
	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	// split put counts to gc target count threshold, and wait for gc to finish in between
	var allChunks []Chunk
	remaining := n
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		chunks, err := mputRandomChunks(ldb, putCount)
		if err != nil {
			t.Fatal(err.Error())
		}
		allChunks = append(allChunks, chunks...)
		ldb.lock.RLock()
		log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)
		ldb.lock.RUnlock()

		waitGc(ldb)
	}

	// attempt gets on all put chunks
	var missing int
	for _, ch := range allChunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			missing++
			continue
		}
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}

		log.Trace("got back chunk", "chunk", ret)
	}

	// all surplus chunks should be missing
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	if missing != expectMissing {
		t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", expectMissing, missing)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}
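
// A quick sanity check of the expectMissing arithmetic above, with
// illustrative numbers only (the real values depend on defaultMaxGCRound and
// the gc target computed for the configured capacity): with capacity = 100,
// n = 400 and roundTarget = 10, and assuming each gc round evicts exactly
// roundTarget chunks as the formula implies, then
// expectMissing = 10 + (((400-100)/10)*10) = 310 of the 400 chunks should no
// longer be retrievable.
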
// TestLDBStoreAddRemove tests that we can put and then delete a given chunk
func TestLDBStoreAddRemove(t *testing.T) {
	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(200)
	defer cleanup()

	n := 100
	chunks, err := mputRandomChunks(ldb, n)
	if err != nil {
		t.Fatal(err.Error())
	}

	for i := 0; i < n; i++ {
		// delete all even index chunks
		if i%2 == 0 {
			ldb.Delete(chunks[i].Address())
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())

		if i%2 == 0 {
			// expect even chunks to be missing
			if err == nil {
				t.Fatal("expected chunk to be missing, but got no error")
			}
		} else {
			// expect odd chunks to be retrieved successfully
			if err != nil {
				t.Fatalf("expected no error, but got %s", err)
			}

			if !bytes.Equal(ret.Data(), chunks[i].Data()) {
				t.Fatal("expected to get the same data back, but got something else")
			}
		}
	}
}

func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
	t.Skip("flaky with -race flag")

	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	defer cleanup()
	ldb.setCapacity(uint64(capacity))

	// put capacity count number of chunks
	chunks := make([]Chunk, n)
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(chunk.DefaultSize)
		chunks[i] = c
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		err := ldb.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatal(err)
		}
	}

	waitGc(ldb)

	// delete all chunks
	// (only count the ones actually deleted, the rest will have been gc'd)
	deletes := 0
	for i := 0; i < n; i++ {
		if ldb.Delete(chunks[i].Address()) == nil {
			deletes++
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	if ldb.entryCnt != 0 {
		t.Fatalf("ldb.entryCnt expected 0 got %v", ldb.entryCnt)
	}

	// the manual deletes will have increased accesscnt, so we need to add this when we verify the current count
	expAccessCnt := uint64(n)
	if ldb.accessCnt != expAccessCnt {
		t.Fatalf("ldb.accessCnt expected %v got %v", expAccessCnt, ldb.accessCnt)
	}

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	remaining := n
	var puts int
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		for putCount > 0 {
			ldb.Put(context.TODO(), chunks[puts])
			ldb.lock.RLock()
			log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
			ldb.lock.RUnlock()
			puts++
			putCount--
		}

		waitGc(ldb)
	}

	// expect the first surplus chunks to be missing, because they have the smallest access value
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	for i := 0; i < expectMissing; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err == nil {
			t.Fatalf("expected surplus chunk %d to be missing, but got no error", i)
		}
	}

	// expect the last chunks to be present, as they have the largest access value
	for i := expectMissing; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("chunk %v: expected no error, but got %s", i, err)
		}
		if !bytes.Equal(ret.Data(), chunks[i].Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
	}
}
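
// The batched put loop appears in both testLDBStoreCollectGarbage and
// testLDBStoreRemoveThenCollectGarbage above. A hypothetical helper, added
// here for illustration only (not part of the original file), could factor
// out the shape of that loop, assuming mputRandomChunks and waitGc as
// defined in this package:
func putInGCRounds(t *testing.T, ldb *LDBStore, n, roundTarget int) []Chunk {
	t.Helper()
	var all []Chunk
	for remaining := n; remaining > 0; {
		// never put more than one gc round target at a time,
		// then wait for any running gc round to finish
		putCount := roundTarget
		if remaining < putCount {
			putCount = remaining
		}
		remaining -= putCount
		chunks, err := mputRandomChunks(ldb, putCount)
		if err != nil {
			t.Fatal(err)
		}
		all = append(all, chunks...)
		waitGc(ldb)
	}
	return all
}
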
// TestLDBStoreCollectGarbageAccessUnlikeIndex tests garbage collection where accesscount differs from indexcount
func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {
	capacity := defaultMaxGCRound / 100 * 2
	n := capacity - 1

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n)
	if err != nil {
		t.Fatal(err.Error())
	}
	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	// bump the access count of the first capacity/2 chunks added, giving them the highest accesscount
	for i := 0; i < capacity/2; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("fail get chunk #%d - %s: %v", i, chunks[i].Address(), err)
		}
	}
	_, err = mputRandomChunks(ldb, 2)
	if err != nil {
		t.Fatal(err.Error())
	}

	// wait for garbage collection to kick in on the responsible actor
	waitGc(ldb)

	var missing int
	for i, ch := range chunks[2 : capacity/2] {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			t.Fatalf("fail find chunk #%d - %s: %v", i, ch.Address(), err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
		log.Trace("got back chunk", "chunk", ret)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}
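
// TestCleanIndex below builds several raw LevelDB keys by hand. As used
// there, the layouts are:
//
//	data entry:  keyData  | po byte | 8-byte big-endian storage index
//	gc entry:    keyGCIdx | 8-byte big-endian gc index
//	chunk index: keyIndex | 32-byte chunk address
//
// A hypothetical constructor for the first of these, added for illustration
// only (not part of the original file):
func makeDataKey(po uint8, storageIdx uint64) []byte {
	key := make([]byte, 10)
	key[0] = keyData
	key[1] = byte(po)
	binary.BigEndian.PutUint64(key[2:], storageIdx)
	return key
}
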
func TestCleanIndex(t *testing.T) {
	capacity := 5000
	n := 3

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n)
	if err != nil {
		t.Fatal(err)
	}

	// remove the data of the first chunk
	po := ldb.po(chunks[0].Address()[:])
	dataKey := make([]byte, 10)
	dataKey[0] = keyData
	dataKey[1] = byte(po)
	// dataKey[2:10] is left zeroed: the first chunk has storage index 0
	if _, err := ldb.db.Get(dataKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Delete(dataKey); err != nil {
		t.Fatal(err)
	}

	// remove the gc index row for the first chunk
	gcFirstCorrectKey := make([]byte, 9)
	gcFirstCorrectKey[0] = keyGCIdx
	if err := ldb.db.Delete(gcFirstCorrectKey); err != nil {
		t.Fatal(err)
	}

	// warp the gc data of the second chunk
	// this data should be correct again after the clean
	gcSecondCorrectKey := make([]byte, 9)
	gcSecondCorrectKey[0] = keyGCIdx
	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(1))
	gcSecondCorrectVal, err := ldb.db.Get(gcSecondCorrectKey)
	if err != nil {
		t.Fatal(err)
	}
	warpedGCVal := make([]byte, len(gcSecondCorrectVal)+1)
	copy(warpedGCVal[1:], gcSecondCorrectVal)
	if err := ldb.db.Delete(gcSecondCorrectKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Put(gcSecondCorrectKey, warpedGCVal); err != nil {
		t.Fatal(err)
	}

	if err := ldb.CleanGCIndex(); err != nil {
		t.Fatal(err)
	}

	// the index without corresponding data should have been deleted
	idxKey := make([]byte, 33)
	idxKey[0] = keyIndex
	copy(idxKey[1:], chunks[0].Address())
	if _, err := ldb.db.Get(idxKey); err == nil {
		t.Fatalf("expected chunk 0 idx to be pruned: %v", idxKey)
	}

	// the two other indices should be present
	copy(idxKey[1:], chunks[1].Address())
	if _, err := ldb.db.Get(idxKey); err != nil {
		t.Fatalf("expected chunk 1 idx to be present: %v", idxKey)
	}

	copy(idxKey[1:], chunks[2].Address())
	if _, err := ldb.db.Get(idxKey); err != nil {
		t.Fatalf("expected chunk 2 idx to be present: %v", idxKey)
	}

	// first gc index should still be gone
	if _, err := ldb.db.Get(gcFirstCorrectKey); err == nil {
		t.Fatalf("expected gc 0 idx to be pruned: %v", idxKey)
	}

	// second gc index should still be fixed
	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
		t.Fatalf("expected gc 1 idx to be present: %v", idxKey)
	}

	// third gc index should be unchanged
	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(2))
	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
		t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
	}

	c, err := ldb.db.Get(keyEntryCnt)
	if err != nil {
		t.Fatalf("expected entrycnt to be present: %v", err)
	}

	// entrycount should now be one less
	entryCount := binary.BigEndian.Uint64(c)
	if entryCount != 2 {
		t.Fatalf("expected entrycnt to be 2, was %d", entryCount)
	}

	// the chunks might accidentally be in the same bin
	// if so that bin counter will now be 2 - the highest added index.
	// if not, the total of them will be 3
	poBins := []uint8{ldb.po(chunks[1].Address()), ldb.po(chunks[2].Address())}
	if poBins[0] == poBins[1] {
		poBins = poBins[:1]
	}

	var binTotal uint64
	var currentBin [2]byte
	currentBin[0] = keyDistanceCnt
	if len(poBins) == 1 {
		currentBin[1] = poBins[0]
		c, err := ldb.db.Get(currentBin[:])
		if err != nil {
			t.Fatalf("expected bin count to be present: %v", err)
		}
		binCount := binary.BigEndian.Uint64(c)
		if binCount != 2 {
			t.Fatalf("expected bin count to be 2, was %d", binCount)
		}
	} else {
		for _, bin := range poBins {
			currentBin[1] = bin
			c, err := ldb.db.Get(currentBin[:])
			if err != nil {
				t.Fatalf("expected bin count to be present: %v", err)
			}
			binCount := binary.BigEndian.Uint64(c)
			binTotal += binCount
		}
		if binTotal != 3 {
			t.Fatalf("expected sum of bin indices to be 3, was %d", binTotal)
		}
	}

	// check that the iterator quits properly
	chunks, err = mputRandomChunks(ldb, 4100)
	if err != nil {
		t.Fatal(err)
	}

	po = ldb.po(chunks[4099].Address()[:])
	dataKey = make([]byte, 10)
	dataKey[0] = keyData
	dataKey[1] = byte(po)
	// the last chunk of this batch has storage index 4099+3, since the 3 initial chunks precede it
	binary.BigEndian.PutUint64(dataKey[2:], 4099+3)
	if _, err := ldb.db.Get(dataKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Delete(dataKey); err != nil {
		t.Fatal(err)
	}

	if err := ldb.CleanGCIndex(); err != nil {
		t.Fatal(err)
	}

	// entrycount should now be one less than before the second clean
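	// (the first clean left 2 index entries, 4100 new chunks were then
	// added, and one data entry was deleted, so 2 + 4100 - 1 = 4099 + 2
	// entries should remain)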
	c, err = ldb.db.Get(keyEntryCnt)
	if err != nil {
		t.Fatalf("expected entrycnt to be present: %v", err)
	}
	entryCount = binary.BigEndian.Uint64(c)
	if entryCount != 4099+2 {
		t.Fatalf("expected entrycnt to be %d, was %d", 4099+2, entryCount)
	}
}

// Note: waitGc does not guarantee that we wait 1 GC round; it only
// guarantees that if the GC is running we wait for that run to finish.
// ticket: https://github.com/ethersphere/go-ethereum/issues/1151
func waitGc(ldb *LDBStore) {
	<-ldb.gc.runC
	ldb.gc.runC <- struct{}{}
}
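
// waitGc works because gc.runC appears to be used as a single-token
// semaphore: the receive blocks while a gc round holds the token, and the
// send puts the token back so the next round (or the next waiter) can
// proceed. This reading is inferred from the usage in the tests above; see
// ldbstore.go for the authoritative implementation.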