github.com/letterj/go-ethereum@v1.8.22-0.20190204142846-520024dfd689/swarm/storage/ldbstore_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	ch "github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
)

type testDbStore struct {
	*LDBStore
	dir string
}

func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
	dir, err := ioutil.TempDir("", "bzz-storage-test")
	if err != nil {
		return nil, func() {}, err
	}

	var db *LDBStore
	storeparams := NewDefaultStoreParams()
	params := NewLDBStoreParams(storeparams, dir)
	params.Po = testPoFunc

	if mock {
		globalStore := mem.NewGlobalStore()
		addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
		mockStore := globalStore.NewNodeStore(addr)

		db, err = NewMockDbStore(params, mockStore)
	} else {
		db, err = NewLDBStore(params)
	}

	cleanup := func() {
		if db != nil {
			db.Close()
		}
		err = os.RemoveAll(dir)
		if err != nil {
			panic(fmt.Sprintf("db cleanup failed: %v", err))
		}
	}

	return &testDbStore{db, dir}, cleanup, err
}

func testPoFunc(k Address) (ret uint8) {
	basekey := make([]byte, 32)
	return uint8(Proximity(basekey, k[:]))
}

func testDbStoreRandom(n int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreRandom(db, n, chunksize, t)
}

func testDbStoreCorrect(n int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreCorrect(db, n, chunksize, t)
}

func TestMarkAccessed(t *testing.T) {
	db, cleanup, err := newTestDbStore(false, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	h := GenerateRandomChunk(ch.DefaultSize)

	db.Put(context.Background(), h)

	var index dpaDBIndex
	addr := h.Address()
	idxk := getIndexKey(addr)

	idata, err := db.db.Get(idxk)
	if err != nil {
		t.Fatal(err)
	}
	decodeIndex(idata, &index)

	if index.Access != 0 {
		t.Fatalf("Expected the access index to be %d, but it is %d", 0, index.Access)
	}

	db.MarkAccessed(addr)
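
	// MarkAccessed updates the index via the store's current write batch;
	// flush it so the raw db.Get below observes the new access count.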
	db.writeCurrentBatch()

	idata, err = db.db.Get(idxk)
	if err != nil {
		t.Fatal(err)
	}
	decodeIndex(idata, &index)

	if index.Access != 1 {
		t.Fatalf("Expected the access index to be %d, but it is %d", 1, index.Access)
	}
}

func TestDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, 0, false, t)
}

func TestDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, 4096, false, t)
}

func TestDbStoreRandom_1k(t *testing.T) {
	testDbStoreRandom(1000, 0, false, t)
}

func TestDbStoreCorrect_1k(t *testing.T) {
	testDbStoreCorrect(1000, 4096, false, t)
}

func TestMockDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, 0, true, t)
}

func TestMockDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, 4096, true, t)
}

func TestMockDbStoreRandom_1k(t *testing.T) {
	testDbStoreRandom(1000, 0, true, t)
}

func TestMockDbStoreCorrect_1k(t *testing.T) {
	testDbStoreCorrect(1000, 4096, true, t)
}

func testDbStoreNotFound(t *testing.T, mock bool) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	_, err = db.Get(context.TODO(), ZeroAddr)
	if err != ErrChunkNotFound {
		t.Errorf("Expected ErrChunkNotFound, got %v", err)
	}
}

func TestDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, false)
}
func TestMockDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, true)
}

func testIterator(t *testing.T, mock bool) {
	var chunkcount int = 32
	var i int
	var poc uint
	chunkkeys := NewAddressCollection(chunkcount)
	chunkkeys_results := NewAddressCollection(chunkcount)

	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	chunks := GenerateRandomChunks(ch.DefaultSize, chunkcount)

	for i = 0; i < len(chunks); i++ {
		chunkkeys[i] = chunks[i].Address()
		err := db.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatalf("dbStore.Put failed: %v", err)
		}
	}

	for i = 0; i < len(chunkkeys); i++ {
		log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
	}
	i = 0
	for poc = 0; poc <= 255; poc++ {
		err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
			log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
			chunkkeys_results[n] = k
			i++
			return true
		})
		if err != nil {
			t.Fatalf("Iterator call failed: %v", err)
		}
	}

	for i = 0; i < chunkcount; i++ {
		if !bytes.Equal(chunkkeys[i], chunkkeys_results[i]) {
			t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeys_results[i])
		}
	}
}

func TestIterator(t *testing.T) {
	testIterator(t, false)
}
func TestMockIterator(t *testing.T) {
	testIterator(t, true)
}

func benchmarkDbStorePut(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStorePut(db, n, chunksize, b)
}

func benchmarkDbStoreGet(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStoreGet(db, n, chunksize, b)
}
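
// The benchmark names below encode their arguments as
// Benchmark[Mock]DbStore<Op>_<processors>_<n>; note that the processors
// argument is currently unused by the two helpers above.
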
func BenchmarkDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, false, b)
}

func BenchmarkDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, false, b)
}

func BenchmarkDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, false, b)
}

func BenchmarkDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, false, b)
}

func BenchmarkMockDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, true, b)
}

func BenchmarkMockDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, true, b)
}

func BenchmarkMockDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, true, b)
}

func BenchmarkMockDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, true, b)
}

// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store, and
// retrieve them, provided we don't hit the garbage collection
func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
	capacity := 50
	n := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for _, ch := range chunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
	}

	if ldb.entryCnt != uint64(n) {
		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt)
	}

	if ldb.accessCnt != uint64(2*n) {
		t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt)
	}
}

// TestLDBStoreCollectGarbage tests that we can put more chunks than LevelDB's capacity, and
// retrieve only some of them, because garbage collection must have partially cleared the store.
// Also tests that we can delete chunks and that we can trigger garbage collection.
func TestLDBStoreCollectGarbage(t *testing.T) {
	// below max round
	initialCap := defaultMaxGCRound / 100
	cap := initialCap / 2
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// at max round
	cap = initialCap
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// more than max round, not on threshold
	cap = initialCap + 500
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
}
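
// testLDBStoreCollectGarbage recovers its capacity and chunk-count
// parameters from the subtest name: t.Name() here has the form
// "TestLDBStoreCollectGarbage/<variant>/<capacity>/<n>", so splitting on
// "/" yields capacity and n at indices 2 and 3.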
func testLDBStoreCollectGarbage(t *testing.T) {
	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	// split the puts into batches of at most the gc round target count,
	// and wait for gc to finish in between
	var allChunks []Chunk
	remaining := n
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		chunks, err := mputRandomChunks(ldb, putCount, int64(ch.DefaultSize))
		if err != nil {
			t.Fatal(err.Error())
		}
		allChunks = append(allChunks, chunks...)
		ldb.lock.RLock()
		log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)
		ldb.lock.RUnlock()

		waitGc(ldb)
	}

	// attempt gets on all put chunks
	var missing int
	for _, ch := range allChunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			missing++
			continue
		}
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}

		log.Trace("got back chunk", "chunk", ret)
	}

	// all surplus chunks should be missing
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	if missing != expectMissing {
		t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", expectMissing, missing)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

// TestLDBStoreAddRemove tests that we can put and then delete a given chunk
func TestLDBStoreAddRemove(t *testing.T) {
	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(200)
	defer cleanup()

	n := 100
	chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < n; i++ {
		// delete all even index chunks
		if i%2 == 0 {
			ldb.Delete(chunks[i].Address())
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())

		if i%2 == 0 {
			// expect even chunks to be missing
			if err == nil {
				t.Fatal("expected chunk to be missing, but got no error")
			}
		} else {
			// expect odd chunks to be retrieved successfully
			if err != nil {
				t.Fatalf("expected no error, but got %s", err)
			}

			if !bytes.Equal(ret.Data(), chunks[i].Data()) {
				t.Fatal("expected to get the same data back, but got something else")
			}
		}
	}
}
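
// expectMissingAfterGC is an illustrative helper (not part of the original
// suite; the gc tests above and below inline the same arithmetic): after
// putting n chunks into a store with the given capacity, garbage collection
// is expected to have evicted one full round of roundTarget chunks more
// than the number of whole rounds contained in the surplus n-capacity.
func expectMissingAfterGC(n, capacity, roundTarget int) int {
	return roundTarget + (((n - capacity) / roundTarget) * roundTarget)
}
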
func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
	t.Skip("flaky with -race flag")

	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	defer cleanup()
	ldb.setCapacity(uint64(capacity))

	// generate n chunks, then put them all
	chunks := make([]Chunk, n)
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(ch.DefaultSize)
		chunks[i] = c
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		err := ldb.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatal(err)
		}
	}

	waitGc(ldb)

	// delete all chunks
	// (only count the ones actually deleted; the rest will have been gc'd)
	deletes := 0
	for i := 0; i < n; i++ {
		if ldb.Delete(chunks[i].Address()) == nil {
			deletes++
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	if ldb.entryCnt != 0 {
		t.Fatalf("ldb.entryCnt expected 0 got %v", ldb.entryCnt)
	}

	// the manual deletes will have increased accesscnt, so we need to add this when we verify the current count
	expAccessCnt := uint64(n)
	if ldb.accessCnt != expAccessCnt {
		t.Fatalf("ldb.accessCnt expected %v got %v", expAccessCnt, ldb.accessCnt)
	}

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	remaining := n
	var puts int
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		for putCount > 0 {
			ldb.Put(context.TODO(), chunks[puts])
			ldb.lock.RLock()
			log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
			ldb.lock.RUnlock()
			puts++
			putCount--
		}

		waitGc(ldb)
	}

	// expect the first surplus chunks to be missing, because they have the smallest access value
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	for i := 0; i < expectMissing; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err == nil {
			t.Fatalf("expected surplus chunk %d to be missing, but got no error", i)
		}
	}

	// expect the last chunks to be present, as they have the largest access value
	for i := expectMissing; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("chunk %v: expected no error, but got %s", i, err)
		}
		if !bytes.Equal(ret.Data(), chunks[i].Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
	}
}

// TestLDBStoreCollectGarbageAccessUnlikeIndex tests garbage collection where accesscount differs from indexcount
func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {
	capacity := defaultMaxGCRound / 100 * 2
	n := capacity - 1

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}
	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	// set the first added capacity/2 chunks to the highest accesscount
	for i := 0; i < capacity/2; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("fail get chunk #%d - %s: %v", i, chunks[i].Address(), err)
		}
	}
	_, err = mputRandomChunks(ldb, 2, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}

	// wait for garbage collection to kick in on the responsible actor
	waitGc(ldb)

	var missing int
	for i, ch := range chunks[2 : capacity/2] {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			t.Fatalf("fail find chunk #%d - %s: %v", i, ch.Address(), err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got something else")
		}
		log.Trace("got back chunk", "chunk", ret)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}
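
// The raw LevelDB keys that TestCleanIndex manipulates below follow the
// store's layout as used in this file: data entries are keyData|po|storageIdx
// (1+1+8 bytes), gc index rows are keyGCIdx|gcIdx (1+8 bytes), chunk indices
// are keyIndex|address (1+32 bytes), and per-bin counters are
// keyDistanceCnt|po (2 bytes).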
func TestCleanIndex(t *testing.T) {
	capacity := 5000
	n := 3

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n, 4096)
	if err != nil {
		t.Fatal(err)
	}

	// remove the data of the first chunk
	po := ldb.po(chunks[0].Address()[:])
	dataKey := make([]byte, 10)
	dataKey[0] = keyData
	dataKey[1] = byte(po)
	// dataKey[2:10] is left zero: the first chunk has storageIdx 0
	if _, err := ldb.db.Get(dataKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Delete(dataKey); err != nil {
		t.Fatal(err)
	}

	// remove the gc index row for the first chunk
	gcFirstCorrectKey := make([]byte, 9)
	gcFirstCorrectKey[0] = keyGCIdx
	if err := ldb.db.Delete(gcFirstCorrectKey); err != nil {
		t.Fatal(err)
	}

	// warp the gc data of the second chunk;
	// this data should be correct again after the clean
	gcSecondCorrectKey := make([]byte, 9)
	gcSecondCorrectKey[0] = keyGCIdx
	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(1))
	gcSecondCorrectVal, err := ldb.db.Get(gcSecondCorrectKey)
	if err != nil {
		t.Fatal(err)
	}
	warpedGCVal := make([]byte, len(gcSecondCorrectVal)+1)
	copy(warpedGCVal[1:], gcSecondCorrectVal)
	if err := ldb.db.Delete(gcSecondCorrectKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Put(gcSecondCorrectKey, warpedGCVal); err != nil {
		t.Fatal(err)
	}
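
	// run the cleaner; all checks below verify its effect on the raw keys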
	if err := ldb.CleanGCIndex(); err != nil {
		t.Fatal(err)
	}

	// the index without corresponding data should have been deleted
	idxKey := make([]byte, 33)
	idxKey[0] = keyIndex
	copy(idxKey[1:], chunks[0].Address())
	if _, err := ldb.db.Get(idxKey); err == nil {
		t.Fatalf("expected chunk 0 idx to be pruned: %v", idxKey)
	}

	// the two other indices should be present
	copy(idxKey[1:], chunks[1].Address())
	if _, err := ldb.db.Get(idxKey); err != nil {
		t.Fatalf("expected chunk 1 idx to be present: %v", idxKey)
	}

	copy(idxKey[1:], chunks[2].Address())
	if _, err := ldb.db.Get(idxKey); err != nil {
		t.Fatalf("expected chunk 2 idx to be present: %v", idxKey)
	}

	// first gc index should still be gone
	if _, err := ldb.db.Get(gcFirstCorrectKey); err == nil {
		t.Fatalf("expected gc 0 idx to be pruned: %v", gcFirstCorrectKey)
	}

	// second gc index should be fixed
	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
		t.Fatalf("expected gc 1 idx to be present: %v", gcSecondCorrectKey)
	}

	// third gc index should be unchanged
	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(2))
	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
		t.Fatalf("expected gc 2 idx to be present: %v", gcSecondCorrectKey)
	}

	c, err := ldb.db.Get(keyEntryCnt)
	if err != nil {
		t.Fatalf("expected entry count to be present: %v", err)
	}

	// entrycount should now be one less
	entryCount := binary.BigEndian.Uint64(c)
	if entryCount != 2 {
		t.Fatalf("expected entrycnt to be 2, was %d", entryCount)
	}

	// the chunks might accidentally be in the same bin;
	// if so, that bin counter will now be 2 (the highest added index),
	// if not, the two counters will total 3
	poBins := []uint8{ldb.po(chunks[1].Address()), ldb.po(chunks[2].Address())}
	if poBins[0] == poBins[1] {
		poBins = poBins[:1]
	}

	var binTotal uint64
	var currentBin [2]byte
	currentBin[0] = keyDistanceCnt
	if len(poBins) == 1 {
		currentBin[1] = poBins[0]
		c, err := ldb.db.Get(currentBin[:])
		if err != nil {
			t.Fatalf("expected bin count to be present: %v", err)
		}
		binCount := binary.BigEndian.Uint64(c)
		if binCount != 2 {
			t.Fatalf("expected bin count to be 2, was %d", binCount)
		}
	} else {
		for _, bin := range poBins {
			currentBin[1] = bin
			c, err := ldb.db.Get(currentBin[:])
			if err != nil {
				t.Fatalf("expected bin count to be present: %v", err)
			}
			binCount := binary.BigEndian.Uint64(c)
			binTotal += binCount
		}
		if binTotal != 3 {
			t.Fatalf("expected sum of bin indices to be 3, was %d", binTotal)
		}
	}

	// check that the iterator quits properly
	chunks, err = mputRandomChunks(ldb, 4100, 4096)
	if err != nil {
		t.Fatal(err)
	}

	po = ldb.po(chunks[4099].Address()[:])
	dataKey = make([]byte, 10)
	dataKey[0] = keyData
	dataKey[1] = byte(po)
	// the last chunk of this batch has storageIdx 4099+3, since 3 chunks were put before it
	binary.BigEndian.PutUint64(dataKey[2:], 4099+3)
	if _, err := ldb.db.Get(dataKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Delete(dataKey); err != nil {
		t.Fatal(err)
	}

	if err := ldb.CleanGCIndex(); err != nil {
		t.Fatal(err)
	}

	// entrycount should again be one less than the number of added chunks
	c, err = ldb.db.Get(keyEntryCnt)
	if err != nil {
		t.Fatalf("expected entry count to be present: %v", err)
	}
	entryCount = binary.BigEndian.Uint64(c)
	if entryCount != 4099+2 {
		t.Fatalf("expected entrycnt to be %d, was %d", 4099+2, entryCount)
	}
}

// Note: waitGc does not guarantee that we wait 1 GC round; it only
// guarantees that if the GC is running we wait for that run to finish
// ticket: https://github.com/ethersphere/go-ethereum/issues/1151
func waitGc(ldb *LDBStore) {
	<-ldb.gc.runC
	ldb.gc.runC <- struct{}{}
}
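
// The sketch below illustrates the setup/teardown pattern shared by the
// tests in this file; it is a hypothetical addition, not part of the
// original suite, and is skipped so it does not affect test runs.
func TestDbStorePutGetSketch(t *testing.T) {
	t.Skip("illustrative sketch only")

	db, cleanup, err := newTestDbStore(false, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	// put a single random chunk, then read it back by address
	c := GenerateRandomChunk(ch.DefaultSize)
	if err := db.Put(context.TODO(), c); err != nil {
		t.Fatal(err)
	}
	ret, err := db.Get(context.TODO(), c.Address())
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(ret.Data(), c.Data()) {
		t.Fatal("expected to get the same data back, but got something else")
	}
}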