github.com/hyperion-hyn/go-ethereum@v2.4.0+incompatible/swarm/storage/ldbstore_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	ch "github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
)

type testDbStore struct {
	*LDBStore
	dir string
}

// newTestDbStore creates an LDBStore in a fresh temporary directory, optionally
// backed by an in-memory mock global store, and returns it together with a
// cleanup function that closes the store and removes the directory.
func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
	dir, err := ioutil.TempDir("", "bzz-storage-test")
	if err != nil {
		return nil, func() {}, err
	}

	var db *LDBStore
	storeparams := NewDefaultStoreParams()
	params := NewLDBStoreParams(storeparams, dir)
	params.Po = testPoFunc

	if mock {
		globalStore := mem.NewGlobalStore()
		addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
		mockStore := globalStore.NewNodeStore(addr)

		db, err = NewMockDbStore(params, mockStore)
	} else {
		db, err = NewLDBStore(params)
	}

	cleanup := func() {
		if db != nil {
			db.Close()
		}
		err = os.RemoveAll(dir)
		if err != nil {
			panic(fmt.Sprintf("db cleanup failed: %v", err))
		}
	}

	return &testDbStore{db, dir}, cleanup, err
}

// testPoFunc returns the proximity order of address k relative to the all-zero base key.
func testPoFunc(k Address) (ret uint8) {
	basekey := make([]byte, 32)
	return uint8(Proximity(basekey, k[:]))
}

func (db *testDbStore) close() {
	db.Close()
	err := os.RemoveAll(db.dir)
	if err != nil {
		panic(err)
	}
}

func testDbStoreRandom(n int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreRandom(db, n, chunksize, t)
}

func testDbStoreCorrect(n int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreCorrect(db, n, chunksize, t)
}

func TestMarkAccessed(t *testing.T) {
	db, cleanup, err := newTestDbStore(false, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	h := GenerateRandomChunk(ch.DefaultSize)

	db.Put(context.Background(), h)

	var index dpaDBIndex
	addr := h.Address()
	idxk := getIndexKey(addr)

	idata, err := db.db.Get(idxk)
	if err != nil {
		t.Fatal(err)
	}
	decodeIndex(idata, &index)

	if index.Access != 0 {
t.Fatalf("Expected the access index to be %d, but it is %d", 0, index.Access) 131 } 132 133 db.MarkAccessed(addr) 134 db.writeCurrentBatch() 135 136 idata, err = db.db.Get(idxk) 137 if err != nil { 138 t.Fatal(err) 139 } 140 decodeIndex(idata, &index) 141 142 if index.Access != 1 { 143 t.Fatalf("Expected the access index to be %d, but it is %d", 1, index.Access) 144 } 145 146 } 147 148 func TestDbStoreRandom_1(t *testing.T) { 149 testDbStoreRandom(1, 0, false, t) 150 } 151 152 func TestDbStoreCorrect_1(t *testing.T) { 153 testDbStoreCorrect(1, 4096, false, t) 154 } 155 156 func TestDbStoreRandom_1k(t *testing.T) { 157 testDbStoreRandom(1000, 0, false, t) 158 } 159 160 func TestDbStoreCorrect_1k(t *testing.T) { 161 testDbStoreCorrect(1000, 4096, false, t) 162 } 163 164 func TestMockDbStoreRandom_1(t *testing.T) { 165 testDbStoreRandom(1, 0, true, t) 166 } 167 168 func TestMockDbStoreCorrect_1(t *testing.T) { 169 testDbStoreCorrect(1, 4096, true, t) 170 } 171 172 func TestMockDbStoreRandom_1k(t *testing.T) { 173 testDbStoreRandom(1000, 0, true, t) 174 } 175 176 func TestMockDbStoreCorrect_1k(t *testing.T) { 177 testDbStoreCorrect(1000, 4096, true, t) 178 } 179 180 func testDbStoreNotFound(t *testing.T, mock bool) { 181 db, cleanup, err := newTestDbStore(mock, false) 182 defer cleanup() 183 if err != nil { 184 t.Fatalf("init dbStore failed: %v", err) 185 } 186 187 _, err = db.Get(context.TODO(), ZeroAddr) 188 if err != ErrChunkNotFound { 189 t.Errorf("Expected ErrChunkNotFound, got %v", err) 190 } 191 } 192 193 func TestDbStoreNotFound(t *testing.T) { 194 testDbStoreNotFound(t, false) 195 } 196 func TestMockDbStoreNotFound(t *testing.T) { 197 testDbStoreNotFound(t, true) 198 } 199 200 func testIterator(t *testing.T, mock bool) { 201 var chunkcount int = 32 202 var i int 203 var poc uint 204 chunkkeys := NewAddressCollection(chunkcount) 205 chunkkeys_results := NewAddressCollection(chunkcount) 206 207 db, cleanup, err := newTestDbStore(mock, false) 208 defer cleanup() 209 if err != nil { 210 t.Fatalf("init dbStore failed: %v", err) 211 } 212 213 chunks := GenerateRandomChunks(ch.DefaultSize, chunkcount) 214 215 for i = 0; i < len(chunks); i++ { 216 chunkkeys[i] = chunks[i].Address() 217 err := db.Put(context.TODO(), chunks[i]) 218 if err != nil { 219 t.Fatalf("dbStore.Put failed: %v", err) 220 } 221 } 222 223 for i = 0; i < len(chunkkeys); i++ { 224 log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i])) 225 } 226 i = 0 227 for poc = 0; poc <= 255; poc++ { 228 err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool { 229 log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc))) 230 chunkkeys_results[n] = k 231 i++ 232 return true 233 }) 234 if err != nil { 235 t.Fatalf("Iterator call failed: %v", err) 236 } 237 } 238 239 for i = 0; i < chunkcount; i++ { 240 if !bytes.Equal(chunkkeys[i], chunkkeys_results[i]) { 241 t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeys_results[i]) 242 } 243 } 244 245 } 246 247 func TestIterator(t *testing.T) { 248 testIterator(t, false) 249 } 250 func TestMockIterator(t *testing.T) { 251 testIterator(t, true) 252 } 253 254 func benchmarkDbStorePut(n int, processors int, chunksize int64, mock bool, b *testing.B) { 255 db, cleanup, err := newTestDbStore(mock, true) 256 defer cleanup() 257 if err != nil { 258 b.Fatalf("init dbStore failed: %v", err) 259 } 260 benchmarkStorePut(db, n, chunksize, b) 261 } 262 263 func benchmarkDbStoreGet(n int, 
func benchmarkDbStoreGet(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStoreGet(db, n, chunksize, b)
}

func BenchmarkDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, false, b)
}

func BenchmarkDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, false, b)
}

func BenchmarkDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, false, b)
}

func BenchmarkDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, false, b)
}

func BenchmarkMockDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, true, b)
}

func BenchmarkMockDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, true, b)
}

func BenchmarkMockDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, true, b)
}

func BenchmarkMockDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, true, b)
}

// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store, and
// retrieve them, provided we don't hit the garbage collection
func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
	capacity := 50
	n := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for _, ch := range chunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got smth else")
		}
	}

	if ldb.entryCnt != uint64(n) {
		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt)
	}

	if ldb.accessCnt != uint64(2*n) {
		t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt)
	}
}

// TestLDBStoreCollectGarbage tests that we can put more chunks than LevelDB's capacity, and
// retrieve only some of them, because garbage collection must have partially cleared the store.
// Also tests that we can delete chunks and that we can trigger garbage collection.
func TestLDBStoreCollectGarbage(t *testing.T) {

	// below max round
	cap := defaultMaxGCRound / 2
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// at max round
	cap = defaultMaxGCRound
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// more than max round, not on threshold
	cap = defaultMaxGCRound * 1.1
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

}

func testLDBStoreCollectGarbage(t *testing.T) {
	// capacity and chunk count are encoded in the subtest name, e.g. "A/<capacity>/<n>"
	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	// split put counts to gc target count threshold, and wait for gc to finish in between
	var allChunks []Chunk
	remaining := n
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		chunks, err := mputRandomChunks(ldb, putCount, int64(ch.DefaultSize))
		if err != nil {
			t.Fatal(err.Error())
		}
		allChunks = append(allChunks, chunks...)
		log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)

		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
		defer cancel()
		waitGc(ctx, ldb)
	}

	// attempt gets on all put chunks
	var missing int
	for _, ch := range allChunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			missing++
			continue
		}
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got smth else")
		}

		log.Trace("got back chunk", "chunk", ret)
	}

	// all surplus chunks should be missing
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	if missing != expectMissing {
		t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", expectMissing, missing)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

// TestLDBStoreAddRemove tests that we can put and then delete a given chunk
func TestLDBStoreAddRemove(t *testing.T) {
	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(200)
	defer cleanup()

	n := 100
	chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}

	for i := 0; i < n; i++ {
		// delete all even index chunks
		if i%2 == 0 {
			ldb.Delete(chunks[i].Address())
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(nil, chunks[i].Address())

		if i%2 == 0 {
			// expect even chunks to be missing
			if err == nil {
				t.Fatal("expected chunk to be missing, but got no error")
			}
		} else {
			// expect odd chunks to be retrieved successfully
			if err != nil {
				t.Fatalf("expected no error, but got %s", err)
			}

			if !bytes.Equal(ret.Data(), chunks[i].Data()) {
				t.Fatal("expected to get the same data back, but got smth else")
			}
		}
	}
}

func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {

	// capacity and chunk count are encoded in the subtest name, e.g. "B/<capacity>/<n>"
	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	defer cleanup()
	ldb.setCapacity(uint64(capacity))

	// put capacity count number of chunks
	chunks := make([]Chunk, n)
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(ch.DefaultSize)
		chunks[i] = c
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		err := ldb.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatal(err)
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	waitGc(ctx, ldb)

	// delete all chunks
	// (only count the ones actually deleted, the rest will have been gc'd)
	deletes := 0
	for i := 0; i < n; i++ {
		if ldb.Delete(chunks[i].Address()) == nil {
			deletes++
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	if ldb.entryCnt != 0 {
		t.Fatalf("ldb.entryCnt expected 0 got %v", ldb.entryCnt)
	}

	// the manual deletes will have increased accesscnt, so we need to add this when we verify the current count
	expAccessCnt := uint64(n)
	if ldb.accessCnt != expAccessCnt {
		t.Fatalf("ldb.accessCnt expected %v got %v", expAccessCnt, ldb.accessCnt)
	}

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	remaining := n
	var puts int
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		for putCount > 0 {
			ldb.Put(context.TODO(), chunks[puts])
			log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
			puts++
			putCount--
		}

		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
		defer cancel()
		waitGc(ctx, ldb)
	}

	// expect first surplus chunks to be missing, because they have the smallest access value
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	for i := 0; i < expectMissing; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err == nil {
			t.Fatalf("expected surplus chunk %d to be missing, but got no error", i)
		}
	}

	// expect last chunks to be present, as they have the largest access value
	for i := expectMissing; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("chunk %v: expected no error, but got %s", i, err)
		}
		if !bytes.Equal(ret.Data(), chunks[i].Data()) {
			t.Fatal("expected to get the same data back, but got smth else")
		}
	}
}

// TestLDBStoreCollectGarbageAccessUnlikeIndex tests garbage collection where accesscount differs from indexcount
func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {

	capacity := defaultMaxGCRound * 2
	n := capacity - 1

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}
	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	// set first added capacity/2 chunks to highest accesscount
	for i := 0; i < capacity/2; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("fail add chunk #%d - %s: %v", i, chunks[i].Address(), err)
		}
	}
	_, err = mputRandomChunks(ldb, 2, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}

	// wait for garbage collection to kick in on the responsible actor
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	waitGc(ctx, ldb)

	var missing int
	for i, ch := range chunks[2 : capacity/2] {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			t.Fatalf("fail find chunk #%d - %s: %v", i, ch.Address(), err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got smth else")
		}
		log.Trace("got back chunk", "chunk", ret)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

func TestCleanIndex(t *testing.T) {
	capacity := 5000
	n := 3

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n, 4096)
	if err != nil {
		t.Fatal(err)
	}

	// remove the data of the first chunk
	po := ldb.po(chunks[0].Address()[:])
	dataKey := make([]byte, 10)
	dataKey[0] = keyData
	dataKey[1] = byte(po)
	// dataKey[2:10] = first chunk has storageIdx 0 on [2:10]
	if _, err := ldb.db.Get(dataKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Delete(dataKey); err != nil {
		t.Fatal(err)
	}

	// remove the gc index row for the first chunk
	gcFirstCorrectKey := make([]byte, 9)
	gcFirstCorrectKey[0] = keyGCIdx
	if err := ldb.db.Delete(gcFirstCorrectKey); err != nil {
		t.Fatal(err)
	}

	// warp the gc data of the second chunk
	// this data should be correct again after the clean
	gcSecondCorrectKey := make([]byte, 9)
	gcSecondCorrectKey[0] = keyGCIdx
	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(1))
	gcSecondCorrectVal, err := ldb.db.Get(gcSecondCorrectKey)
	if err != nil {
		t.Fatal(err)
	}
	warpedGCVal := make([]byte, len(gcSecondCorrectVal)+1)
	copy(warpedGCVal[1:], gcSecondCorrectVal)
	if err := ldb.db.Delete(gcSecondCorrectKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Put(gcSecondCorrectKey, warpedGCVal); err != nil {
		t.Fatal(err)
	}

	if err := ldb.CleanGCIndex(); err != nil {
		t.Fatal(err)
	}

	// the index without corresponding data should have been deleted
	idxKey := make([]byte, 33)
	idxKey[0] = keyIndex
	copy(idxKey[1:], chunks[0].Address())
	if _, err := ldb.db.Get(idxKey); err == nil {
		t.Fatalf("expected chunk 0 idx to be pruned: %v", idxKey)
	}

	// the two other indices should be present
	copy(idxKey[1:], chunks[1].Address())
	if _, err := ldb.db.Get(idxKey); err != nil {
		t.Fatalf("expected chunk 1 idx to be present: %v", idxKey)
	}

	copy(idxKey[1:], chunks[2].Address())
	if _, err := ldb.db.Get(idxKey); err != nil {
		t.Fatalf("expected chunk 2 idx to be present: %v", idxKey)
	}

	// first gc index should still be gone
	if _, err := ldb.db.Get(gcFirstCorrectKey); err == nil {
		t.Fatalf("expected gc 0 idx to be pruned: %v", idxKey)
	}

	// second gc index should still be fixed
	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
		t.Fatalf("expected gc 1 idx to be present: %v", idxKey)
	}

	// third gc index should be unchanged
	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(2))
	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
		t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
	}

	c, err := ldb.db.Get(keyEntryCnt)
	if err != nil {
		t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
	}

	// entrycount should now be one less
	entryCount := binary.BigEndian.Uint64(c)
	if entryCount != 2 {
		t.Fatalf("expected entrycnt to be 2, was %d", entryCount)
	}

	// the chunks might accidentally be in the same bin
	// if so that bin counter will now be 2 - the highest added index.
	// if not, the total of them will be 3
	poBins := []uint8{ldb.po(chunks[1].Address()), ldb.po(chunks[2].Address())}
	if poBins[0] == poBins[1] {
		poBins = poBins[:1]
	}

	var binTotal uint64
	var currentBin [2]byte
	currentBin[0] = keyDistanceCnt
	if len(poBins) == 1 {
		currentBin[1] = poBins[0]
		c, err := ldb.db.Get(currentBin[:])
		if err != nil {
			t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
		}
		binCount := binary.BigEndian.Uint64(c)
		if binCount != 2 {
			t.Fatalf("expected entrycnt to be 2, was %d", binCount)
		}
	} else {
		for _, bin := range poBins {
			currentBin[1] = bin
			c, err := ldb.db.Get(currentBin[:])
			if err != nil {
				t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
			}
			binCount := binary.BigEndian.Uint64(c)
			binTotal += binCount

		}
		if binTotal != 3 {
			t.Fatalf("expected sum of bin indices to be 3, was %d", binTotal)
		}
	}
}

// waitGc blocks until an ongoing garbage collection round (if any) has finished,
// by taking the gc run token from runC and immediately putting it back.
func waitGc(ctx context.Context, ldb *LDBStore) {
	<-ldb.gc.runC
	ldb.gc.runC <- struct{}{}
}
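
// exampleExpectMissing is an illustrative sketch with hypothetical numbers: it
// spells out the integer arithmetic behind the expectMissing value used by the
// gc tests above. In those tests capacity and n come from the subtest name and
// roundTarget from ldb.gc.target; the values below are made up for the example.
func exampleExpectMissing() int {
	capacity := 1000   // hypothetical store capacity
	n := 4 * capacity  // the gc tests put 4x capacity chunks
	roundTarget := 250 // hypothetical gc round target

	// full gc rounds triggered by the surplus, plus one more round's worth:
	// 250 + ((4000-1000)/250)*250 = 250 + 12*250 = 3250 chunks expected missing
	return roundTarget + (((n - capacity) / roundTarget) * roundTarget)
}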