github.com/ebceco/ebc@v1.8.19-0.20190309150932-8cb0b9e06484/swarm/storage/ldbstore_test.go (about) 1 // Copyright 2016 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package storage 18 19 import ( 20 "bytes" 21 "context" 22 "encoding/binary" 23 "fmt" 24 "io/ioutil" 25 "os" 26 "strconv" 27 "strings" 28 "testing" 29 "time" 30 31 "github.com/ebceco/ebc/common" 32 ch "github.com/ebceco/ebc/swarm/chunk" 33 "github.com/ebceco/ebc/swarm/log" 34 "github.com/ebceco/ebc/swarm/storage/mock/mem" 35 ldberrors "github.com/syndtr/goleveldb/leveldb/errors" 36 ) 37 38 type testDbStore struct { 39 *LDBStore 40 dir string 41 } 42 43 func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) { 44 dir, err := ioutil.TempDir("", "bzz-storage-test") 45 if err != nil { 46 return nil, func() {}, err 47 } 48 49 var db *LDBStore 50 storeparams := NewDefaultStoreParams() 51 params := NewLDBStoreParams(storeparams, dir) 52 params.Po = testPoFunc 53 54 if mock { 55 globalStore := mem.NewGlobalStore() 56 addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed") 57 mockStore := globalStore.NewNodeStore(addr) 58 59 db, err = NewMockDbStore(params, mockStore) 60 } else { 61 db, err = NewLDBStore(params) 62 } 63 64 cleanup := func() { 65 if db != nil { 66 db.Close() 67 } 68 err = os.RemoveAll(dir) 69 if err != nil { 70 panic(fmt.Sprintf("db cleanup failed: %v", err)) 71 } 72 } 73 74 return &testDbStore{db, dir}, cleanup, err 75 } 76 77 func testPoFunc(k Address) (ret uint8) { 78 basekey := make([]byte, 32) 79 return uint8(Proximity(basekey, k[:])) 80 } 81 82 func testDbStoreRandom(n int, chunksize int64, mock bool, t *testing.T) { 83 db, cleanup, err := newTestDbStore(mock, true) 84 defer cleanup() 85 if err != nil { 86 t.Fatalf("init dbStore failed: %v", err) 87 } 88 testStoreRandom(db, n, chunksize, t) 89 } 90 91 func testDbStoreCorrect(n int, chunksize int64, mock bool, t *testing.T) { 92 db, cleanup, err := newTestDbStore(mock, false) 93 defer cleanup() 94 if err != nil { 95 t.Fatalf("init dbStore failed: %v", err) 96 } 97 testStoreCorrect(db, n, chunksize, t) 98 } 99 100 func TestMarkAccessed(t *testing.T) { 101 db, cleanup, err := newTestDbStore(false, true) 102 defer cleanup() 103 if err != nil { 104 t.Fatalf("init dbStore failed: %v", err) 105 } 106 107 h := GenerateRandomChunk(ch.DefaultSize) 108 109 db.Put(context.Background(), h) 110 111 var index dpaDBIndex 112 addr := h.Address() 113 idxk := getIndexKey(addr) 114 115 idata, err := db.db.Get(idxk) 116 if err != nil { 117 t.Fatal(err) 118 } 119 decodeIndex(idata, &index) 120 121 if index.Access != 0 { 122 t.Fatalf("Expected the access index to be %d, but it is %d", 0, index.Access) 123 } 124 125 db.MarkAccessed(addr) 126 db.writeCurrentBatch() 127 128 idata, err = db.db.Get(idxk) 129 if err != nil { 130 t.Fatal(err) 131 } 132 decodeIndex(idata, &index) 133 134 if index.Access != 1 { 135 t.Fatalf("Expected the access index to be %d, but it is %d", 1, index.Access) 136 } 137 138 } 139 140 func TestDbStoreRandom_1(t *testing.T) { 141 testDbStoreRandom(1, 0, false, t) 142 } 143 144 func TestDbStoreCorrect_1(t *testing.T) { 145 testDbStoreCorrect(1, 4096, false, t) 146 } 147 148 func TestDbStoreRandom_1k(t *testing.T) { 149 testDbStoreRandom(1000, 0, false, t) 150 } 151 152 func TestDbStoreCorrect_1k(t *testing.T) { 153 testDbStoreCorrect(1000, 4096, false, t) 154 } 155 156 func TestMockDbStoreRandom_1(t *testing.T) { 157 testDbStoreRandom(1, 0, true, t) 158 } 159 160 func TestMockDbStoreCorrect_1(t *testing.T) { 161 testDbStoreCorrect(1, 4096, true, t) 162 } 163 164 func TestMockDbStoreRandom_1k(t *testing.T) { 165 testDbStoreRandom(1000, 0, true, t) 166 } 167 168 func TestMockDbStoreCorrect_1k(t *testing.T) { 169 testDbStoreCorrect(1000, 4096, true, t) 170 } 171 172 func testDbStoreNotFound(t *testing.T, mock bool) { 173 db, cleanup, err := newTestDbStore(mock, false) 174 defer cleanup() 175 if err != nil { 176 t.Fatalf("init dbStore failed: %v", err) 177 } 178 179 _, err = db.Get(context.TODO(), ZeroAddr) 180 if err != ErrChunkNotFound { 181 t.Errorf("Expected ErrChunkNotFound, got %v", err) 182 } 183 } 184 185 func TestDbStoreNotFound(t *testing.T) { 186 testDbStoreNotFound(t, false) 187 } 188 func TestMockDbStoreNotFound(t *testing.T) { 189 testDbStoreNotFound(t, true) 190 } 191 192 func testIterator(t *testing.T, mock bool) { 193 var chunkcount int = 32 194 var i int 195 var poc uint 196 chunkkeys := NewAddressCollection(chunkcount) 197 chunkkeys_results := NewAddressCollection(chunkcount) 198 199 db, cleanup, err := newTestDbStore(mock, false) 200 defer cleanup() 201 if err != nil { 202 t.Fatalf("init dbStore failed: %v", err) 203 } 204 205 chunks := GenerateRandomChunks(ch.DefaultSize, chunkcount) 206 207 for i = 0; i < len(chunks); i++ { 208 chunkkeys[i] = chunks[i].Address() 209 err := db.Put(context.TODO(), chunks[i]) 210 if err != nil { 211 t.Fatalf("dbStore.Put failed: %v", err) 212 } 213 } 214 215 for i = 0; i < len(chunkkeys); i++ { 216 log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i])) 217 } 218 i = 0 219 for poc = 0; poc <= 255; poc++ { 220 err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool { 221 log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc))) 222 chunkkeys_results[n] = k 223 i++ 224 return true 225 }) 226 if err != nil { 227 t.Fatalf("Iterator call failed: %v", err) 228 } 229 } 230 231 for i = 0; i < chunkcount; i++ { 232 if !bytes.Equal(chunkkeys[i], chunkkeys_results[i]) { 233 t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeys_results[i]) 234 } 235 } 236 237 } 238 239 func TestIterator(t *testing.T) { 240 testIterator(t, false) 241 } 242 func TestMockIterator(t *testing.T) { 243 testIterator(t, true) 244 } 245 246 func benchmarkDbStorePut(n int, processors int, chunksize int64, mock bool, b *testing.B) { 247 db, cleanup, err := newTestDbStore(mock, true) 248 defer cleanup() 249 if err != nil { 250 b.Fatalf("init dbStore failed: %v", err) 251 } 252 benchmarkStorePut(db, n, chunksize, b) 253 } 254 255 func benchmarkDbStoreGet(n int, processors int, chunksize int64, mock bool, b *testing.B) { 256 db, cleanup, err := newTestDbStore(mock, true) 257 defer cleanup() 258 if err != nil { 259 b.Fatalf("init dbStore failed: %v", err) 260 } 261 benchmarkStoreGet(db, n, chunksize, b) 262 } 263 264 func BenchmarkDbStorePut_1_500(b *testing.B) { 265 benchmarkDbStorePut(500, 1, 4096, false, b) 266 } 267 268 func BenchmarkDbStorePut_8_500(b *testing.B) { 269 benchmarkDbStorePut(500, 8, 4096, false, b) 270 } 271 272 func BenchmarkDbStoreGet_1_500(b *testing.B) { 273 benchmarkDbStoreGet(500, 1, 4096, false, b) 274 } 275 276 func BenchmarkDbStoreGet_8_500(b *testing.B) { 277 benchmarkDbStoreGet(500, 8, 4096, false, b) 278 } 279 280 func BenchmarkMockDbStorePut_1_500(b *testing.B) { 281 benchmarkDbStorePut(500, 1, 4096, true, b) 282 } 283 284 func BenchmarkMockDbStorePut_8_500(b *testing.B) { 285 benchmarkDbStorePut(500, 8, 4096, true, b) 286 } 287 288 func BenchmarkMockDbStoreGet_1_500(b *testing.B) { 289 benchmarkDbStoreGet(500, 1, 4096, true, b) 290 } 291 292 func BenchmarkMockDbStoreGet_8_500(b *testing.B) { 293 benchmarkDbStoreGet(500, 8, 4096, true, b) 294 } 295 296 // TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store, and 297 // retrieve them, provided we don't hit the garbage collection 298 func TestLDBStoreWithoutCollectGarbage(t *testing.T) { 299 capacity := 50 300 n := 10 301 302 ldb, cleanup := newLDBStore(t) 303 ldb.setCapacity(uint64(capacity)) 304 defer cleanup() 305 306 chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize)) 307 if err != nil { 308 t.Fatal(err.Error()) 309 } 310 311 log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) 312 313 for _, ch := range chunks { 314 ret, err := ldb.Get(context.TODO(), ch.Address()) 315 if err != nil { 316 t.Fatal(err) 317 } 318 319 if !bytes.Equal(ret.Data(), ch.Data()) { 320 t.Fatal("expected to get the same data back, but got smth else") 321 } 322 } 323 324 if ldb.entryCnt != uint64(n) { 325 t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt) 326 } 327 328 if ldb.accessCnt != uint64(2*n) { 329 t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt) 330 } 331 } 332 333 // TestLDBStoreCollectGarbage tests that we can put more chunks than LevelDB's capacity, and 334 // retrieve only some of them, because garbage collection must have partially cleared the store 335 // Also tests that we can delete chunks and that we can trigger garbage collection 336 func TestLDBStoreCollectGarbage(t *testing.T) { 337 338 // below max ronud 339 initialCap := defaultMaxGCRound / 100 340 cap := initialCap / 2 341 t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage) 342 t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage) 343 344 // at max round 345 cap = initialCap 346 t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage) 347 t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage) 348 349 // more than max around, not on threshold 350 cap = initialCap + 500 351 t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage) 352 t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage) 353 354 } 355 356 func testLDBStoreCollectGarbage(t *testing.T) { 357 params := strings.Split(t.Name(), "/") 358 capacity, err := strconv.Atoi(params[2]) 359 if err != nil { 360 t.Fatal(err) 361 } 362 n, err := strconv.Atoi(params[3]) 363 if err != nil { 364 t.Fatal(err) 365 } 366 367 ldb, cleanup := newLDBStore(t) 368 ldb.setCapacity(uint64(capacity)) 369 defer cleanup() 370 371 // retrieve the gc round target count for the db capacity 372 ldb.startGC(capacity) 373 roundTarget := ldb.gc.target 374 375 // split put counts to gc target count threshold, and wait for gc to finish in between 376 var allChunks []Chunk 377 remaining := n 378 for remaining > 0 { 379 var putCount int 380 if remaining < roundTarget { 381 putCount = remaining 382 } else { 383 putCount = roundTarget 384 } 385 remaining -= putCount 386 chunks, err := mputRandomChunks(ldb, putCount, int64(ch.DefaultSize)) 387 if err != nil { 388 t.Fatal(err.Error()) 389 } 390 allChunks = append(allChunks, chunks...) 391 log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n) 392 393 ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) 394 defer cancel() 395 waitGc(ctx, ldb) 396 } 397 398 // attempt gets on all put chunks 399 var missing int 400 for _, ch := range allChunks { 401 ret, err := ldb.Get(context.TODO(), ch.Address()) 402 if err == ErrChunkNotFound || err == ldberrors.ErrNotFound { 403 missing++ 404 continue 405 } 406 if err != nil { 407 t.Fatal(err) 408 } 409 410 if !bytes.Equal(ret.Data(), ch.Data()) { 411 t.Fatal("expected to get the same data back, but got smth else") 412 } 413 414 log.Trace("got back chunk", "chunk", ret) 415 } 416 417 // all surplus chunks should be missing 418 expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget) 419 if missing != expectMissing { 420 t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", expectMissing, missing) 421 } 422 423 log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) 424 } 425 426 // TestLDBStoreAddRemove tests that we can put and then delete a given chunk 427 func TestLDBStoreAddRemove(t *testing.T) { 428 ldb, cleanup := newLDBStore(t) 429 ldb.setCapacity(200) 430 defer cleanup() 431 432 n := 100 433 chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize)) 434 if err != nil { 435 t.Fatalf(err.Error()) 436 } 437 438 for i := 0; i < n; i++ { 439 // delete all even index chunks 440 if i%2 == 0 { 441 ldb.Delete(chunks[i].Address()) 442 } 443 } 444 445 log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) 446 447 for i := 0; i < n; i++ { 448 ret, err := ldb.Get(context.TODO(), chunks[i].Address()) 449 450 if i%2 == 0 { 451 // expect even chunks to be missing 452 if err == nil { 453 t.Fatal("expected chunk to be missing, but got no error") 454 } 455 } else { 456 // expect odd chunks to be retrieved successfully 457 if err != nil { 458 t.Fatalf("expected no error, but got %s", err) 459 } 460 461 if !bytes.Equal(ret.Data(), chunks[i].Data()) { 462 t.Fatal("expected to get the same data back, but got smth else") 463 } 464 } 465 } 466 } 467 468 func testLDBStoreRemoveThenCollectGarbage(t *testing.T) { 469 470 params := strings.Split(t.Name(), "/") 471 capacity, err := strconv.Atoi(params[2]) 472 if err != nil { 473 t.Fatal(err) 474 } 475 n, err := strconv.Atoi(params[3]) 476 if err != nil { 477 t.Fatal(err) 478 } 479 480 ldb, cleanup := newLDBStore(t) 481 defer cleanup() 482 ldb.setCapacity(uint64(capacity)) 483 484 // put capacity count number of chunks 485 chunks := make([]Chunk, n) 486 for i := 0; i < n; i++ { 487 c := GenerateRandomChunk(ch.DefaultSize) 488 chunks[i] = c 489 log.Trace("generate random chunk", "idx", i, "chunk", c) 490 } 491 492 for i := 0; i < n; i++ { 493 err := ldb.Put(context.TODO(), chunks[i]) 494 if err != nil { 495 t.Fatal(err) 496 } 497 } 498 499 ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) 500 defer cancel() 501 waitGc(ctx, ldb) 502 503 // delete all chunks 504 // (only count the ones actually deleted, the rest will have been gc'd) 505 deletes := 0 506 for i := 0; i < n; i++ { 507 if ldb.Delete(chunks[i].Address()) == nil { 508 deletes++ 509 } 510 } 511 512 log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) 513 514 if ldb.entryCnt != 0 { 515 t.Fatalf("ldb.entrCnt expected 0 got %v", ldb.entryCnt) 516 } 517 518 // the manual deletes will have increased accesscnt, so we need to add this when we verify the current count 519 expAccessCnt := uint64(n) 520 if ldb.accessCnt != expAccessCnt { 521 t.Fatalf("ldb.accessCnt expected %v got %v", expAccessCnt, ldb.accessCnt) 522 } 523 524 // retrieve the gc round target count for the db capacity 525 ldb.startGC(capacity) 526 roundTarget := ldb.gc.target 527 528 remaining := n 529 var puts int 530 for remaining > 0 { 531 var putCount int 532 if remaining < roundTarget { 533 putCount = remaining 534 } else { 535 putCount = roundTarget 536 } 537 remaining -= putCount 538 for putCount > 0 { 539 ldb.Put(context.TODO(), chunks[puts]) 540 log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget) 541 puts++ 542 putCount-- 543 } 544 545 ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) 546 defer cancel() 547 waitGc(ctx, ldb) 548 } 549 550 // expect first surplus chunks to be missing, because they have the smallest access value 551 expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget) 552 for i := 0; i < expectMissing; i++ { 553 _, err := ldb.Get(context.TODO(), chunks[i].Address()) 554 if err == nil { 555 t.Fatalf("expected surplus chunk %d to be missing, but got no error", i) 556 } 557 } 558 559 // expect last chunks to be present, as they have the largest access value 560 for i := expectMissing; i < n; i++ { 561 ret, err := ldb.Get(context.TODO(), chunks[i].Address()) 562 if err != nil { 563 t.Fatalf("chunk %v: expected no error, but got %s", i, err) 564 } 565 if !bytes.Equal(ret.Data(), chunks[i].Data()) { 566 t.Fatal("expected to get the same data back, but got smth else") 567 } 568 } 569 } 570 571 // TestLDBStoreCollectGarbageAccessUnlikeIndex tests garbage collection where accesscount differs from indexcount 572 func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) { 573 574 capacity := defaultMaxGCRound / 100 * 2 575 n := capacity - 1 576 577 ldb, cleanup := newLDBStore(t) 578 ldb.setCapacity(uint64(capacity)) 579 defer cleanup() 580 581 chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize)) 582 if err != nil { 583 t.Fatal(err.Error()) 584 } 585 log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) 586 587 // set first added capacity/2 chunks to highest accesscount 588 for i := 0; i < capacity/2; i++ { 589 _, err := ldb.Get(context.TODO(), chunks[i].Address()) 590 if err != nil { 591 t.Fatalf("fail add chunk #%d - %s: %v", i, chunks[i].Address(), err) 592 } 593 } 594 _, err = mputRandomChunks(ldb, 2, int64(ch.DefaultSize)) 595 if err != nil { 596 t.Fatal(err.Error()) 597 } 598 599 // wait for garbage collection to kick in on the responsible actor 600 ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) 601 defer cancel() 602 waitGc(ctx, ldb) 603 604 var missing int 605 for i, ch := range chunks[2 : capacity/2] { 606 ret, err := ldb.Get(context.TODO(), ch.Address()) 607 if err == ErrChunkNotFound || err == ldberrors.ErrNotFound { 608 t.Fatalf("fail find chunk #%d - %s: %v", i, ch.Address(), err) 609 } 610 611 if !bytes.Equal(ret.Data(), ch.Data()) { 612 t.Fatal("expected to get the same data back, but got smth else") 613 } 614 log.Trace("got back chunk", "chunk", ret) 615 } 616 617 log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) 618 } 619 620 func TestCleanIndex(t *testing.T) { 621 capacity := 5000 622 n := 3 623 624 ldb, cleanup := newLDBStore(t) 625 ldb.setCapacity(uint64(capacity)) 626 defer cleanup() 627 628 chunks, err := mputRandomChunks(ldb, n, 4096) 629 if err != nil { 630 t.Fatal(err) 631 } 632 633 // remove the data of the first chunk 634 po := ldb.po(chunks[0].Address()[:]) 635 dataKey := make([]byte, 10) 636 dataKey[0] = keyData 637 dataKey[1] = byte(po) 638 // dataKey[2:10] = first chunk has storageIdx 0 on [2:10] 639 if _, err := ldb.db.Get(dataKey); err != nil { 640 t.Fatal(err) 641 } 642 if err := ldb.db.Delete(dataKey); err != nil { 643 t.Fatal(err) 644 } 645 646 // remove the gc index row for the first chunk 647 gcFirstCorrectKey := make([]byte, 9) 648 gcFirstCorrectKey[0] = keyGCIdx 649 if err := ldb.db.Delete(gcFirstCorrectKey); err != nil { 650 t.Fatal(err) 651 } 652 653 // warp the gc data of the second chunk 654 // this data should be correct again after the clean 655 gcSecondCorrectKey := make([]byte, 9) 656 gcSecondCorrectKey[0] = keyGCIdx 657 binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(1)) 658 gcSecondCorrectVal, err := ldb.db.Get(gcSecondCorrectKey) 659 if err != nil { 660 t.Fatal(err) 661 } 662 warpedGCVal := make([]byte, len(gcSecondCorrectVal)+1) 663 copy(warpedGCVal[1:], gcSecondCorrectVal) 664 if err := ldb.db.Delete(gcSecondCorrectKey); err != nil { 665 t.Fatal(err) 666 } 667 if err := ldb.db.Put(gcSecondCorrectKey, warpedGCVal); err != nil { 668 t.Fatal(err) 669 } 670 671 if err := ldb.CleanGCIndex(); err != nil { 672 t.Fatal(err) 673 } 674 675 // the index without corresponding data should have been deleted 676 idxKey := make([]byte, 33) 677 idxKey[0] = keyIndex 678 copy(idxKey[1:], chunks[0].Address()) 679 if _, err := ldb.db.Get(idxKey); err == nil { 680 t.Fatalf("expected chunk 0 idx to be pruned: %v", idxKey) 681 } 682 683 // the two other indices should be present 684 copy(idxKey[1:], chunks[1].Address()) 685 if _, err := ldb.db.Get(idxKey); err != nil { 686 t.Fatalf("expected chunk 1 idx to be present: %v", idxKey) 687 } 688 689 copy(idxKey[1:], chunks[2].Address()) 690 if _, err := ldb.db.Get(idxKey); err != nil { 691 t.Fatalf("expected chunk 2 idx to be present: %v", idxKey) 692 } 693 694 // first gc index should still be gone 695 if _, err := ldb.db.Get(gcFirstCorrectKey); err == nil { 696 t.Fatalf("expected gc 0 idx to be pruned: %v", idxKey) 697 } 698 699 // second gc index should still be fixed 700 if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil { 701 t.Fatalf("expected gc 1 idx to be present: %v", idxKey) 702 } 703 704 // third gc index should be unchanged 705 binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(2)) 706 if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil { 707 t.Fatalf("expected gc 2 idx to be present: %v", idxKey) 708 } 709 710 c, err := ldb.db.Get(keyEntryCnt) 711 if err != nil { 712 t.Fatalf("expected gc 2 idx to be present: %v", idxKey) 713 } 714 715 // entrycount should now be one less 716 entryCount := binary.BigEndian.Uint64(c) 717 if entryCount != 2 { 718 t.Fatalf("expected entrycnt to be 2, was %d", c) 719 } 720 721 // the chunks might accidentally be in the same bin 722 // if so that bin counter will now be 2 - the highest added index. 723 // if not, the total of them will be 3 724 poBins := []uint8{ldb.po(chunks[1].Address()), ldb.po(chunks[2].Address())} 725 if poBins[0] == poBins[1] { 726 poBins = poBins[:1] 727 } 728 729 var binTotal uint64 730 var currentBin [2]byte 731 currentBin[0] = keyDistanceCnt 732 if len(poBins) == 1 { 733 currentBin[1] = poBins[0] 734 c, err := ldb.db.Get(currentBin[:]) 735 if err != nil { 736 t.Fatalf("expected gc 2 idx to be present: %v", idxKey) 737 } 738 binCount := binary.BigEndian.Uint64(c) 739 if binCount != 2 { 740 t.Fatalf("expected entrycnt to be 2, was %d", binCount) 741 } 742 } else { 743 for _, bin := range poBins { 744 currentBin[1] = bin 745 c, err := ldb.db.Get(currentBin[:]) 746 if err != nil { 747 t.Fatalf("expected gc 2 idx to be present: %v", idxKey) 748 } 749 binCount := binary.BigEndian.Uint64(c) 750 binTotal += binCount 751 752 } 753 if binTotal != 3 { 754 t.Fatalf("expected sum of bin indices to be 3, was %d", binTotal) 755 } 756 } 757 758 // check that the iterator quits properly 759 chunks, err = mputRandomChunks(ldb, 4100, 4096) 760 if err != nil { 761 t.Fatal(err) 762 } 763 764 po = ldb.po(chunks[4099].Address()[:]) 765 dataKey = make([]byte, 10) 766 dataKey[0] = keyData 767 dataKey[1] = byte(po) 768 binary.BigEndian.PutUint64(dataKey[2:], 4099+3) 769 if _, err := ldb.db.Get(dataKey); err != nil { 770 t.Fatal(err) 771 } 772 if err := ldb.db.Delete(dataKey); err != nil { 773 t.Fatal(err) 774 } 775 776 if err := ldb.CleanGCIndex(); err != nil { 777 t.Fatal(err) 778 } 779 780 // entrycount should now be one less of added chunks 781 c, err = ldb.db.Get(keyEntryCnt) 782 if err != nil { 783 t.Fatalf("expected gc 2 idx to be present: %v", idxKey) 784 } 785 entryCount = binary.BigEndian.Uint64(c) 786 if entryCount != 4099+2 { 787 t.Fatalf("expected entrycnt to be 2, was %d", c) 788 } 789 } 790 791 func waitGc(ctx context.Context, ldb *LDBStore) { 792 <-ldb.gc.runC 793 ldb.gc.runC <- struct{}{} 794 }