github.com/linapex/ethereum-go-chinese@v0.0.0-20190316121929-f8b7a73c3fa1/swarm/storage/ldbstore_test.go

//<developer>
//    <name>linapex 曹一峰</name>
//    <email>linapex@163.com</email>
//    <wx>superexc</wx>
//    <qqgroup>128148617</qqgroup>
//    <url>https://jsq.ink</url>
//    <role>pku engineer</role>
//    <date>2019-03-16 19:16:45</date>
//</624450120112410624>

package storage

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	ch "github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"

	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
)

type testDbStore struct {
	*LDBStore
	dir string
}

func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
	dir, err := ioutil.TempDir("", "bzz-storage-test")
	if err != nil {
		return nil, func() {}, err
	}

	var db *LDBStore
	storeparams := NewDefaultStoreParams()
	params := NewLDBStoreParams(storeparams, dir)
	params.Po = testPoFunc

	if mock {
		globalStore := mem.NewGlobalStore()
		addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
		mockStore := globalStore.NewNodeStore(addr)

		db, err = NewMockDbStore(params, mockStore)
	} else {
		db, err = NewLDBStore(params)
	}

	cleanup := func() {
		if db != nil {
			db.Close()
		}
		err = os.RemoveAll(dir)
		if err != nil {
			panic(fmt.Sprintf("db cleanup failed: %v", err))
		}
	}

	return &testDbStore{db, dir}, cleanup, err
}

func testPoFunc(k Address) (ret uint8) {
	basekey := make([]byte, 32)
	return uint8(Proximity(basekey, k[:]))
}

func testDbStoreRandom(n int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreRandom(db, n, chunksize, t)
}

func testDbStoreCorrect(n int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreCorrect(db, n, chunksize, t)
}

func TestMarkAccessed(t *testing.T) {
	db, cleanup, err := newTestDbStore(false, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	h := GenerateRandomChunk(ch.DefaultSize)

	db.Put(context.Background(), h)

	var index dpaDBIndex
	addr := h.Address()
	idxk := getIndexKey(addr)

	idata, err := db.db.Get(idxk)
	if err != nil {
		t.Fatal(err)
	}
	decodeIndex(idata, &index)

	if index.Access != 0 {
		t.Fatalf("Expected the access index to be %d, but it is %d", 0, index.Access)
	}

	db.MarkAccessed(addr)
	db.writeCurrentBatch()

	idata, err = db.db.Get(idxk)
	if err != nil {
		t.Fatal(err)
	}
	decodeIndex(idata, &index)

	if index.Access != 1 {
		t.Fatalf("Expected the access index to be %d, but it is %d", 1, index.Access)
	}
}

func TestDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, 0, false, t)
}

func TestDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, 4096, false, t)
}

func TestDbStoreRandom_1k(t *testing.T) {
	testDbStoreRandom(1000, 0, false, t)
}

func TestDbStoreCorrect_1k(t *testing.T) {
	testDbStoreCorrect(1000, 4096, false, t)
}

func TestMockDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, 0, true, t)
}

func TestMockDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, 4096, true, t)
}

func TestMockDbStoreRandom_1k(t *testing.T) {
	testDbStoreRandom(1000, 0, true, t)
}

func TestMockDbStoreCorrect_1k(t *testing.T) {
	testDbStoreCorrect(1000, 4096, true, t)
}

func testDbStoreNotFound(t *testing.T, mock bool) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	_, err = db.Get(context.TODO(), ZeroAddr)
	if err != ErrChunkNotFound {
		t.Errorf("Expected ErrChunkNotFound, got %v", err)
	}
}

func TestDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, false)
}

func TestMockDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, true)
}

func testIterator(t *testing.T, mock bool) {
	var chunkcount int = 32
	var i int
	var poc uint
	chunkkeys := NewAddressCollection(chunkcount)
	chunkkeys_results := NewAddressCollection(chunkcount)

	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	chunks := GenerateRandomChunks(ch.DefaultSize, chunkcount)

	for i = 0; i < len(chunks); i++ {
		chunkkeys[i] = chunks[i].Address()
		err := db.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatalf("dbStore.Put failed: %v", err)
		}
	}

	for i = 0; i < len(chunkkeys); i++ {
		log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
	}
	i = 0
	for poc = 0; poc <= 255; poc++ {
		err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
			log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
			chunkkeys_results[n] = k
			i++
			return true
		})
		if err != nil {
			t.Fatalf("Iterator call failed: %v", err)
		}
	}

	for i = 0; i < chunkcount; i++ {
		if !bytes.Equal(chunkkeys[i], chunkkeys_results[i]) {
			t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeys_results[i])
		}
	}
}

func TestIterator(t *testing.T) {
	testIterator(t, false)
}

func TestMockIterator(t *testing.T) {
	testIterator(t, true)
}

func benchmarkDbStorePut(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStorePut(db, n, chunksize, b)
}

func benchmarkDbStoreGet(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStoreGet(db, n, chunksize, b)
}

func BenchmarkDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, false, b)
}

func BenchmarkDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, false, b)
}

func BenchmarkDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, false, b)
}

func BenchmarkDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, false, b)
}

func BenchmarkMockDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, true, b)
}

func BenchmarkMockDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, true, b)
}

func BenchmarkMockDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, true, b)
}

func BenchmarkMockDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, true, b)
}
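
// The benchmarks above use the standard testing.B harness and can be run with
// the usual Go tooling. An illustrative invocation (the package path is an
// assumption about where this file lives in the repository):
//
//	go test -run NONE -bench 'BenchmarkDbStore(Put|Get)' ./swarm/storage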

// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks
// in the LevelDB store and retrieve them, as long as we don't hit garbage collection.
func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
	capacity := 50
	n := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for _, ch := range chunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got smth else")
		}
	}

	if ldb.entryCnt != uint64(n) {
		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt)
	}

	if ldb.accessCnt != uint64(2*n) {
		t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt)
	}
}

// TestLDBStoreCollectGarbage tests that we can put more chunks than the LevelDB store's
// capacity and retrieve only part of them back, because garbage collection must have
// partially cleared the store. It also tests that we can delete chunks and that garbage
// collection can be triggered.
func TestLDBStoreCollectGarbage(t *testing.T) {
	// below max round
	initialCap := defaultMaxGCRound / 100
	cap := initialCap / 2
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// at max round
	cap = initialCap
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

	// more than max round, not on threshold
	cap = initialCap + 500
	t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
	t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
}
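
// The subtests registered above encode their parameters in the subtest name
// ("<variant>/<capacity>/<chunk count>"). For example, t.Run with the name
// "A/50/200" produces the full test name "TestLDBStoreCollectGarbage/A/50/200",
// so after splitting t.Name() on "/" in the helpers below, params[2] is the
// store capacity and params[3] is the number of chunks to put.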

func testLDBStoreCollectGarbage(t *testing.T) {
	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	// split the puts into batches of the gc target count threshold, and wait for gc to finish in between
	var allChunks []Chunk
	remaining := n
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		chunks, err := mputRandomChunks(ldb, putCount, int64(ch.DefaultSize))
		if err != nil {
			t.Fatal(err.Error())
		}
		allChunks = append(allChunks, chunks...)
		log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)

		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
		defer cancel()
		waitGc(ctx, ldb)
	}

	// attempt to get all the chunks that were put
	var missing int
	for _, ch := range allChunks {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			missing++
			continue
		}
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got smth else")
		}

		log.Trace("got back chunk", "chunk", ret)
	}

	// all surplus chunks should be missing
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	if missing != expectMissing {
		t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", expectMissing, missing)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}
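
// A minimal sketch of the expectMissing arithmetic used above and in
// testLDBStoreRemoveThenCollectGarbage below. expectMissingChunks is a
// hypothetical helper added here only as an illustration; it is not part of
// the original test suite. The tests expect one gc round target worth of
// chunks to be evicted, plus a further round target for every full round the
// surplus (n - capacity) spans.
func expectMissingChunks(n, capacity, roundTarget int) int {
	return roundTarget + (((n - capacity) / roundTarget) * roundTarget)
}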

// TestLDBStoreAddRemove tests that we can put and then delete a given chunk.
func TestLDBStoreAddRemove(t *testing.T) {
	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(200)
	defer cleanup()

	n := 100
	chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < n; i++ {
		// delete all even-index chunks
		if i%2 == 0 {
			ldb.Delete(chunks[i].Address())
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())

		if i%2 == 0 {
			// expect even chunks to be missing
			if err == nil {
				t.Fatal("expected chunk to be missing, but got no error")
			}
		} else {
			// expect odd chunks to be retrieved successfully
			if err != nil {
				t.Fatalf("expected no error, but got %s", err)
			}

			if !bytes.Equal(ret.Data(), chunks[i].Data()) {
				t.Fatal("expected to get the same data back, but got smth else")
			}
		}
	}
}

func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
	params := strings.Split(t.Name(), "/")
	capacity, err := strconv.Atoi(params[2])
	if err != nil {
		t.Fatal(err)
	}
	n, err := strconv.Atoi(params[3])
	if err != nil {
		t.Fatal(err)
	}

	ldb, cleanup := newLDBStore(t)
	defer cleanup()
	ldb.setCapacity(uint64(capacity))

	// put capacity-count number of chunks
	chunks := make([]Chunk, n)
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(ch.DefaultSize)
		chunks[i] = c
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		err := ldb.Put(context.TODO(), chunks[i])
		if err != nil {
			t.Fatal(err)
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	waitGc(ctx, ldb)

	// delete all chunks
	// (only count the ones actually deleted; the rest will have been removed by gc)
	deletes := 0
	for i := 0; i < n; i++ {
		if ldb.Delete(chunks[i].Address()) == nil {
			deletes++
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	if ldb.entryCnt != 0 {
		t.Fatalf("ldb.entrCnt expected 0 got %v", ldb.entryCnt)
	}

	// manual deletes increment accessCnt, so we need to add that in when verifying the current count
	expAccessCnt := uint64(n)
	if ldb.accessCnt != expAccessCnt {
		t.Fatalf("ldb.accessCnt expected %v got %v", expAccessCnt, ldb.accessCnt)
	}

	// retrieve the gc round target count for the db capacity
	ldb.startGC(capacity)
	roundTarget := ldb.gc.target

	remaining := n
	var puts int
	for remaining > 0 {
		var putCount int
		if remaining < roundTarget {
			putCount = remaining
		} else {
			putCount = roundTarget
		}
		remaining -= putCount
		for putCount > 0 {
			ldb.Put(context.TODO(), chunks[puts])
			log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
			puts++
			putCount--
		}

		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
		defer cancel()
		waitGc(ctx, ldb)
	}

	// expect the first surplus chunks to be missing, because they have the smallest access value
	expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
	for i := 0; i < expectMissing; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err == nil {
			t.Fatalf("expected surplus chunk %d to be missing, but got no error", i)
		}
	}

	// expect the last chunks to be present, as they have the largest access value
	for i := expectMissing; i < n; i++ {
		ret, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("chunk %v: expected no error, but got %s", i, err)
		}
		if !bytes.Equal(ret.Data(), chunks[i].Data()) {
			t.Fatal("expected to get the same data back, but got smth else")
		}
	}
}

// TestLDBStoreCollectGarbageAccessUnlikeIndex tests garbage collection where the access count differs from the index count.
func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {
	capacity := defaultMaxGCRound / 100 * 2
	n := capacity - 1

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}
	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	// give the first capacity/2 chunks that were added the highest access count
	for i := 0; i < capacity/2; i++ {
		_, err := ldb.Get(context.TODO(), chunks[i].Address())
		if err != nil {
			t.Fatalf("fail add chunk #%d - %s: %v", i, chunks[i].Address(), err)
		}
	}
	_, err = mputRandomChunks(ldb, 2, int64(ch.DefaultSize))
	if err != nil {
		t.Fatal(err.Error())
	}

	// wait for garbage collection to kick in
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	waitGc(ctx, ldb)

	var missing int
	for i, ch := range chunks[2 : capacity/2] {
		ret, err := ldb.Get(context.TODO(), ch.Address())
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			t.Fatalf("fail find chunk #%d - %s: %v", i, ch.Address(), err)
		}

		if !bytes.Equal(ret.Data(), ch.Data()) {
			t.Fatal("expected to get the same data back, but got smth else")
		}
		log.Trace("got back chunk", "chunk", ret)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}
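
// TestCleanIndex below manipulates raw LevelDB keys of the store. The key
// layouts it relies on can be read off the test code itself: a data key is
// 10 bytes (keyData, the chunk's po byte, then an 8-byte big-endian storage
// index), a gc index key is 9 bytes (keyGCIdx followed by an 8-byte
// big-endian counter), and a chunk index key is 33 bytes (keyIndex followed
// by the 32-byte chunk address).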

func TestCleanIndex(t *testing.T) {
	capacity := 5000
	n := 3

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks, err := mputRandomChunks(ldb, n, 4096)
	if err != nil {
		t.Fatal(err)
	}

	// delete the data of the first chunk
	po := ldb.po(chunks[0].Address()[:])
	dataKey := make([]byte, 10)
	dataKey[0] = keyData
	dataKey[1] = byte(po)
	// dataKey[2:10]: the first chunk has storage index 0 on [2:10]
	if _, err := ldb.db.Get(dataKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Delete(dataKey); err != nil {
		t.Fatal(err)
	}

	// delete the gc index row of the first chunk
	gcFirstCorrectKey := make([]byte, 9)
	gcFirstCorrectKey[0] = keyGCIdx
	if err := ldb.db.Delete(gcFirstCorrectKey); err != nil {
		t.Fatal(err)
	}

	// warp the gc data of the second chunk;
	// this data should be correct again after the clean
	gcSecondCorrectKey := make([]byte, 9)
	gcSecondCorrectKey[0] = keyGCIdx
	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(1))
	gcSecondCorrectVal, err := ldb.db.Get(gcSecondCorrectKey)
	if err != nil {
		t.Fatal(err)
	}
	warpedGCVal := make([]byte, len(gcSecondCorrectVal)+1)
	copy(warpedGCVal[1:], gcSecondCorrectVal)
	if err := ldb.db.Delete(gcSecondCorrectKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Put(gcSecondCorrectKey, warpedGCVal); err != nil {
		t.Fatal(err)
	}

	if err := ldb.CleanGCIndex(); err != nil {
		t.Fatal(err)
	}

	// the index without corresponding data should have been deleted
	idxKey := make([]byte, 33)
	idxKey[0] = keyIndex
	copy(idxKey[1:], chunks[0].Address())
	if _, err := ldb.db.Get(idxKey); err == nil {
		t.Fatalf("expected chunk 0 idx to be pruned: %v", idxKey)
	}

	// the other two indices should still be present
	copy(idxKey[1:], chunks[1].Address())
	if _, err := ldb.db.Get(idxKey); err != nil {
		t.Fatalf("expected chunk 1 idx to be present: %v", idxKey)
	}

	copy(idxKey[1:], chunks[2].Address())
	if _, err := ldb.db.Get(idxKey); err != nil {
		t.Fatalf("expected chunk 2 idx to be present: %v", idxKey)
	}

	// the first gc index should still be gone
	if _, err := ldb.db.Get(gcFirstCorrectKey); err == nil {
		t.Fatalf("expected gc 0 idx to be pruned: %v", idxKey)
	}

	// the second gc index should have been fixed
	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
		t.Fatalf("expected gc 1 idx to be present: %v", idxKey)
	}

	// the third gc index should be unchanged
	binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(2))
	if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
		t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
	}

	c, err := ldb.db.Get(keyEntryCnt)
	if err != nil {
		t.Fatalf("failed to get entry count: %v", err)
	}

	// the entry count should now be one less
	entryCount := binary.BigEndian.Uint64(c)
	if entryCount != 2 {
		t.Fatalf("expected entrycnt to be 2, was %d", entryCount)
	}

	// the chunks might accidentally be in the same bin;
	// if so, that bin counter will now be 2 (the highest added index),
	// if not, the sum over the bins will be 3
	poBins := []uint8{ldb.po(chunks[1].Address()), ldb.po(chunks[2].Address())}
	if poBins[0] == poBins[1] {
		poBins = poBins[:1]
	}

	var binTotal uint64
	var currentBin [2]byte
	currentBin[0] = keyDistanceCnt
	if len(poBins) == 1 {
		currentBin[1] = poBins[0]
		c, err := ldb.db.Get(currentBin[:])
		if err != nil {
			t.Fatalf("failed to get bin count: %v", err)
		}
		binCount := binary.BigEndian.Uint64(c)
		if binCount != 2 {
			t.Fatalf("expected bin count to be 2, was %d", binCount)
		}
	} else {
		for _, bin := range poBins {
			currentBin[1] = bin
			c, err := ldb.db.Get(currentBin[:])
			if err != nil {
				t.Fatalf("failed to get bin count: %v", err)
			}
			binCount := binary.BigEndian.Uint64(c)
			binTotal += binCount
		}
		if binTotal != 3 {
			t.Fatalf("expected sum of bin indices to be 3, was %d", binTotal)
		}
	}

	// check that the iterator quits properly
	chunks, err = mputRandomChunks(ldb, 4100, 4096)
	if err != nil {
		t.Fatal(err)
	}

	po = ldb.po(chunks[4099].Address()[:])
	dataKey = make([]byte, 10)
	dataKey[0] = keyData
	dataKey[1] = byte(po)
	binary.BigEndian.PutUint64(dataKey[2:], 4099+3)
	if _, err := ldb.db.Get(dataKey); err != nil {
		t.Fatal(err)
	}
	if err := ldb.db.Delete(dataKey); err != nil {
		t.Fatal(err)
	}

	if err := ldb.CleanGCIndex(); err != nil {
		t.Fatal(err)
	}

	// the entry count should now be one less than the number of chunks added
	c, err = ldb.db.Get(keyEntryCnt)
	if err != nil {
		t.Fatalf("failed to get entry count: %v", err)
	}
	entryCount = binary.BigEndian.Uint64(c)
	if entryCount != 4099+2 {
		t.Fatalf("expected entrycnt to be %d, was %d", 4099+2, entryCount)
	}
}

// waitGc waits for any running gc round to finish by taking the gc run token
// from runC and immediately putting it back.
func waitGc(ctx context.Context, ldb *LDBStore) {
	<-ldb.gc.runC
	ldb.gc.runC <- struct{}{}
}