github.com/koko1123/flow-go-1@v0.29.6/ledger/complete/ledger_test.go

package complete_test

import (
	"bytes"
	"errors"
	"fmt"
	"math"
	"math/rand"
	"testing"
	"time"

	"github.com/rs/zerolog"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"

	"github.com/koko1123/flow-go-1/ledger"
	"github.com/koko1123/flow-go-1/ledger/common/pathfinder"
	"github.com/koko1123/flow-go-1/ledger/common/proof"
	"github.com/koko1123/flow-go-1/ledger/common/testutils"
	"github.com/koko1123/flow-go-1/ledger/complete"
	"github.com/koko1123/flow-go-1/ledger/complete/wal"
	"github.com/koko1123/flow-go-1/ledger/complete/wal/fixtures"
	"github.com/koko1123/flow-go-1/ledger/partial/ptrie"
	"github.com/koko1123/flow-go-1/module/metrics"
	"github.com/koko1123/flow-go-1/utils/unittest"
)

func TestNewLedger(t *testing.T) {
	metricsCollector := &metrics.NoopCollector{}
	wal := &fixtures.NoopWAL{}
	_, err := complete.NewLedger(wal, 100, metricsCollector, zerolog.Logger{}, complete.DefaultPathFinderVersion)
	assert.NoError(t, err)

}

func TestLedger_Update(t *testing.T) {
	t.Run("empty update", func(t *testing.T) {

		wal := &fixtures.NoopWAL{}

		l, err := complete.NewLedger(wal, 100, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
		require.NoError(t, err)

		compactor := fixtures.NewNoopCompactor(l)
		<-compactor.Ready()
		defer func() {
			<-l.Done()
			<-compactor.Done()
		}()

		// create empty update
		currentState := l.InitialState()
		up, err := ledger.NewEmptyUpdate(currentState)
		require.NoError(t, err)

		newState, _, err := l.Set(up)
		require.NoError(t, err)

		// state shouldn't change
		assert.Equal(t, currentState, newState)
	})

	t.Run("non-empty update and query", func(t *testing.T) {

		// UpdateFixture
		wal := &fixtures.NoopWAL{}
		led, err := complete.NewLedger(wal, 100, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
		require.NoError(t, err)

		compactor := fixtures.NewNoopCompactor(led)
		<-compactor.Ready()
		defer func() {
			<-led.Done()
			<-compactor.Done()
		}()

		curSC := led.InitialState()

		u := testutils.UpdateFixture()
		u.SetState(curSC)

		newSc, _, err := led.Set(u)
		require.NoError(t, err)
		assert.NotEqual(t, curSC, newSc)

		q, err := ledger.NewQuery(newSc, u.Keys())
		require.NoError(t, err)

		retValues, err := led.Get(q)
		require.NoError(t, err)

		for i, v := range u.Values() {
			assert.Equal(t, v, retValues[i])
		}
	})
}

func TestLedger_Get(t *testing.T) {
	t.Run("empty query", func(t *testing.T) {

		wal := &fixtures.NoopWAL{}

		led, err := complete.NewLedger(wal, 100, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
		require.NoError(t, err)

		compactor := fixtures.NewNoopCompactor(led)
		<-compactor.Ready()
		defer func() {
			<-led.Done()
			<-compactor.Done()
		}()

		curSC := led.InitialState()
		q, err := ledger.NewEmptyQuery(curSC)
		require.NoError(t, err)

		retValues, err := led.Get(q)
		require.NoError(t, err)
		assert.Equal(t, len(retValues), 0)
	})

	t.Run("empty keys", func(t *testing.T) {

		wal := &fixtures.NoopWAL{}

		led, err := complete.NewLedger(wal, 100, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
		require.NoError(t, err)

		compactor := fixtures.NewNoopCompactor(led)
		<-compactor.Ready()
		defer func() {
			<-led.Done()
			<-compactor.Done()
		}()

		curS := led.InitialState()

		q := testutils.QueryFixture()
		q.SetState(curS)

		retValues, err := led.Get(q)
		require.NoError(t, err)

		assert.Equal(t, 2, len(retValues))
		assert.Equal(t, 0, len(retValues[0]))
		assert.Equal(t, 0, len(retValues[1]))

	})
}

// TestLedger_GetSingleValue tests reading a value from a single path.
func TestLedger_GetSingleValue(t *testing.T) {

	wal := &fixtures.NoopWAL{}
	led, err := complete.NewLedger(
		wal,
		100,
		&metrics.NoopCollector{},
		zerolog.Logger{},
		complete.DefaultPathFinderVersion,
	)
	require.NoError(t, err)

	compactor := fixtures.NewNoopCompactor(led)
	<-compactor.Ready()
	defer func() {
		<-led.Done()
		<-compactor.Done()
	}()

	state := led.InitialState()

	t.Run("non-existent key", func(t *testing.T) {

		keys := testutils.RandomUniqueKeys(10, 2, 1, 10)

		for _, k := range keys {
			qs, err := ledger.NewQuerySingleValue(state, k)
			require.NoError(t, err)

			retValue, err := led.GetSingleValue(qs)
			require.NoError(t, err)
			assert.Equal(t, 0, len(retValue))
		}
	})

	t.Run("existent key", func(t *testing.T) {

		u := testutils.UpdateFixture()
		u.SetState(state)

		newState, _, err := led.Set(u)
		require.NoError(t, err)
		assert.NotEqual(t, state, newState)

		for i, k := range u.Keys() {
			q, err := ledger.NewQuerySingleValue(newState, k)
			require.NoError(t, err)

			retValue, err := led.GetSingleValue(q)
			require.NoError(t, err)
			assert.Equal(t, u.Values()[i], retValue)
		}
	})

	t.Run("mix of existent and non-existent keys", func(t *testing.T) {

		u := testutils.UpdateFixture()
		u.SetState(state)

		newState, _, err := led.Set(u)
		require.NoError(t, err)
		assert.NotEqual(t, state, newState)

		// Save expected values for existent keys
		expectedValues := make(map[string]ledger.Value)
		for i, key := range u.Keys() {
			encKey := ledger.EncodeKey(&key)
			expectedValues[string(encKey)] = u.Values()[i]
		}

		// Create a randomly ordered mix of existent and non-existent keys
		var queryKeys []ledger.Key
		queryKeys = append(queryKeys, u.Keys()...)
		queryKeys = append(queryKeys, testutils.RandomUniqueKeys(10, 2, 1, 10)...)

		rand.Shuffle(len(queryKeys), func(i, j int) {
			queryKeys[i], queryKeys[j] = queryKeys[j], queryKeys[i]
		})

		for _, k := range queryKeys {
			qs, err := ledger.NewQuerySingleValue(newState, k)
			require.NoError(t, err)

			retValue, err := led.GetSingleValue(qs)
			require.NoError(t, err)

			encKey := ledger.EncodeKey(&k)
			if value, ok := expectedValues[string(encKey)]; ok {
				require.Equal(t, value, retValue)
			} else {
				require.Equal(t, 0, len(retValue))
			}
		}
	})
}

func TestLedgerValueSizes(t *testing.T) {
	t.Run("empty query", func(t *testing.T) {

		wal := &fixtures.NoopWAL{}
		led, err := complete.NewLedger(
			wal,
			100,
			&metrics.NoopCollector{},
			zerolog.Logger{},
			complete.DefaultPathFinderVersion,
		)
		require.NoError(t, err)

		compactor := fixtures.NewNoopCompactor(led)
		<-compactor.Ready()
		defer func() {
			<-led.Done()
			<-compactor.Done()
		}()

		curState := led.InitialState()
		q, err := ledger.NewEmptyQuery(curState)
		require.NoError(t, err)

		retSizes, err := led.ValueSizes(q)
		require.NoError(t, err)
		require.Equal(t, 0, len(retSizes))
	})

	t.Run("non-existent keys", func(t *testing.T) {

		wal := &fixtures.NoopWAL{}
		led, err := complete.NewLedger(
			wal,
			100,
			&metrics.NoopCollector{},
			zerolog.Logger{},
			complete.DefaultPathFinderVersion,
		)
		require.NoError(t, err)

		compactor := fixtures.NewNoopCompactor(led)
		<-compactor.Ready()
		defer func() {
			<-led.Done()
			<-compactor.Done()
		}()

		curState := led.InitialState()
		q := testutils.QueryFixture()
		q.SetState(curState)

		retSizes, err := led.ValueSizes(q)
		require.NoError(t, err)
		require.Equal(t, len(q.Keys()), len(retSizes))
		for _, size := range retSizes {
			assert.Equal(t, 0, size)
		}
	})

	t.Run("existent keys", func(t *testing.T) {

		wal := &fixtures.NoopWAL{}
		led, err := complete.NewLedger(
			wal,
			100,
			&metrics.NoopCollector{},
			zerolog.Logger{},
			complete.DefaultPathFinderVersion,
		)
		require.NoError(t, err)

		compactor := fixtures.NewNoopCompactor(led)
		<-compactor.Ready()
		defer func() {
			<-led.Done()
			<-compactor.Done()
		}()

		curState := led.InitialState()
		u := testutils.UpdateFixture()
		u.SetState(curState)

		newState, _, err := led.Set(u)
		require.NoError(t, err)
		assert.NotEqual(t, curState, newState)

		q, err := ledger.NewQuery(newState, u.Keys())
		require.NoError(t, err)

		retSizes, err := led.ValueSizes(q)
		require.NoError(t, err)
		require.Equal(t, len(q.Keys()), len(retSizes))
		for i, size := range retSizes {
			assert.Equal(t, u.Values()[i].Size(), size)
		}
	})

	t.Run("mix of existent and non-existent keys", func(t *testing.T) {

		wal := &fixtures.NoopWAL{}
		led, err := complete.NewLedger(
			wal,
			100,
			&metrics.NoopCollector{},
			zerolog.Logger{},
			complete.DefaultPathFinderVersion,
		)
		require.NoError(t, err)

		compactor := fixtures.NewNoopCompactor(led)
		<-compactor.Ready()
		defer func() {
			<-led.Done()
			<-compactor.Done()
		}()

		curState := led.InitialState()
		u := testutils.UpdateFixture()
		u.SetState(curState)

		newState, _, err := led.Set(u)
		require.NoError(t, err)
		assert.NotEqual(t, curState, newState)

		// Save expected value sizes for existent keys
		expectedValueSizes := make(map[string]int)
		for i, key := range u.Keys() {
			encKey := ledger.EncodeKey(&key)
			expectedValueSizes[string(encKey)] = len(u.Values()[i])
		}

		// Create a randomly ordered mix of existent and non-existent keys
		var queryKeys []ledger.Key
		queryKeys = append(queryKeys, u.Keys()...)
		queryKeys = append(queryKeys, testutils.RandomUniqueKeys(10, 2, 1, 10)...)

		rand.Shuffle(len(queryKeys), func(i, j int) {
			queryKeys[i], queryKeys[j] = queryKeys[j], queryKeys[i]
		})

		q, err := ledger.NewQuery(newState, queryKeys)
		require.NoError(t, err)

		retSizes, err := led.ValueSizes(q)
		require.NoError(t, err)
		require.Equal(t, len(q.Keys()), len(retSizes))
		for i, key := range q.Keys() {
			encKey := ledger.EncodeKey(&key)
			assert.Equal(t, expectedValueSizes[string(encKey)], retSizes[i])
		}
	})
}

func TestLedger_Proof(t *testing.T) {
	t.Run("empty query", func(t *testing.T) {
		wal := &fixtures.NoopWAL{}

		led, err := complete.NewLedger(wal, 100, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
		require.NoError(t, err)

		compactor := fixtures.NewNoopCompactor(led)
		<-compactor.Ready()
		defer func() {
			<-led.Done()
			<-compactor.Done()
		}()

		curSC := led.InitialState()
		q, err := ledger.NewEmptyQuery(curSC)
		require.NoError(t, err)

		retProof, err := led.Prove(q)
		require.NoError(t, err)

		proof, err := ledger.DecodeTrieBatchProof(retProof)
		require.NoError(t, err)
		assert.Equal(t, 0, len(proof.Proofs))
	})

	t.Run("non-existing keys", func(t *testing.T) {

		wal := &fixtures.NoopWAL{}

		led, err := complete.NewLedger(wal, 100, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
		require.NoError(t, err)

		compactor := fixtures.NewNoopCompactor(led)
		<-compactor.Ready()
		defer func() {
			<-led.Done()
			<-compactor.Done()
		}()

		curS := led.InitialState()
		q := testutils.QueryFixture()
		q.SetState(curS)
		require.NoError(t, err)

		retProof, err := led.Prove(q)
		require.NoError(t, err)

		trieProof, err := ledger.DecodeTrieBatchProof(retProof)
		require.NoError(t, err)
		assert.Equal(t, 2, len(trieProof.Proofs))
		assert.True(t, proof.VerifyTrieBatchProof(trieProof, curS))

	})

	t.Run("existing keys", func(t *testing.T) {

		wal := &fixtures.NoopWAL{}
		led, err := complete.NewLedger(wal, 100, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
		require.NoError(t, err)

		compactor := fixtures.NewNoopCompactor(led)
		<-compactor.Ready()
		defer func() {
			<-led.Done()
			<-compactor.Done()
		}()

		curS := led.InitialState()

		u := testutils.UpdateFixture()
		u.SetState(curS)

		newSc, _, err := led.Set(u)
		require.NoError(t, err)
		assert.NotEqual(t, curS, newSc)

		q, err := ledger.NewQuery(newSc, u.Keys())
		require.NoError(t, err)

		retProof, err := led.Prove(q)
		require.NoError(t, err)

		trieProof, err := ledger.DecodeTrieBatchProof(retProof)
		require.NoError(t, err)
		assert.Equal(t, 2, len(trieProof.Proofs))
		assert.True(t, proof.VerifyTrieBatchProof(trieProof, newSc))
	})
}

func Test_WAL(t *testing.T) {
	const (
		numInsPerStep      = 2
		keyNumberOfParts   = 10
		keyPartMinByteSize = 1
		keyPartMaxByteSize = 100
		valueMaxByteSize   = 2 << 16 // 128 KB
		size               = 10
		checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
		checkpointsToKeep  = 1
	)

	metricsCollector := &metrics.NoopCollector{}
	logger := zerolog.Logger{}

	unittest.RunWithTempDir(t, func(dir string) {

		diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metricsCollector, dir, size, pathfinder.PathByteSize, wal.SegmentSize)
		require.NoError(t, err)

		// cache size is intentionally set to size to test deletion
		led, err := complete.NewLedger(diskWal, size, metricsCollector, logger, complete.DefaultPathFinderVersion)
		require.NoError(t, err)

		compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), size, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
		require.NoError(t, err)

		<-compactor.Ready()

		var state = led.InitialState()

		// saved data after updates
		savedData := make(map[string]map[string]ledger.Value)

		for i := 0; i < size; i++ {

			keys := testutils.RandomUniqueKeys(numInsPerStep, keyNumberOfParts, keyPartMinByteSize, keyPartMaxByteSize)
			values := testutils.RandomValues(numInsPerStep, 1, valueMaxByteSize)
			update, err := ledger.NewUpdate(state, keys, values)
			assert.NoError(t, err)
			state, _, err = led.Set(update)
			require.NoError(t, err)

			data := make(map[string]ledger.Value, len(keys))
			for j, key := range keys {
				encKey := ledger.EncodeKey(&key)
				data[string(encKey)] = values[j]
			}

			savedData[string(state[:])] = data
		}

		<-led.Done()
		<-compactor.Done()

		diskWal2, err := wal.NewDiskWAL(zerolog.Nop(), nil, metricsCollector, dir, size, pathfinder.PathByteSize, wal.SegmentSize)
		require.NoError(t, err)

		led2, err := complete.NewLedger(diskWal2, size+10, metricsCollector, logger, complete.DefaultPathFinderVersion)
		require.NoError(t, err)

		compactor2, err := complete.NewCompactor(led2, diskWal2, zerolog.Nop(), uint(size), checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
		require.NoError(t, err)

		<-compactor2.Ready()

		// random map iteration order is a benefit here
		for state, data := range savedData {

			keys := make([]ledger.Key, 0, len(data))
			for encKey := range data {
				key, err := ledger.DecodeKey([]byte(encKey))
				assert.NoError(t, err)
				keys = append(keys, *key)
			}

			var ledgerState ledger.State
			copy(ledgerState[:], state)
			query, err := ledger.NewQuery(ledgerState, keys)
			assert.NoError(t, err)
			registerValues, err := led2.Get(query)
			require.NoError(t, err)

			for i, key := range keys {
				registerValue := registerValues[i]
				encKey := ledger.EncodeKey(&key)
				assert.True(t, data[string(encKey)].Equals(registerValue))
			}
		}

		<-led2.Done()
		<-compactor2.Done()
	})
}

func TestLedgerFunctionality(t *testing.T) {
	const (
		checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
		checkpointsToKeep  = 1
	)

	rand.Seed(time.Now().UnixNano())
	// You can manually increase this for more coverage
	experimentRep := 2
	metricsCollector := &metrics.NoopCollector{}
	logger := zerolog.Logger{}

	for e := 0; e < experimentRep; e++ {
		numInsPerStep := 100
		numHistLookupPerStep := 10
		keyNumberOfParts := 10
		keyPartMinByteSize := 1
		keyPartMaxByteSize := 100
		stateComSize := 32
		valueMaxByteSize := 2 << 16 // 128 KB
		activeTries := 1000
		steps := 40                                  // number of steps
		histStorage := make(map[string]ledger.Value) // historic storage: string(state) + string(encodedKey) -> value
		latestValue := make(map[string]ledger.Value) // encoded key -> latest value
		unittest.RunWithTempDir(t, func(dbDir string) {
			diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metricsCollector, dbDir, activeTries, pathfinder.PathByteSize, wal.SegmentSize)
			require.NoError(t, err)
			led, err := complete.NewLedger(diskWal, activeTries, metricsCollector, logger, complete.DefaultPathFinderVersion)
			assert.NoError(t, err)
			compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), uint(activeTries), checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
			require.NoError(t, err)
			<-compactor.Ready()

			state := led.InitialState()
			for i := 0; i < steps; i++ {
				// add new keys
				// TODO update some of the existing keys and shuffle them
				keys := testutils.RandomUniqueKeys(numInsPerStep, keyNumberOfParts, keyPartMinByteSize, keyPartMaxByteSize)
				values := testutils.RandomValues(numInsPerStep, 1, valueMaxByteSize)
				update, err := ledger.NewUpdate(state, keys, values)
				assert.NoError(t, err)
				newState, _, err := led.Set(update)
				assert.NoError(t, err)

				// capture new values for future query
				for j, k := range keys {
					encKey := ledger.EncodeKey(&k)
					histStorage[string(newState[:])+string(encKey)] = values[j]
					latestValue[string(encKey)] = values[j]
				}

				// read values back and compare them
				query, err := ledger.NewQuery(newState, keys)
				assert.NoError(t, err)
				retValues, err := led.Get(query)
				assert.NoError(t, err)
				// []byte{} is returned as nil
				assert.True(t, valuesMatches(values, retValues))

				// get value sizes and compare them
				retSizes, err := led.ValueSizes(query)
				assert.NoError(t, err)
				assert.Equal(t, len(query.Keys()), len(retSizes))
				for i, size := range retSizes {
					assert.Equal(t, values[i].Size(), size)
				}

				// validate proofs (check individual proof and batch proof)
				proofs, err := led.Prove(query)
				assert.NoError(t, err)

				bProof, err := ledger.DecodeTrieBatchProof(proofs)
				assert.NoError(t, err)

				// validate batch proofs
				isValid := proof.VerifyTrieBatchProof(bProof, newState)
				assert.True(t, isValid)

				// validate proofs as a batch
				_, err = ptrie.NewPSMT(ledger.RootHash(newState), bProof)
				assert.NoError(t, err)

				// query all existing keys (check nothing was dropped)
				for ek, v := range latestValue {
					k, err := ledger.DecodeKey([]byte(ek))
					assert.NoError(t, err)
					query, err := ledger.NewQuery(newState, []ledger.Key{*k})
					assert.NoError(t, err)
					rv, err := led.Get(query)
					assert.NoError(t, err)
					assert.True(t, v.Equals(rv[0]))
				}

				// query some of the historic values (map iteration order is random)
				j := 0
				for s := range histStorage {
					value := histStorage[s]
					var state ledger.State
					copy(state[:], s[:stateComSize])
					enk := []byte(s[stateComSize:])
					key, err := ledger.DecodeKey(enk)
					assert.NoError(t, err)
					query, err := ledger.NewQuery(state, []ledger.Key{*key})
					assert.NoError(t, err)
					rv, err := led.Get(query)
					assert.NoError(t, err)
					assert.True(t, value.Equals(rv[0]))
					j++
					if j >= numHistLookupPerStep {
						break
					}
				}
				state = newState
			}
			<-led.Done()
			<-compactor.Done()
		})
	}
}

func Test_ExportCheckpointAt(t *testing.T) {
	t.Run("noop migration", func(t *testing.T) {
		// the exported state has two key/value pairs
		// (/1/1/22/2, "A") and (/1/3/22/4, "B")
		// this tests the migration at the specific state
		// without any special migration, so we expect both
		// registers to show up in the new trie with the same values
		unittest.RunWithTempDir(t, func(dbDir string) {
			unittest.RunWithTempDir(t, func(dir2 string) {

				const (
					capacity           = 100
					checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
					checkpointsToKeep  = 1
				)

				diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dbDir, capacity, pathfinder.PathByteSize, wal.SegmentSize)
				require.NoError(t, err)
				led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
				require.NoError(t, err)
				compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
				require.NoError(t, err)
				<-compactor.Ready()

				state := led.InitialState()
				u := testutils.UpdateFixture()
				u.SetState(state)

				state, _, err = led.Set(u)
				require.NoError(t, err)

				newState, err := led.ExportCheckpointAt(state, []ledger.Migration{noOpMigration}, []ledger.Reporter{}, []ledger.Reporter{}, complete.DefaultPathFinderVersion, dir2, "root.checkpoint")
				require.NoError(t, err)
				assert.Equal(t, newState, state)

				diskWal2, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dir2, capacity, pathfinder.PathByteSize, wal.SegmentSize)
				require.NoError(t, err)
				led2, err := complete.NewLedger(diskWal2, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
				require.NoError(t, err)
				compactor2, err := complete.NewCompactor(led2, diskWal2, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
				require.NoError(t, err)
				<-compactor2.Ready()

				q, err := ledger.NewQuery(state, u.Keys())
				require.NoError(t, err)

				retValues, err := led2.Get(q)
				require.NoError(t, err)

				for i, v := range u.Values() {
					assert.Equal(t, v, retValues[i])
				}

				<-led.Done()
				<-compactor.Done()
				<-led2.Done()
				<-compactor2.Done()
			})
		})
	})
	t.Run("migration by value", func(t *testing.T) {
		// the exported state has two key/value pairs
		// ("/1/1/22/2", "A") and ("/1/3/22/4", "B")
		// during the migration we change the value of every register holding "A" to "C"
		// so in this case the resulting exported trie is ("/1/1/22/2", "C"), ("/1/3/22/4", "B")
		unittest.RunWithTempDir(t, func(dbDir string) {
			unittest.RunWithTempDir(t, func(dir2 string) {

				const (
					capacity           = 100
					checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
					checkpointsToKeep  = 1
				)

				diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dbDir, capacity, pathfinder.PathByteSize, wal.SegmentSize)
				require.NoError(t, err)
				led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
				require.NoError(t, err)
				compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
				require.NoError(t, err)
				<-compactor.Ready()

				state := led.InitialState()
				u := testutils.UpdateFixture()
				u.SetState(state)

				state, _, err = led.Set(u)
				require.NoError(t, err)

				newState, err := led.ExportCheckpointAt(state, []ledger.Migration{migrationByValue}, []ledger.Reporter{}, []ledger.Reporter{}, complete.DefaultPathFinderVersion, dir2, "root.checkpoint")
				require.NoError(t, err)

				diskWal2, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dir2, capacity, pathfinder.PathByteSize, wal.SegmentSize)
				require.NoError(t, err)
				led2, err := complete.NewLedger(diskWal2, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
				require.NoError(t, err)
				compactor2, err := complete.NewCompactor(led2, diskWal2, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
				require.NoError(t, err)
				<-compactor2.Ready()

				q, err := ledger.NewQuery(newState, u.Keys())
				require.NoError(t, err)

				retValues, err := led2.Get(q)
				require.NoError(t, err)

				assert.Equal(t, retValues[0], ledger.Value([]byte{'C'}))
				assert.Equal(t, retValues[1], ledger.Value([]byte{'B'}))

				<-led.Done()
				<-compactor.Done()
				<-led2.Done()
				<-compactor2.Done()
			})
		})
	})
	t.Run("migration by key", func(t *testing.T) {
		// the exported state has two key/value pairs
		// ("/1/1/22/2", "A") and ("/1/3/22/4", "B")
		// during the migration we change the value of key "/1/1/22/2" to "D"
		// so in this case the resulting exported trie is ("/1/1/22/2", "D"), ("/1/3/22/4", "B")
		unittest.RunWithTempDir(t, func(dbDir string) {
			unittest.RunWithTempDir(t, func(dir2 string) {

				const (
					capacity           = 100
					checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
					checkpointsToKeep  = 1
				)

				diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dbDir, capacity, pathfinder.PathByteSize, wal.SegmentSize)
				require.NoError(t, err)
				led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
				require.NoError(t, err)
				compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
				require.NoError(t, err)
				<-compactor.Ready()

				state := led.InitialState()
				u := testutils.UpdateFixture()
				u.SetState(state)

				state, _, err = led.Set(u)
				require.NoError(t, err)

				newState, err := led.ExportCheckpointAt(state, []ledger.Migration{migrationByKey}, []ledger.Reporter{}, []ledger.Reporter{}, complete.DefaultPathFinderVersion, dir2, "root.checkpoint")
				require.NoError(t, err)

				diskWal2, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dir2, capacity, pathfinder.PathByteSize, wal.SegmentSize)
				require.NoError(t, err)
				led2, err := complete.NewLedger(diskWal2, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
				require.NoError(t, err)
				compactor2, err := complete.NewCompactor(led2, diskWal2, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
				require.NoError(t, err)
				<-compactor2.Ready()

				q, err := ledger.NewQuery(newState, u.Keys())
				require.NoError(t, err)

				retValues, err := led2.Get(q)
				require.NoError(t, err)

				assert.Equal(t, retValues[0], ledger.Value([]byte{'D'}))
				assert.Equal(t, retValues[1], ledger.Value([]byte{'B'}))

				<-led.Done()
				<-compactor.Done()
				<-led2.Done()
				<-compactor2.Done()
			})
		})
	})
}

func TestWALUpdateFailuresBubbleUp(t *testing.T) {
	unittest.RunWithTempDir(t, func(dir string) {

		const (
			capacity           = 100
			checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
			checkpointsToKeep  = 1
		)

		theError := fmt.Errorf("error error")

		metricsCollector := &metrics.NoopCollector{}

		diskWAL, err := wal.NewDiskWAL(zerolog.Nop(), nil, metricsCollector, dir, capacity, pathfinder.PathByteSize, wal.SegmentSize)
		require.NoError(t, err)

		w := &CustomUpdateWAL{
			DiskWAL: diskWAL,
			updateFn: func(update *ledger.TrieUpdate) (int, bool, error) {
				return 0, false, theError
			},
		}

		led, err := complete.NewLedger(w, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
		require.NoError(t, err)

		compactor, err := complete.NewCompactor(led, w, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
		require.NoError(t, err)

		<-compactor.Ready()

		defer func() {
			<-led.Done()
			<-compactor.Done()
		}()

		key := ledger.NewKey([]ledger.KeyPart{ledger.NewKeyPart(0, []byte{1, 2, 3})})

		values := []ledger.Value{[]byte{1, 2, 3}}
		update, err := ledger.NewUpdate(led.InitialState(), []ledger.Key{key}, values)
		require.NoError(t, err)

		_, _, err = led.Set(update)
		require.Error(t, err)
		require.True(t, errors.Is(err, theError))
	})
}

func valuesMatches(expected []ledger.Value, got []ledger.Value) bool {
	if len(expected) != len(got) {
		return false
	}
	// replace nils
	for i, v := range got {
		if v == nil {
			got[i] = []byte{}
		}
		if !bytes.Equal(expected[i], got[i]) {
			return false
		}
	}
	return true
}

func noOpMigration(p []ledger.Payload) ([]ledger.Payload, error) {
	return p, nil
}

func migrationByValue(p []ledger.Payload) ([]ledger.Payload, error) {
	ret := make([]ledger.Payload, 0, len(p))
	for _, p := range p {
		if p.Value().Equals([]byte{'A'}) {
			k, err := p.Key()
			if err != nil {
				return nil, err
			}
			pp := *ledger.NewPayload(k, ledger.Value([]byte{'C'}))
			ret = append(ret, pp)
		} else {
			ret = append(ret, p)
		}
	}
	return ret, nil
}

type CustomUpdateWAL struct {
	*wal.DiskWAL
	updateFn func(update *ledger.TrieUpdate) (int, bool, error)
}

func (w *CustomUpdateWAL) RecordUpdate(update *ledger.TrieUpdate) (int, bool, error) {
	return w.updateFn(update)
}

func migrationByKey(p []ledger.Payload) ([]ledger.Payload, error) {
	ret := make([]ledger.Payload, 0, len(p))
	for _, p := range p {
		k, err := p.Key()
		if err != nil {
			return nil, err
		}
		if k.String() == "/1/1/22/2" {
			pp := *ledger.NewPayload(k, ledger.Value([]byte{'D'}))
			ret = append(ret, pp)
		} else {
			ret = append(ret, p)
		}
	}

	return ret, nil
}
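
// Example_ledgerSetAndGet is an editor-added usage sketch, not part of the
// original test suite. It has no Output comment, so `go test` compiles but
// does not execute it. It assumes only the in-memory test doubles already
// used above (fixtures.NoopWAL and fixtures.NewNoopCompactor) and shows the
// basic write-then-read round trip against a complete.Ledger.
func Example_ledgerSetAndGet() {
	led, err := complete.NewLedger(&fixtures.NoopWAL{}, 100, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
	if err != nil {
		panic(err)
	}

	compactor := fixtures.NewNoopCompactor(led)
	<-compactor.Ready()
	defer func() {
		<-led.Done()
		<-compactor.Done()
	}()

	// write a single key/value pair starting from the initial (empty) state
	key := ledger.NewKey([]ledger.KeyPart{ledger.NewKeyPart(0, []byte{1, 2, 3})})
	update, err := ledger.NewUpdate(led.InitialState(), []ledger.Key{key}, []ledger.Value{[]byte("hello")})
	if err != nil {
		panic(err)
	}
	newState, _, err := led.Set(update)
	if err != nil {
		panic(err)
	}

	// read the value back at the state returned by Set
	query, err := ledger.NewQuery(newState, []ledger.Key{key})
	if err != nil {
		panic(err)
	}
	values, err := led.Get(query)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", values[0])
}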