github.com/cryptotooltop/go-ethereum@v0.0.0-20231103184714-151d1922f3e5/core/state/snapshot/disklayer_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"io/ioutil"
	"os"
	"testing"

	"github.com/VictoriaMetrics/fastcache"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/rawdb"
	"github.com/scroll-tech/go-ethereum/ethdb"
	"github.com/scroll-tech/go-ethereum/ethdb/leveldb"
	"github.com/scroll-tech/go-ethereum/ethdb/memorydb"
	"github.com/scroll-tech/go-ethereum/rlp"
)

// reverse reverses the contents of a byte slice. It's used to update random
// accounts with deterministic changes.
func reverse(blob []byte) []byte {
	res := make([]byte, len(blob))
	for i, b := range blob {
		res[len(blob)-1-i] = b
	}
	return res
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values.
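//
// The fixture names encode the scenario being exercised: NoMod/Mod/Del mark
// accounts that are left untouched, modified or deleted by the diff layer,
// the Nuke contracts have their entire storage wiped, and the Cache/NoCache
// suffix marks whether the value is read (and thus cached) from the disk
// layer before the merge.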
func TestDiskMerge(t *testing.T) {
	// Create some accounts in the disk layer
	db := memorydb.New()

	var (
		accNoModNoCache     = common.Hash{0x1}
		accNoModCache       = common.Hash{0x2}
		accModNoCache       = common.Hash{0x3}
		accModCache         = common.Hash{0x4}
		accDelNoCache       = common.Hash{0x5}
		accDelCache         = common.Hash{0x6}
		conNoModNoCache     = common.Hash{0x7}
		conNoModNoCacheSlot = common.Hash{0x70}
		conNoModCache       = common.Hash{0x8}
		conNoModCacheSlot   = common.Hash{0x80}
		conModNoCache       = common.Hash{0x9}
		conModNoCacheSlot   = common.Hash{0x90}
		conModCache         = common.Hash{0xa}
		conModCacheSlot     = common.Hash{0xa0}
		conDelNoCache       = common.Hash{0xb}
		conDelNoCacheSlot   = common.Hash{0xb0}
		conDelCache         = common.Hash{0xc}
		conDelCacheSlot     = common.Hash{0xc0}
		conNukeNoCache      = common.Hash{0xd}
		conNukeNoCacheSlot  = common.Hash{0xd0}
		conNukeCache        = common.Hash{0xe}
		conNukeCacheSlot    = common.Hash{0xe0}
		baseRoot            = randomHash()
		diffRoot            = randomHash()
	)

	rawdb.WriteAccountSnapshot(db, accNoModNoCache, accNoModNoCache[:])
	rawdb.WriteAccountSnapshot(db, accNoModCache, accNoModCache[:])
	rawdb.WriteAccountSnapshot(db, accModNoCache, accModNoCache[:])
	rawdb.WriteAccountSnapshot(db, accModCache, accModCache[:])
	rawdb.WriteAccountSnapshot(db, accDelNoCache, accDelNoCache[:])
	rawdb.WriteAccountSnapshot(db, accDelCache, accDelCache[:])

	rawdb.WriteAccountSnapshot(db, conNoModNoCache, conNoModNoCache[:])
	rawdb.WriteStorageSnapshot(db, conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conNoModCache, conNoModCache[:])
	rawdb.WriteStorageSnapshot(db, conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conModNoCache, conModNoCache[:])
	rawdb.WriteStorageSnapshot(db, conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conModCache, conModCache[:])
	rawdb.WriteStorageSnapshot(db, conModCache, conModCacheSlot, conModCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conDelNoCache, conDelNoCache[:])
	rawdb.WriteStorageSnapshot(db, conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conDelCache, conDelCache[:])
	rawdb.WriteStorageSnapshot(db, conDelCache, conDelCacheSlot, conDelCacheSlot[:])

	rawdb.WriteAccountSnapshot(db, conNukeNoCache, conNukeNoCache[:])
	rawdb.WriteStorageSnapshot(db, conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conNukeCache, conNukeCache[:])
	rawdb.WriteStorageSnapshot(db, conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

	rawdb.WriteSnapshotRoot(db, baseRoot)

	// Create a disk layer based on the above and cache in some data
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			baseRoot: &diskLayer{
				diskdb: db,
				cache:  fastcache.New(500 * 1024),
				root:   baseRoot,
			},
		},
	}
	base := snaps.Snapshot(baseRoot)
	base.AccountRLP(accNoModCache)
	base.AccountRLP(accModCache)
	base.AccountRLP(accDelCache)
	base.Storage(conNoModCache, conNoModCacheSlot)
	base.Storage(conModCache, conModCacheSlot)
	base.Storage(conDelCache, conDelCacheSlot)
	base.Storage(conNukeCache, conNukeCacheSlot)

	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
		accDelNoCache:  {},
		accDelCache:    {},
		conNukeNoCache: {},
		conNukeCache:   {},
	}, map[common.Hash][]byte{
		accModNoCache: reverse(accModNoCache[:]),
		accModCache:   reverse(accModCache[:]),
	}, map[common.Hash]map[common.Hash][]byte{
		conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
		conDelNoCache: {conDelNoCacheSlot: nil},
		conDelCache:   {conDelCacheSlot: nil},
	}); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	if err := snaps.Cap(diffRoot, 0); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
	// Retrieve all the data through the disk layer and validate it
	base = snaps.Snapshot(diffRoot)
	if _, ok := base.(*diskLayer); !ok {
		t.Fatalf("update not flattened into the disk layer")
	}

	// assertAccount ensures that an account matches the given blob.
	assertAccount := func(account common.Hash, data []byte) {
		t.Helper()
		blob, err := base.AccountRLP(account)
		if err != nil {
			t.Errorf("account access (%x) failed: %v", account, err)
		} else if !bytes.Equal(blob, data) {
			t.Errorf("account access (%x) mismatch: have %x, want %x", account, blob, data)
		}
	}
	assertAccount(accNoModNoCache, accNoModNoCache[:])
	assertAccount(accNoModCache, accNoModCache[:])
	assertAccount(accModNoCache, reverse(accModNoCache[:]))
	assertAccount(accModCache, reverse(accModCache[:]))
	assertAccount(accDelNoCache, nil)
	assertAccount(accDelCache, nil)

	// assertStorage ensures that a storage slot matches the given blob.
	assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
		t.Helper()
		blob, err := base.Storage(account, slot)
		if err != nil {
			t.Errorf("storage access (%x:%x) failed: %v", account, slot, err)
		} else if !bytes.Equal(blob, data) {
			t.Errorf("storage access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
		}
	}
	assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
	assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
	assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
	assertStorage(conDelCache, conDelCacheSlot, nil)
	assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
	assertStorage(conNukeCache, conNukeCacheSlot, nil)

	// Retrieve all the data directly from the database and validate it

	// assertDatabaseAccount ensures that an account from the database matches the given blob.
	assertDatabaseAccount := func(account common.Hash, data []byte) {
		t.Helper()
		if blob := rawdb.ReadAccountSnapshot(db, account); !bytes.Equal(blob, data) {
			t.Errorf("account database access (%x) mismatch: have %x, want %x", account, blob, data)
		}
	}
	assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
	assertDatabaseAccount(accNoModCache, accNoModCache[:])
	assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
	assertDatabaseAccount(accModCache, reverse(accModCache[:]))
	assertDatabaseAccount(accDelNoCache, nil)
	assertDatabaseAccount(accDelCache, nil)

	// assertDatabaseStorage ensures that a storage slot from the database matches the given blob.
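	// Deleted slots and nuked contract storage must be removed from the database
	// itself after flattening, not merely masked by the cache, hence the nil
	// expectations below.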
	assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
		t.Helper()
		if blob := rawdb.ReadStorageSnapshot(db, account, slot); !bytes.Equal(blob, data) {
			t.Errorf("storage database access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
		}
	}
	assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
	assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
	assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
	assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
	assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
	assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
func TestDiskPartialMerge(t *testing.T) {
	// Iterate the test a few times to ensure we pick various internal orderings
	// for the data slots as well as the progress marker.
	for i := 0; i < 1024; i++ {
		// Create some accounts in the disk layer
		db := memorydb.New()

		var (
			accNoModNoCache     = randomHash()
			accNoModCache       = randomHash()
			accModNoCache       = randomHash()
			accModCache         = randomHash()
			accDelNoCache       = randomHash()
			accDelCache         = randomHash()
			conNoModNoCache     = randomHash()
			conNoModNoCacheSlot = randomHash()
			conNoModCache       = randomHash()
			conNoModCacheSlot   = randomHash()
			conModNoCache       = randomHash()
			conModNoCacheSlot   = randomHash()
			conModCache         = randomHash()
			conModCacheSlot     = randomHash()
			conDelNoCache       = randomHash()
			conDelNoCacheSlot   = randomHash()
			conDelCache         = randomHash()
			conDelCacheSlot     = randomHash()
			conNukeNoCache      = randomHash()
			conNukeNoCacheSlot  = randomHash()
			conNukeCache        = randomHash()
			conNukeCacheSlot    = randomHash()
			baseRoot            = randomHash()
			diffRoot            = randomHash()
			genMarker           = append(randomHash().Bytes(), randomHash().Bytes()...)
		)

		// insertAccount injects an account into the database if it's covered by
		// the generation marker (i.e. not past it), drops the op otherwise. This
		// is needed to seed the database with a valid starting snapshot.
		insertAccount := func(account common.Hash, data []byte) {
			if bytes.Compare(account[:], genMarker) <= 0 {
				rawdb.WriteAccountSnapshot(db, account, data[:])
			}
		}
		insertAccount(accNoModNoCache, accNoModNoCache[:])
		insertAccount(accNoModCache, accNoModCache[:])
		insertAccount(accModNoCache, accModNoCache[:])
		insertAccount(accModCache, accModCache[:])
		insertAccount(accDelNoCache, accDelNoCache[:])
		insertAccount(accDelCache, accDelCache[:])

		// insertStorage injects a storage slot into the database if it's covered
		// by the generation marker (i.e. not past it), drops the op otherwise.
		// This is needed to seed the database with a valid starting snapshot.
		insertStorage := func(account common.Hash, slot common.Hash, data []byte) {
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 {
				rawdb.WriteStorageSnapshot(db, account, slot, data[:])
			}
		}
		insertAccount(conNoModNoCache, conNoModNoCache[:])
		insertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		insertAccount(conNoModCache, conNoModCache[:])
		insertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		insertAccount(conModNoCache, conModNoCache[:])
		insertStorage(conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
		insertAccount(conModCache, conModCache[:])
		insertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
		insertAccount(conDelNoCache, conDelNoCache[:])
		insertStorage(conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
		insertAccount(conDelCache, conDelCache[:])
		insertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])

		insertAccount(conNukeNoCache, conNukeNoCache[:])
		insertStorage(conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
		insertAccount(conNukeCache, conNukeCache[:])
		insertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

		rawdb.WriteSnapshotRoot(db, baseRoot)

		// Create a disk layer based on the above using a random progress marker
		// and cache in some data.
		snaps := &Tree{
			layers: map[common.Hash]snapshot{
				baseRoot: &diskLayer{
					diskdb: db,
					cache:  fastcache.New(500 * 1024),
					root:   baseRoot,
				},
			},
		}
		snaps.layers[baseRoot].(*diskLayer).genMarker = genMarker
		base := snaps.Snapshot(baseRoot)

		// assertAccount ensures that an account matches the given blob if it's
		// already covered by the disk snapshot, and errors out otherwise.
		assertAccount := func(account common.Hash, data []byte) {
			t.Helper()
			blob, err := base.AccountRLP(account)
			if bytes.Compare(account[:], genMarker) > 0 && err != ErrNotCoveredYet {
				t.Fatalf("test %d: post-marker (%x) account access (%x) succeeded: %x", i, genMarker, account, blob)
			}
			if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) account access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
			}
		}
		assertAccount(accNoModCache, accNoModCache[:])
		assertAccount(accModCache, accModCache[:])
		assertAccount(accDelCache, accDelCache[:])

		// assertStorage ensures that a storage slot matches the given blob if
		// it's already covered by the disk snapshot, and errors out otherwise.
		assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
			t.Helper()
			blob, err := base.Storage(account, slot)
			if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && err != ErrNotCoveredYet {
				t.Fatalf("test %d: post-marker (%x) storage access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
			}
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) storage access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
			}
		}
		assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
		assertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])
		assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

		// Modify or delete some accounts, flatten everything onto disk
		if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
			accDelNoCache:  {},
			accDelCache:    {},
			conNukeNoCache: {},
			conNukeCache:   {},
		}, map[common.Hash][]byte{
			accModNoCache: reverse(accModNoCache[:]),
			accModCache:   reverse(accModCache[:]),
		}, map[common.Hash]map[common.Hash][]byte{
			conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
			conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
			conDelNoCache: {conDelNoCacheSlot: nil},
			conDelCache:   {conDelCacheSlot: nil},
		}); err != nil {
			t.Fatalf("test %d: failed to update snapshot tree: %v", i, err)
		}
		if err := snaps.Cap(diffRoot, 0); err != nil {
			t.Fatalf("test %d: failed to flatten snapshot tree: %v", i, err)
		}
		// Retrieve all the data through the disk layer and validate it
		base = snaps.Snapshot(diffRoot)
		if _, ok := base.(*diskLayer); !ok {
			t.Fatalf("test %d: update not flattened into the disk layer", i)
		}
		assertAccount(accNoModNoCache, accNoModNoCache[:])
		assertAccount(accNoModCache, accNoModCache[:])
		assertAccount(accModNoCache, reverse(accModNoCache[:]))
		assertAccount(accModCache, reverse(accModCache[:]))
		assertAccount(accDelNoCache, nil)
		assertAccount(accDelCache, nil)

		assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
		assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
		assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
		assertStorage(conDelCache, conDelCacheSlot, nil)
		assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
		assertStorage(conNukeCache, conNukeCacheSlot, nil)

		// Retrieve all the data directly from the database and validate it

		// assertDatabaseAccount ensures that an account inside the database matches
		// the given blob if it's already covered by the disk snapshot, and does not
		// exist otherwise.
		assertDatabaseAccount := func(account common.Hash, data []byte) {
			t.Helper()
			blob := rawdb.ReadAccountSnapshot(db, account)
			if bytes.Compare(account[:], genMarker) > 0 && blob != nil {
				t.Fatalf("test %d: post-marker (%x) account database access (%x) succeeded: %x", i, genMarker, account, blob)
			}
			if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) account database access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
			}
		}
		assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
		assertDatabaseAccount(accNoModCache, accNoModCache[:])
		assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
		assertDatabaseAccount(accModCache, reverse(accModCache[:]))
		assertDatabaseAccount(accDelNoCache, nil)
		assertDatabaseAccount(accDelCache, nil)

		// assertDatabaseStorage ensures that a storage slot inside the database
		// matches the given blob if it's already covered by the disk snapshot,
		// and does not exist otherwise.
		assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
			t.Helper()
			blob := rawdb.ReadStorageSnapshot(db, account, slot)
			if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && blob != nil {
				t.Fatalf("test %d: post-marker (%x) storage database access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
			}
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) storage database access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
			}
		}
		assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
		assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
		assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
		assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
		assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
		assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
	}
}

// Tests that the corresponding generator is persisted correctly when the
// bottom-most diff layer is merged into the disk layer.
func TestDiskGeneratorPersistence(t *testing.T) {
	var (
		accOne        = randomHash()
		accTwo        = randomHash()
		accOneSlotOne = randomHash()
		accOneSlotTwo = randomHash()

		accThree     = randomHash()
		accThreeSlot = randomHash()
		baseRoot     = randomHash()
		diffRoot     = randomHash()
		diffTwoRoot  = randomHash()
		genMarker    = append(randomHash().Bytes(), randomHash().Bytes()...)
	)
	// Test scenario 1: the disk layer is still under construction.
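	// With a non-nil generation marker on the disk layer, flattening a diff
	// layer is expected to re-persist that same marker in the generator journal
	// so that snapshot generation can resume where it left off.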
	db := rawdb.NewMemoryDatabase()

	rawdb.WriteAccountSnapshot(db, accOne, accOne[:])
	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotOne, accOneSlotOne[:])
	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotTwo, accOneSlotTwo[:])
	rawdb.WriteSnapshotRoot(db, baseRoot)

	// Create a disk layer based on all above updates
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			baseRoot: &diskLayer{
				diskdb:    db,
				cache:     fastcache.New(500 * 1024),
				root:      baseRoot,
				genMarker: genMarker,
			},
		},
	}
	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
		accTwo: accTwo[:],
	}, nil); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	if err := snaps.Cap(diffRoot, 0); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
	blob := rawdb.ReadSnapshotGenerator(db)
	var generator journalGenerator
	if err := rlp.DecodeBytes(blob, &generator); err != nil {
		t.Fatalf("failed to decode snapshot generator: %v", err)
	}
	if !bytes.Equal(generator.Marker, genMarker) {
		t.Fatalf("generator marker mismatch")
	}
	// Test scenario 2: the disk layer is fully generated.
	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{
		accThree: accThree.Bytes(),
	}, map[common.Hash]map[common.Hash][]byte{
		accThree: {accThreeSlot: accThreeSlot.Bytes()},
	}); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer)
	diskLayer.genMarker = nil // Construction finished
	if err := snaps.Cap(diffTwoRoot, 0); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
	blob = rawdb.ReadSnapshotGenerator(db)
	if err := rlp.DecodeBytes(blob, &generator); err != nil {
		t.Fatalf("failed to decode snapshot generator: %v", err)
	}
	if len(generator.Marker) != 0 {
		t.Fatalf("failed to update snapshot generator")
	}
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
//
// This test case is a tiny specialized case of TestDiskPartialMerge, which tests
// some very specific corner cases that random tests won't ever trigger.
func TestDiskMidAccountPartialMerge(t *testing.T) {
	// TODO(@karalabe) ?
}

// TestDiskSeek tests that seek operations work on the disk layer.
func TestDiskSeek(t *testing.T) {
	// Create some accounts in the disk layer
	var db ethdb.Database

	if dir, err := ioutil.TempDir("", "disklayer-test"); err != nil {
		t.Fatal(err)
	} else {
		defer os.RemoveAll(dir)
		diskdb, err := leveldb.New(dir, 256, 0, "", false)
		if err != nil {
			t.Fatal(err)
		}
		db = rawdb.NewDatabase(diskdb)
	}
	// Fill even keys [0, 2, 4, ...]
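	// (odd keys are left absent on purpose, so seeking to an odd position is
	// expected to land on the next even key)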
	for i := 0; i < 0xff; i += 2 {
		acc := common.Hash{byte(i)}
		rawdb.WriteAccountSnapshot(db, acc, acc[:])
	}
	// Add a 'higher' key with an incorrect (higher) prefix
	highKey := []byte{rawdb.SnapshotAccountPrefix[0] + 1}
	db.Put(highKey, []byte{0xff, 0xff})

	baseRoot := randomHash()
	rawdb.WriteSnapshotRoot(db, baseRoot)

	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			baseRoot: &diskLayer{
				diskdb: db,
				cache:  fastcache.New(500 * 1024),
				root:   baseRoot,
			},
		},
	}
	// Test some different seek positions
	type testcase struct {
		pos    byte
		expkey byte
	}
	var cases = []testcase{
		{0xff, 0x55}, // this should exit immediately without checking the key
		{0x01, 0x02},
		{0xfe, 0xfe},
		{0xfd, 0xfe},
		{0x00, 0x00},
	}
	for i, tc := range cases {
		it, err := snaps.AccountIterator(baseRoot, common.Hash{tc.pos})
		if err != nil {
			t.Fatalf("case %d, error: %v", i, err)
		}
		count := 0
		for it.Next() {
			k, v, err := it.Hash()[0], it.Account()[0], it.Error()
			if err != nil {
				t.Fatalf("test %d, item %d, error: %v", i, count, err)
			}
			// First item in the iterator should have the expected key
			if count == 0 && k != tc.expkey {
				t.Fatalf("test %d, item %d, got %v exp %v", i, count, k, tc.expkey)
			}
			count++
			if v != k {
				t.Fatalf("test %d, item %d, value wrong, got %v exp %v", i, count, v, k)
			}
		}
	}
}