github.com/MetalBlockchain/subnet-evm@v0.4.9/core/state/snapshot/disklayer_test.go

// (c) 2019-2020, Ava Labs, Inc.
//
// This file is a derived work, based on the go-ethereum library whose original
// notices appear below.
//
// It is distributed under a license compatible with the licensing terms of the
// original code from which it is derived.
//
// Much love to the original authors for their work.
// **********
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"testing"

	"github.com/MetalBlockchain/subnet-evm/core/rawdb"
	"github.com/MetalBlockchain/subnet-evm/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

// reverse reverses the contents of a byte slice. It's used to update random accs
// with deterministic changes.
func reverse(blob []byte) []byte {
	res := make([]byte, len(blob))
	for i, b := range blob {
		res[len(blob)-1-i] = b
	}
	return res
}

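// Naming convention used by the merge-test fixtures below: "acc" entries are
// plain accounts and "con" entries are contracts with a single storage slot.
// NoMod/Mod/Del/Nuke describes what the diff layer does to the entry (leaves
// it alone, modifies it, deletes it, or destructs the contract together with
// its storage), while the Cache/NoCache suffix records whether the value is
// read, and thereby cached, through the disk layer before the merge.
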
// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values.
func TestDiskMerge(t *testing.T) {
	// Create some accounts in the disk layer
	db := memorydb.New()

	var (
		accNoModNoCache     = common.Hash{0x1}
		accNoModCache       = common.Hash{0x2}
		accModNoCache       = common.Hash{0x3}
		accModCache         = common.Hash{0x4}
		accDelNoCache       = common.Hash{0x5}
		accDelCache         = common.Hash{0x6}
		conNoModNoCache     = common.Hash{0x7}
		conNoModNoCacheSlot = common.Hash{0x70}
		conNoModCache       = common.Hash{0x8}
		conNoModCacheSlot   = common.Hash{0x80}
		conModNoCache       = common.Hash{0x9}
		conModNoCacheSlot   = common.Hash{0x90}
		conModCache         = common.Hash{0xa}
		conModCacheSlot     = common.Hash{0xa0}
		conDelNoCache       = common.Hash{0xb}
		conDelNoCacheSlot   = common.Hash{0xb0}
		conDelCache         = common.Hash{0xc}
		conDelCacheSlot     = common.Hash{0xc0}
		conNukeNoCache      = common.Hash{0xd}
		conNukeNoCacheSlot  = common.Hash{0xd0}
		conNukeCache        = common.Hash{0xe}
		conNukeCacheSlot    = common.Hash{0xe0}
		baseRoot            = randomHash()
		baseBlockHash       = randomHash()
		diffRoot            = randomHash()
		diffBlockHash       = randomHash()
	)

	rawdb.WriteAccountSnapshot(db, accNoModNoCache, accNoModNoCache[:])
	rawdb.WriteAccountSnapshot(db, accNoModCache, accNoModCache[:])
	rawdb.WriteAccountSnapshot(db, accModNoCache, accModNoCache[:])
	rawdb.WriteAccountSnapshot(db, accModCache, accModCache[:])
	rawdb.WriteAccountSnapshot(db, accDelNoCache, accDelNoCache[:])
	rawdb.WriteAccountSnapshot(db, accDelCache, accDelCache[:])

	rawdb.WriteAccountSnapshot(db, conNoModNoCache, conNoModNoCache[:])
	rawdb.WriteStorageSnapshot(db, conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conNoModCache, conNoModCache[:])
	rawdb.WriteStorageSnapshot(db, conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conModNoCache, conModNoCache[:])
	rawdb.WriteStorageSnapshot(db, conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conModCache, conModCache[:])
	rawdb.WriteStorageSnapshot(db, conModCache, conModCacheSlot, conModCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conDelNoCache, conDelNoCache[:])
	rawdb.WriteStorageSnapshot(db, conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conDelCache, conDelCache[:])
	rawdb.WriteStorageSnapshot(db, conDelCache, conDelCacheSlot, conDelCacheSlot[:])

	rawdb.WriteAccountSnapshot(db, conNukeNoCache, conNukeNoCache[:])
	rawdb.WriteStorageSnapshot(db, conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conNukeCache, conNukeCache[:])
	rawdb.WriteStorageSnapshot(db, conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

	rawdb.WriteSnapshotBlockHash(db, baseBlockHash)
	rawdb.WriteSnapshotRoot(db, baseRoot)

	// Create a disk layer based on the above and cache in some data
	snaps := NewTestTree(db, baseBlockHash, baseRoot)
	base := snaps.Snapshot(baseRoot)
	base.AccountRLP(accNoModCache)
	base.AccountRLP(accModCache)
	base.AccountRLP(accDelCache)
	base.Storage(conNoModCache, conNoModCacheSlot)
	base.Storage(conModCache, conModCacheSlot)
	base.Storage(conDelCache, conDelCacheSlot)
	base.Storage(conNukeCache, conNukeCacheSlot)

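	// The Update call below passes the new layer's block hash and state root,
	// the parent's block hash, and three change sets: destructed accounts,
	// updated account RLP blobs, and updated storage slots (a nil slot value
	// marks a deleted slot).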
	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffBlockHash, diffRoot, baseBlockHash, map[common.Hash]struct{}{
		accDelNoCache:  {},
		accDelCache:    {},
		conNukeNoCache: {},
		conNukeCache:   {},
	}, map[common.Hash][]byte{
		accModNoCache: reverse(accModNoCache[:]),
		accModCache:   reverse(accModCache[:]),
	}, map[common.Hash]map[common.Hash][]byte{
		conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
		conDelNoCache: {conDelNoCacheSlot: nil},
		conDelCache:   {conDelCacheSlot: nil},
	}); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	snaps.verified = true // Bypass validation of junk data
	if err := snaps.Flatten(diffBlockHash); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
	// Retrieve all the data through the disk layer and validate it
	base = snaps.Snapshot(diffRoot)
	if _, ok := base.(*diskLayer); !ok {
		t.Fatalf("update not flattened into the disk layer")
	}

	// assertAccount ensures that an account matches the given blob.
	assertAccount := func(account common.Hash, data []byte) {
		t.Helper()
		blob, err := base.AccountRLP(account)
		if err != nil {
			t.Errorf("account access (%x) failed: %v", account, err)
		} else if !bytes.Equal(blob, data) {
			t.Errorf("account access (%x) mismatch: have %x, want %x", account, blob, data)
		}
	}
	assertAccount(accNoModNoCache, accNoModNoCache[:])
	assertAccount(accNoModCache, accNoModCache[:])
	assertAccount(accModNoCache, reverse(accModNoCache[:]))
	assertAccount(accModCache, reverse(accModCache[:]))
	assertAccount(accDelNoCache, nil)
	assertAccount(accDelCache, nil)

	// assertStorage ensures that a storage slot matches the given blob.
	assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
		t.Helper()
		blob, err := base.Storage(account, slot)
		if err != nil {
			t.Errorf("storage access (%x:%x) failed: %v", account, slot, err)
		} else if !bytes.Equal(blob, data) {
			t.Errorf("storage access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
		}
	}
	assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
	assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
	assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
	assertStorage(conDelCache, conDelCacheSlot, nil)
	assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
	assertStorage(conNukeCache, conNukeCacheSlot, nil)

	// Retrieve all the data directly from the database and validate it

	// assertDatabaseAccount ensures that an account from the database matches the given blob.
	assertDatabaseAccount := func(account common.Hash, data []byte) {
		t.Helper()
		if blob := rawdb.ReadAccountSnapshot(db, account); !bytes.Equal(blob, data) {
			t.Errorf("account database access (%x) mismatch: have %x, want %x", account, blob, data)
		}
	}
	assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
	assertDatabaseAccount(accNoModCache, accNoModCache[:])
	assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
	assertDatabaseAccount(accModCache, reverse(accModCache[:]))
	assertDatabaseAccount(accDelNoCache, nil)
	assertDatabaseAccount(accDelCache, nil)

	// assertDatabaseStorage ensures that a storage slot from the database matches the given blob.
	assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
		t.Helper()
		if blob := rawdb.ReadStorageSnapshot(db, account, slot); !bytes.Equal(blob, data) {
			t.Errorf("storage database access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
		}
	}
	assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
	assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
	assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
	assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
	assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
	assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
func TestDiskPartialMerge(t *testing.T) {
	// Iterate the test a few times to ensure we pick various internal orderings
	// for the data slots as well as the progress marker.
	for i := 0; i < 1024; i++ {
		// Create some accounts in the disk layer
		db := memorydb.New()

		var (
			accNoModNoCache     = randomHash()
			accNoModCache       = randomHash()
			accModNoCache       = randomHash()
			accModCache         = randomHash()
			accDelNoCache       = randomHash()
			accDelCache         = randomHash()
			conNoModNoCache     = randomHash()
			conNoModNoCacheSlot = randomHash()
			conNoModCache       = randomHash()
			conNoModCacheSlot   = randomHash()
			conModNoCache       = randomHash()
			conModNoCacheSlot   = randomHash()
			conModCache         = randomHash()
			conModCacheSlot     = randomHash()
			conDelNoCache       = randomHash()
			conDelNoCacheSlot   = randomHash()
			conDelCache         = randomHash()
			conDelCacheSlot     = randomHash()
			conNukeNoCache      = randomHash()
			conNukeNoCacheSlot  = randomHash()
			conNukeCache        = randomHash()
			conNukeCacheSlot    = randomHash()
			baseRoot            = randomHash()
			baseBlockHash       = randomHash()
			diffRoot            = randomHash()
			diffBlockHash       = randomHash()
			genMarker           = append(randomHash().Bytes(), randomHash().Bytes()...)
		)

		// insertAccount injects an account into the database if it's already
		// covered by the generation marker (i.e. not after it), and drops the
		// op otherwise. This is needed to seed the database with a valid
		// starting snapshot.
		insertAccount := func(account common.Hash, data []byte) {
			if bytes.Compare(account[:], genMarker) <= 0 {
				rawdb.WriteAccountSnapshot(db, account, data[:])
			}
		}
		insertAccount(accNoModNoCache, accNoModNoCache[:])
		insertAccount(accNoModCache, accNoModCache[:])
		insertAccount(accModNoCache, accModNoCache[:])
		insertAccount(accModCache, accModCache[:])
		insertAccount(accDelNoCache, accDelNoCache[:])
		insertAccount(accDelCache, accDelCache[:])

		// insertStorage injects a storage slot into the database if it's
		// already covered by the generation marker (i.e. not after it), and
		// drops the op otherwise. This is needed to seed the database with a
		// valid starting snapshot.
		insertStorage := func(account common.Hash, slot common.Hash, data []byte) {
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 {
				rawdb.WriteStorageSnapshot(db, account, slot, data[:])
			}
		}
		insertAccount(conNoModNoCache, conNoModNoCache[:])
		insertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		insertAccount(conNoModCache, conNoModCache[:])
		insertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		insertAccount(conModNoCache, conModNoCache[:])
		insertStorage(conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
		insertAccount(conModCache, conModCache[:])
		insertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
		insertAccount(conDelNoCache, conDelNoCache[:])
		insertStorage(conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
		insertAccount(conDelCache, conDelCache[:])
		insertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])

		insertAccount(conNukeNoCache, conNukeNoCache[:])
		insertStorage(conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
		insertAccount(conNukeCache, conNukeCache[:])
		insertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

		rawdb.WriteSnapshotBlockHash(db, baseBlockHash)
		rawdb.WriteSnapshotRoot(db, baseRoot)

		// Create a disk layer based on the above using a random progress marker
		// and cache in some data.
		snaps := NewTestTree(db, baseBlockHash, baseRoot)
		dl := snaps.disklayer()
		dl.genMarker = genMarker
		base := snaps.Snapshot(baseRoot)

		// assertAccount ensures that an account matches the given blob if it's
		// already covered by the disk snapshot, and errors out otherwise.
		assertAccount := func(account common.Hash, data []byte) {
			t.Helper()
			blob, err := base.AccountRLP(account)
			if bytes.Compare(account[:], genMarker) > 0 && err != ErrNotCoveredYet {
				t.Fatalf("test %d: post-marker (%x) account access (%x) succeeded: %x", i, genMarker, account, blob)
			}
			if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) account access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
			}
		}
		assertAccount(accNoModCache, accNoModCache[:])
		assertAccount(accModCache, accModCache[:])
		assertAccount(accDelCache, accDelCache[:])

		// assertStorage ensures that a storage slot matches the given blob if
		// it's already covered by the disk snapshot, and errors out otherwise.
		assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
			t.Helper()
			blob, err := base.Storage(account, slot)
			if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && err != ErrNotCoveredYet {
				t.Fatalf("test %d: post-marker (%x) storage access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
			}
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) storage access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
			}
		}
		assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
		assertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])
		assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

		// Modify or delete some accounts, flatten everything onto disk
		if err := snaps.Update(diffBlockHash, diffRoot, baseBlockHash, map[common.Hash]struct{}{
			accDelNoCache:  {},
			accDelCache:    {},
			conNukeNoCache: {},
			conNukeCache:   {},
		}, map[common.Hash][]byte{
			accModNoCache: reverse(accModNoCache[:]),
			accModCache:   reverse(accModCache[:]),
		}, map[common.Hash]map[common.Hash][]byte{
			conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
			conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
			conDelNoCache: {conDelNoCacheSlot: nil},
			conDelCache:   {conDelCacheSlot: nil},
		}); err != nil {
			t.Fatalf("test %d: failed to update snapshot tree: %v", i, err)
		}
		if err := snaps.Flatten(diffBlockHash); err != nil {
			t.Fatalf("test %d: failed to flatten snapshot tree: %v", i, err)
		}
		// Retrieve all the data through the disk layer and validate it
		base = snaps.Snapshot(diffRoot)
		if _, ok := base.(*diskLayer); !ok {
			t.Fatalf("test %d: update not flattened into the disk layer", i)
		}
		assertAccount(accNoModNoCache, accNoModNoCache[:])
		assertAccount(accNoModCache, accNoModCache[:])
		assertAccount(accModNoCache, reverse(accModNoCache[:]))
		assertAccount(accModCache, reverse(accModCache[:]))
		assertAccount(accDelNoCache, nil)
		assertAccount(accDelCache, nil)

		assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
		assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
		assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
		assertStorage(conDelCache, conDelCacheSlot, nil)
		assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
		assertStorage(conNukeCache, conNukeCacheSlot, nil)

		// Retrieve all the data directly from the database and validate it

		// assertDatabaseAccount ensures that an account inside the database matches
		// the given blob if it's already covered by the disk snapshot, and does not
		// exist otherwise.
		assertDatabaseAccount := func(account common.Hash, data []byte) {
			t.Helper()
			blob := rawdb.ReadAccountSnapshot(db, account)
			if bytes.Compare(account[:], genMarker) > 0 && blob != nil {
				t.Fatalf("test %d: post-marker (%x) account database access (%x) succeeded: %x", i, genMarker, account, blob)
			}
			if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) account database access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
			}
		}
		assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
		assertDatabaseAccount(accNoModCache, accNoModCache[:])
		assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
		assertDatabaseAccount(accModCache, reverse(accModCache[:]))
		assertDatabaseAccount(accDelNoCache, nil)
		assertDatabaseAccount(accDelCache, nil)

		// assertDatabaseStorage ensures that a storage slot inside the database
		// matches the given blob if it's already covered by the disk snapshot,
		// and does not exist otherwise.
		assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
			t.Helper()
			blob := rawdb.ReadStorageSnapshot(db, account, slot)
			if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && blob != nil {
				t.Fatalf("test %d: post-marker (%x) storage database access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
			}
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) storage database access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
			}
		}
		assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
		assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
		assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
		assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
		assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
		assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
	}
}

// Tests that when the bottom-most diff layer is merged into the disk layer,
// the corresponding generator is persisted correctly.
func TestDiskGeneratorPersistence(t *testing.T) {
	var (
		accOne        = randomHash()
		accTwo        = randomHash()
		accOneSlotOne = randomHash()
		accOneSlotTwo = randomHash()

		accThree         = randomHash()
		accThreeSlot     = randomHash()
		baseRoot         = randomHash()
		baseBlockHash    = randomHash()
		diffRoot         = randomHash()
		diffBlockHash    = randomHash()
		diffTwoRoot      = randomHash()
		diffTwoBlockHash = randomHash()
		genMarker        = append(randomHash().Bytes(), randomHash().Bytes()...)
	)
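	// The persisted generator records how far snapshot generation has
	// progressed: a non-empty marker means the disk layer is still being
	// generated, while an empty marker means generation has finished. Both
	// states are exercised below.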
	// Testing scenario 1, the disk layer is still under construction.
	db := rawdb.NewMemoryDatabase()

	rawdb.WriteAccountSnapshot(db, accOne, accOne[:])
	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotOne, accOneSlotOne[:])
	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotTwo, accOneSlotTwo[:])
	rawdb.WriteSnapshotBlockHash(db, baseBlockHash)
	rawdb.WriteSnapshotRoot(db, baseRoot)

	// Create a disk layer based on all above updates
	snaps := NewTestTree(db, baseBlockHash, baseRoot)
	dl := snaps.disklayer()
	dl.genMarker = genMarker
	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffBlockHash, diffRoot, baseBlockHash, nil, map[common.Hash][]byte{
		accTwo: accTwo[:],
	}, nil); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	if err := snaps.Flatten(diffBlockHash); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
	blob := rawdb.ReadSnapshotGenerator(db)
	var generator journalGenerator
	if err := rlp.DecodeBytes(blob, &generator); err != nil {
		t.Fatalf("Failed to decode snapshot generator %v", err)
	}
	if !bytes.Equal(generator.Marker, genMarker) {
		t.Fatalf("Generator marker is not matched")
	}
	// Test scenario 2, the disk layer is fully generated
	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffTwoBlockHash, diffTwoRoot, diffBlockHash, nil, map[common.Hash][]byte{
		accThree: accThree.Bytes(),
	}, map[common.Hash]map[common.Hash][]byte{
		accThree: {accThreeSlot: accThreeSlot.Bytes()},
	}); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	dl = snaps.disklayer()
	dl.genMarker = nil    // Construction finished
	snaps.verified = true // Bypass validation of junk data
	if err := snaps.Flatten(diffTwoBlockHash); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
	blob = rawdb.ReadSnapshotGenerator(db)
	if err := rlp.DecodeBytes(blob, &generator); err != nil {
		t.Fatalf("Failed to decode snapshot generator %v", err)
	}
	if len(generator.Marker) != 0 {
		t.Fatalf("Failed to update snapshot generator")
	}
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
//
// This test case is a tiny specialized case of TestDiskPartialMerge, which tests
// some very specific corner cases that random tests won't ever trigger.
func TestDiskMidAccountPartialMerge(t *testing.T) {
	// TODO(@karalabe) ?
}

// TestDiskSeek tests that seek operations work on the disk layer
func TestDiskSeek(t *testing.T) {
	// Create some accounts in the disk layer
	db := rawdb.NewMemoryDatabase()
	defer db.Close()

	// Fill even keys [0,2,4...]
	for i := 0; i < 0xff; i += 2 {
		acc := common.Hash{byte(i)}
		rawdb.WriteAccountSnapshot(db, acc, acc[:])
	}
	// Add a 'higher' key, with an incorrect (higher) prefix
	highKey := []byte{rawdb.SnapshotAccountPrefix[0] + 1}
	db.Put(highKey, []byte{0xff, 0xff})

	baseRoot := randomHash()
	baseBlockHash := randomHash()
	rawdb.WriteSnapshotBlockHash(db, baseBlockHash)
	rawdb.WriteSnapshotRoot(db, baseRoot)

	snaps := NewTestTree(db, baseBlockHash, baseRoot)
	// Test some different seek positions
	type testcase struct {
		pos    byte
		expkey byte
	}
	var cases = []testcase{
		{0xff, 0x55}, // this should exit immediately without checking key
		{0x01, 0x02},
		{0xfe, 0xfe},
		{0xfd, 0xfe},
		{0x00, 0x00},
	}
	for i, tc := range cases {
		it, err := snaps.AccountIterator(baseRoot, common.Hash{tc.pos}, false)
		if err != nil {
			t.Fatalf("case %d, error: %v", i, err)
		}
		count := 0
		for it.Next() {
			k, v, err := it.Hash()[0], it.Account()[0], it.Error()
			if err != nil {
				t.Fatalf("test %d, item %d, error: %v", i, count, err)
			}
			// First item in iterator should have the expected key
			if count == 0 && k != tc.expkey {
				t.Fatalf("test %d, item %d, got %v exp %v", i, count, k, tc.expkey)
			}
			count++
			if v != k {
				t.Fatalf("test %d, item %d, value wrong, got %v exp %v", i, count, v, k)
			}
		}
	}
}