gitlab.com/flarenetwork/coreth@v0.1.1/core/state/snapshot/disklayer_test.go

// (c) 2019-2020, Ava Labs, Inc.
//
// This file is a derived work, based on the go-ethereum library whose original
// notices appear below.
//
// It is distributed under a license compatible with the licensing terms of the
// original code from which it is derived.
//
// Much love to the original authors for their work.
// **********
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"io/ioutil"
	"os"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/leveldb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/rlp"
	"gitlab.com/flarenetwork/coreth/core/rawdb"
)

// reverse reverses the contents of a byte slice. It's used to update random accounts
// with deterministic changes.
func reverse(blob []byte) []byte {
	res := make([]byte, len(blob))
	for i, b := range blob {
		res[len(blob)-1-i] = b
	}
	return res
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values.
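// Accounts and contract slots below are named for how the diff layer treats
// them (NoMod, Mod, Del, Nuke) and for whether they get pulled into the
// snapshot cache before the merge (Cache vs. NoCache).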
func TestDiskMerge(t *testing.T) {
	// Create some accounts in the disk layer
	db := memorydb.New()

	var (
		accNoModNoCache = common.Hash{0x1}
		accNoModCache = common.Hash{0x2}
		accModNoCache = common.Hash{0x3}
		accModCache = common.Hash{0x4}
		accDelNoCache = common.Hash{0x5}
		accDelCache = common.Hash{0x6}
		conNoModNoCache = common.Hash{0x7}
		conNoModNoCacheSlot = common.Hash{0x70}
		conNoModCache = common.Hash{0x8}
		conNoModCacheSlot = common.Hash{0x80}
		conModNoCache = common.Hash{0x9}
		conModNoCacheSlot = common.Hash{0x90}
		conModCache = common.Hash{0xa}
		conModCacheSlot = common.Hash{0xa0}
		conDelNoCache = common.Hash{0xb}
		conDelNoCacheSlot = common.Hash{0xb0}
		conDelCache = common.Hash{0xc}
		conDelCacheSlot = common.Hash{0xc0}
		conNukeNoCache = common.Hash{0xd}
		conNukeNoCacheSlot = common.Hash{0xd0}
		conNukeCache = common.Hash{0xe}
		conNukeCacheSlot = common.Hash{0xe0}
		baseRoot = randomHash()
		baseBlockHash = randomHash()
		diffRoot = randomHash()
		diffBlockHash = randomHash()
	)

	rawdb.WriteAccountSnapshot(db, accNoModNoCache, accNoModNoCache[:])
	rawdb.WriteAccountSnapshot(db, accNoModCache, accNoModCache[:])
	rawdb.WriteAccountSnapshot(db, accModNoCache, accModNoCache[:])
	rawdb.WriteAccountSnapshot(db, accModCache, accModCache[:])
	rawdb.WriteAccountSnapshot(db, accDelNoCache, accDelNoCache[:])
	rawdb.WriteAccountSnapshot(db, accDelCache, accDelCache[:])

	rawdb.WriteAccountSnapshot(db, conNoModNoCache, conNoModNoCache[:])
	rawdb.WriteStorageSnapshot(db, conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conNoModCache, conNoModCache[:])
	rawdb.WriteStorageSnapshot(db, conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conModNoCache, conModNoCache[:])
	rawdb.WriteStorageSnapshot(db, conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conModCache, conModCache[:])
	rawdb.WriteStorageSnapshot(db, conModCache, conModCacheSlot, conModCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conDelNoCache, conDelNoCache[:])
	rawdb.WriteStorageSnapshot(db, conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conDelCache, conDelCache[:])
	rawdb.WriteStorageSnapshot(db, conDelCache, conDelCacheSlot, conDelCacheSlot[:])

	rawdb.WriteAccountSnapshot(db, conNukeNoCache, conNukeNoCache[:])
	rawdb.WriteStorageSnapshot(db, conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conNukeCache, conNukeCache[:])
	rawdb.WriteStorageSnapshot(db, conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

	rawdb.WriteSnapshotBlockHash(db, baseBlockHash)
	rawdb.WriteSnapshotRoot(db, baseRoot)

	// Create a disk layer based on the above and cache in some data
	snaps := NewTestTree(db, baseBlockHash, baseRoot)
	base := snaps.Snapshot(baseRoot)
	base.AccountRLP(accNoModCache)
	base.AccountRLP(accModCache)
	base.AccountRLP(accDelCache)
	base.Storage(conNoModCache, conNoModCacheSlot)
	base.Storage(conModCache, conModCacheSlot)
	base.Storage(conDelCache, conDelCacheSlot)
	base.Storage(conNukeCache, conNukeCacheSlot)

	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffBlockHash, diffRoot, baseBlockHash, map[common.Hash]struct{}{
		accDelNoCache:  {},
		accDelCache:    {},
		conNukeNoCache: {},
		conNukeCache:   {},
	}, map[common.Hash][]byte{
		accModNoCache: reverse(accModNoCache[:]),
		accModCache:   reverse(accModCache[:]),
	}, map[common.Hash]map[common.Hash][]byte{
		conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
		conDelNoCache: {conDelNoCacheSlot: nil},
		conDelCache:   {conDelCacheSlot: nil},
	}); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	snaps.verified = true // Bypass validation of junk data
	if err := snaps.Flatten(diffBlockHash); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
	// Retrieve all the data through the disk layer and validate it
	base = snaps.Snapshot(diffRoot)
	if _, ok := base.(*diskLayer); !ok {
		t.Fatalf("update not flattened into the disk layer")
	}

	// assertAccount ensures that an account matches the given blob.
	assertAccount := func(account common.Hash, data []byte) {
		t.Helper()
		blob, err := base.AccountRLP(account)
		if err != nil {
			t.Errorf("account access (%x) failed: %v", account, err)
		} else if !bytes.Equal(blob, data) {
			t.Errorf("account access (%x) mismatch: have %x, want %x", account, blob, data)
		}
	}
	assertAccount(accNoModNoCache, accNoModNoCache[:])
	assertAccount(accNoModCache, accNoModCache[:])
	assertAccount(accModNoCache, reverse(accModNoCache[:]))
	assertAccount(accModCache, reverse(accModCache[:]))
	assertAccount(accDelNoCache, nil)
	assertAccount(accDelCache, nil)

	// assertStorage ensures that a storage slot matches the given blob.
	assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
		t.Helper()
		blob, err := base.Storage(account, slot)
		if err != nil {
			t.Errorf("storage access (%x:%x) failed: %v", account, slot, err)
		} else if !bytes.Equal(blob, data) {
			t.Errorf("storage access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
		}
	}
	assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
	assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
	assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
	assertStorage(conDelCache, conDelCacheSlot, nil)
	assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
	assertStorage(conNukeCache, conNukeCacheSlot, nil)

	// Retrieve all the data directly from the database and validate it

	// assertDatabaseAccount ensures that an account from the database matches the given blob.
	assertDatabaseAccount := func(account common.Hash, data []byte) {
		t.Helper()
		if blob := rawdb.ReadAccountSnapshot(db, account); !bytes.Equal(blob, data) {
			t.Errorf("account database access (%x) mismatch: have %x, want %x", account, blob, data)
		}
	}
	assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
	assertDatabaseAccount(accNoModCache, accNoModCache[:])
	assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
	assertDatabaseAccount(accModCache, reverse(accModCache[:]))
	assertDatabaseAccount(accDelNoCache, nil)
	assertDatabaseAccount(accDelCache, nil)

	// assertDatabaseStorage ensures that a storage slot from the database matches the given blob.
	assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
		t.Helper()
		if blob := rawdb.ReadStorageSnapshot(db, account, slot); !bytes.Equal(blob, data) {
			t.Errorf("storage database access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
		}
	}
	assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
	assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
	assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
	assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
	assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
	assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
func TestDiskPartialMerge(t *testing.T) {
	// Iterate the test a few times to ensure we pick various internal orderings
	// for the data slots as well as the progress marker.
	for i := 0; i < 1024; i++ {
		// Create some accounts in the disk layer
		db := memorydb.New()

		var (
			accNoModNoCache = randomHash()
			accNoModCache = randomHash()
			accModNoCache = randomHash()
			accModCache = randomHash()
			accDelNoCache = randomHash()
			accDelCache = randomHash()
			conNoModNoCache = randomHash()
			conNoModNoCacheSlot = randomHash()
			conNoModCache = randomHash()
			conNoModCacheSlot = randomHash()
			conModNoCache = randomHash()
			conModNoCacheSlot = randomHash()
			conModCache = randomHash()
			conModCacheSlot = randomHash()
			conDelNoCache = randomHash()
			conDelNoCacheSlot = randomHash()
			conDelCache = randomHash()
			conDelCacheSlot = randomHash()
			conNukeNoCache = randomHash()
			conNukeNoCacheSlot = randomHash()
			conNukeCache = randomHash()
			conNukeCacheSlot = randomHash()
			baseRoot = randomHash()
			baseBlockHash = randomHash()
			diffRoot = randomHash()
			diffBlockHash = randomHash()
			genMarker = append(randomHash().Bytes(), randomHash().Bytes()...)
		)

		// insertAccount injects an account into the database if it's at or before
		// the generator marker (i.e. already covered by generation), and drops the
		// op otherwise. This is needed to seed the database with a valid starting
		// snapshot.
		insertAccount := func(account common.Hash, data []byte) {
			if bytes.Compare(account[:], genMarker) <= 0 {
				rawdb.WriteAccountSnapshot(db, account, data[:])
			}
		}
		insertAccount(accNoModNoCache, accNoModNoCache[:])
		insertAccount(accNoModCache, accNoModCache[:])
		insertAccount(accModNoCache, accModNoCache[:])
		insertAccount(accModCache, accModCache[:])
		insertAccount(accDelNoCache, accDelNoCache[:])
		insertAccount(accDelCache, accDelCache[:])

		// insertStorage injects a storage slot into the database if it's at or
		// before the generator marker, and drops the op otherwise. This is needed
		// to seed the database with a valid starting snapshot.
		insertStorage := func(account common.Hash, slot common.Hash, data []byte) {
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 {
				rawdb.WriteStorageSnapshot(db, account, slot, data[:])
			}
		}
		insertAccount(conNoModNoCache, conNoModNoCache[:])
		insertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		insertAccount(conNoModCache, conNoModCache[:])
		insertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		insertAccount(conModNoCache, conModNoCache[:])
		insertStorage(conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
		insertAccount(conModCache, conModCache[:])
		insertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
		insertAccount(conDelNoCache, conDelNoCache[:])
		insertStorage(conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
		insertAccount(conDelCache, conDelCache[:])
		insertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])

		insertAccount(conNukeNoCache, conNukeNoCache[:])
		insertStorage(conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
		insertAccount(conNukeCache, conNukeCache[:])
		insertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

		rawdb.WriteSnapshotBlockHash(db, baseBlockHash)
		rawdb.WriteSnapshotRoot(db, baseRoot)

		// Create a disk layer based on the above using a random progress marker
		// and cache in some data.
		snaps := NewTestTree(db, baseBlockHash, baseRoot)
		dl := snaps.disklayer()
		dl.genMarker = genMarker
		base := snaps.Snapshot(baseRoot)

		// assertAccount ensures that an account matches the given blob if it's
		// already covered by the disk snapshot, and errors out otherwise.
		assertAccount := func(account common.Hash, data []byte) {
			t.Helper()
			blob, err := base.AccountRLP(account)
			if bytes.Compare(account[:], genMarker) > 0 && err != ErrNotCoveredYet {
				t.Fatalf("test %d: post-marker (%x) account access (%x) succeeded: %x", i, genMarker, account, blob)
			}
			if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) account access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
			}
		}
		assertAccount(accNoModCache, accNoModCache[:])
		assertAccount(accModCache, accModCache[:])
		assertAccount(accDelCache, accDelCache[:])

		// assertStorage ensures that a storage slot matches the given blob if
		// it's already covered by the disk snapshot, and errors out otherwise.
		assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
			t.Helper()
			blob, err := base.Storage(account, slot)
			if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && err != ErrNotCoveredYet {
				t.Fatalf("test %d: post-marker (%x) storage access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
			}
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) storage access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
			}
		}
		assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
		assertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])
		assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

		// Modify or delete some accounts, flatten everything onto disk
		if err := snaps.Update(diffBlockHash, diffRoot, baseBlockHash, map[common.Hash]struct{}{
			accDelNoCache:  {},
			accDelCache:    {},
			conNukeNoCache: {},
			conNukeCache:   {},
		}, map[common.Hash][]byte{
			accModNoCache: reverse(accModNoCache[:]),
			accModCache:   reverse(accModCache[:]),
		}, map[common.Hash]map[common.Hash][]byte{
			conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
			conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
			conDelNoCache: {conDelNoCacheSlot: nil},
			conDelCache:   {conDelCacheSlot: nil},
		}); err != nil {
			t.Fatalf("test %d: failed to update snapshot tree: %v", i, err)
		}
		if err := snaps.Flatten(diffBlockHash); err != nil {
			t.Fatalf("test %d: failed to flatten snapshot tree: %v", i, err)
		}
		// Retrieve all the data through the disk layer and validate it
		base = snaps.Snapshot(diffRoot)
		if _, ok := base.(*diskLayer); !ok {
			t.Fatalf("test %d: update not flattened into the disk layer", i)
		}
		assertAccount(accNoModNoCache, accNoModNoCache[:])
		assertAccount(accNoModCache, accNoModCache[:])
		assertAccount(accModNoCache, reverse(accModNoCache[:]))
		assertAccount(accModCache, reverse(accModCache[:]))
		assertAccount(accDelNoCache, nil)
		assertAccount(accDelCache, nil)

		assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
		assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
		assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
		assertStorage(conDelCache, conDelCacheSlot, nil)
		assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
		assertStorage(conNukeCache, conNukeCacheSlot, nil)

		// Retrieve all the data directly from the database and validate it

		// assertDatabaseAccount ensures that an account inside the database matches
		// the given blob if it's already covered by the disk snapshot, and does not
		// exist otherwise.
		assertDatabaseAccount := func(account common.Hash, data []byte) {
			t.Helper()
			blob := rawdb.ReadAccountSnapshot(db, account)
			if bytes.Compare(account[:], genMarker) > 0 && blob != nil {
				t.Fatalf("test %d: post-marker (%x) account database access (%x) succeeded: %x", i, genMarker, account, blob)
			}
			if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) account database access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
			}
		}
		assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
		assertDatabaseAccount(accNoModCache, accNoModCache[:])
		assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
		assertDatabaseAccount(accModCache, reverse(accModCache[:]))
		assertDatabaseAccount(accDelNoCache, nil)
		assertDatabaseAccount(accDelCache, nil)

		// assertDatabaseStorage ensures that a storage slot inside the database
		// matches the given blob if it's already covered by the disk snapshot,
		// and does not exist otherwise.
		assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
			t.Helper()
			blob := rawdb.ReadStorageSnapshot(db, account, slot)
			if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && blob != nil {
				t.Fatalf("test %d: post-marker (%x) storage database access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
			}
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) storage database access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
			}
		}
		assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
		assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
		assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
		assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
		assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
		assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
	}
}

// Tests that when the bottom-most diff layer is merged into the disk layer,
// the corresponding generator is persisted correctly.
func TestDiskGeneratorPersistence(t *testing.T) {
	var (
		accOne        = randomHash()
		accTwo        = randomHash()
		accOneSlotOne = randomHash()
		accOneSlotTwo = randomHash()

		accThree         = randomHash()
		accThreeSlot     = randomHash()
		baseRoot         = randomHash()
		baseBlockHash    = randomHash()
		diffRoot         = randomHash()
		diffBlockHash    = randomHash()
		diffTwoRoot      = randomHash()
		diffTwoBlockHash = randomHash()
		genMarker        = append(randomHash().Bytes(), randomHash().Bytes()...)
	)
	// Testing scenario 1, the disk layer is still under construction.
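	// A non-nil genMarker simulates an in-progress snapshot generation; the
	// generator persisted after the flatten must keep carrying this marker.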
	db := rawdb.NewMemoryDatabase()

	rawdb.WriteAccountSnapshot(db, accOne, accOne[:])
	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotOne, accOneSlotOne[:])
	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotTwo, accOneSlotTwo[:])
	rawdb.WriteSnapshotBlockHash(db, baseBlockHash)
	rawdb.WriteSnapshotRoot(db, baseRoot)

	// Create a disk layer based on all above updates
	snaps := NewTestTree(db, baseBlockHash, baseRoot)
	dl := snaps.disklayer()
	dl.genMarker = genMarker
	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffBlockHash, diffRoot, baseBlockHash, nil, map[common.Hash][]byte{
		accTwo: accTwo[:],
	}, nil); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	if err := snaps.Flatten(diffBlockHash); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
	blob := rawdb.ReadSnapshotGenerator(db)
	var generator journalGenerator
	if err := rlp.DecodeBytes(blob, &generator); err != nil {
		t.Fatalf("failed to decode snapshot generator: %v", err)
	}
	if !bytes.Equal(generator.Marker, genMarker) {
		t.Fatalf("generator marker mismatch: have %x, want %x", generator.Marker, genMarker)
	}
	// Test scenario 2, the disk layer is fully generated
	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffTwoBlockHash, diffTwoRoot, diffBlockHash, nil, map[common.Hash][]byte{
		accThree: accThree.Bytes(),
	}, map[common.Hash]map[common.Hash][]byte{
		accThree: {accThreeSlot: accThreeSlot.Bytes()},
	}); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	dl = snaps.disklayer()
	dl.genMarker = nil    // Construction finished
	snaps.verified = true // Bypass validation of junk data
	if err := snaps.Flatten(diffTwoBlockHash); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
	blob = rawdb.ReadSnapshotGenerator(db)
	if err := rlp.DecodeBytes(blob, &generator); err != nil {
		t.Fatalf("failed to decode snapshot generator: %v", err)
	}
	if len(generator.Marker) != 0 {
		t.Fatalf("failed to update snapshot generator")
	}
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
//
// This test case is a tiny, specialized variant of TestDiskPartialMerge, covering
// some very specific corner cases that random tests won't ever trigger.
func TestDiskMidAccountPartialMerge(t *testing.T) {
	// TODO(@karalabe) ?
}

// TestDiskSeek tests that seek operations work on the disk layer
func TestDiskSeek(t *testing.T) {
	// Create some accounts in the disk layer
	var db ethdb.Database

	if dir, err := ioutil.TempDir("", "disklayer-test"); err != nil {
		t.Fatal(err)
	} else {
		defer os.RemoveAll(dir)
		diskdb, err := leveldb.New(dir, 256, 0, "", false)
		if err != nil {
			t.Fatal(err)
		}
		db = rawdb.NewDatabase(diskdb)
	}
	// Fill even keys [0, 2, 4, ...]
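	// so that seek positions can land both exactly on an existing key and in
	// the gap between two existing keys.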
	for i := 0; i < 0xff; i += 2 {
		acc := common.Hash{byte(i)}
		rawdb.WriteAccountSnapshot(db, acc, acc[:])
	}
	// Add a 'higher' key, with incorrect (higher) prefix
	highKey := []byte{rawdb.SnapshotAccountPrefix[0] + 1}
	db.Put(highKey, []byte{0xff, 0xff})

	baseRoot := randomHash()
	baseBlockHash := randomHash()
	rawdb.WriteSnapshotBlockHash(db, baseBlockHash)
	rawdb.WriteSnapshotRoot(db, baseRoot)

	snaps := NewTestTree(db, baseBlockHash, baseRoot)
	// Test some different seek positions
	type testcase struct {
		pos    byte
		expkey byte
	}
	var cases = []testcase{
		{0xff, 0x55}, // this should exit immediately without checking the key
		{0x01, 0x02},
		{0xfe, 0xfe},
		{0xfd, 0xfe},
		{0x00, 0x00},
	}
	for i, tc := range cases {
		it, err := snaps.AccountIterator(baseRoot, common.Hash{tc.pos}, false)
		if err != nil {
			t.Fatalf("case %d, error: %v", i, err)
		}
		count := 0
		for it.Next() {
			k, v, err := it.Hash()[0], it.Account()[0], it.Error()
			if err != nil {
				t.Fatalf("test %d, item %d, error: %v", i, count, err)
			}
			// First item in iterator should have the expected key
			if count == 0 && k != tc.expkey {
				t.Fatalf("test %d, item %d, got %v exp %v", i, count, k, tc.expkey)
			}
			count++
			if v != k {
				t.Fatalf("test %d, item %d, value wrong, got %v exp %v", i, count, v, k)
			}
		}
	}
}