github.com/codysnider/go-ethereum@v1.10.18-0.20220420071915-14f4ae99222a/core/state/snapshot/disklayer_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"testing"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb/leveldb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/rlp"
)

// reverse reverses the contents of a byte slice. It's used to update random
// accounts with deterministic changes.
func reverse(blob []byte) []byte {
	res := make([]byte, len(blob))
	for i, b := range blob {
		res[len(blob)-1-i] = b
	}
	return res
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values.
func TestDiskMerge(t *testing.T) {
	// Create some accounts in the disk layer
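	// Naming scheme: acc* entries are plain accounts, con* entries are contracts
	// with one storage slot. NoMod/Mod/Del tells whether the diff layer leaves
	// the entry untouched, modifies it (the slot for contracts) or deletes it
	// (the slot for contracts), Nuke destructs a contract together with its
	// slot, and NoCache/Cache tells whether the value is read into the disk
	// layer's cache before the merge.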
	db := memorydb.New()

	var (
		accNoModNoCache     = common.Hash{0x1}
		accNoModCache       = common.Hash{0x2}
		accModNoCache       = common.Hash{0x3}
		accModCache         = common.Hash{0x4}
		accDelNoCache       = common.Hash{0x5}
		accDelCache         = common.Hash{0x6}
		conNoModNoCache     = common.Hash{0x7}
		conNoModNoCacheSlot = common.Hash{0x70}
		conNoModCache       = common.Hash{0x8}
		conNoModCacheSlot   = common.Hash{0x80}
		conModNoCache       = common.Hash{0x9}
		conModNoCacheSlot   = common.Hash{0x90}
		conModCache         = common.Hash{0xa}
		conModCacheSlot     = common.Hash{0xa0}
		conDelNoCache       = common.Hash{0xb}
		conDelNoCacheSlot   = common.Hash{0xb0}
		conDelCache         = common.Hash{0xc}
		conDelCacheSlot     = common.Hash{0xc0}
		conNukeNoCache      = common.Hash{0xd}
		conNukeNoCacheSlot  = common.Hash{0xd0}
		conNukeCache        = common.Hash{0xe}
		conNukeCacheSlot    = common.Hash{0xe0}
		baseRoot            = randomHash()
		diffRoot            = randomHash()
	)

	rawdb.WriteAccountSnapshot(db, accNoModNoCache, accNoModNoCache[:])
	rawdb.WriteAccountSnapshot(db, accNoModCache, accNoModCache[:])
	rawdb.WriteAccountSnapshot(db, accModNoCache, accModNoCache[:])
	rawdb.WriteAccountSnapshot(db, accModCache, accModCache[:])
	rawdb.WriteAccountSnapshot(db, accDelNoCache, accDelNoCache[:])
	rawdb.WriteAccountSnapshot(db, accDelCache, accDelCache[:])

	rawdb.WriteAccountSnapshot(db, conNoModNoCache, conNoModNoCache[:])
	rawdb.WriteStorageSnapshot(db, conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conNoModCache, conNoModCache[:])
	rawdb.WriteStorageSnapshot(db, conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conModNoCache, conModNoCache[:])
	rawdb.WriteStorageSnapshot(db, conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conModCache, conModCache[:])
	rawdb.WriteStorageSnapshot(db, conModCache, conModCacheSlot, conModCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conDelNoCache, conDelNoCache[:])
	rawdb.WriteStorageSnapshot(db, conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conDelCache, conDelCache[:])
	rawdb.WriteStorageSnapshot(db, conDelCache, conDelCacheSlot, conDelCacheSlot[:])

	rawdb.WriteAccountSnapshot(db, conNukeNoCache, conNukeNoCache[:])
	rawdb.WriteStorageSnapshot(db, conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conNukeCache, conNukeCache[:])
	rawdb.WriteStorageSnapshot(db, conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

	rawdb.WriteSnapshotRoot(db, baseRoot)

	// Create a disk layer based on the above and cache in some data
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			baseRoot: &diskLayer{
				diskdb: db,
				cache:  fastcache.New(500 * 1024),
				root:   baseRoot,
			},
		},
	}
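	// Read the *Cache entries once so that they end up in the disk layer's
	// cache; the merge below must invalidate these cached copies.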
	base := snaps.Snapshot(baseRoot)
	base.AccountRLP(accNoModCache)
	base.AccountRLP(accModCache)
	base.AccountRLP(accDelCache)
	base.Storage(conNoModCache, conNoModCacheSlot)
	base.Storage(conModCache, conModCacheSlot)
	base.Storage(conDelCache, conDelCacheSlot)
	base.Storage(conNukeCache, conNukeCacheSlot)

	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
		accDelNoCache:  {},
		accDelCache:    {},
		conNukeNoCache: {},
		conNukeCache:   {},
	}, map[common.Hash][]byte{
		accModNoCache: reverse(accModNoCache[:]),
		accModCache:   reverse(accModCache[:]),
	}, map[common.Hash]map[common.Hash][]byte{
		conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
		conDelNoCache: {conDelNoCacheSlot: nil},
		conDelCache:   {conDelCacheSlot: nil},
	}); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
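	// Capping the tree to zero diff layers forces the diff to be flattened
	// into (and persisted through) the disk layer.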
	if err := snaps.Cap(diffRoot, 0); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
	// Retrieve all the data through the disk layer and validate it
	base = snaps.Snapshot(diffRoot)
	if _, ok := base.(*diskLayer); !ok {
		t.Fatalf("update not flattened into the disk layer")
	}

	// assertAccount ensures that an account matches the given blob.
	assertAccount := func(account common.Hash, data []byte) {
		t.Helper()
		blob, err := base.AccountRLP(account)
		if err != nil {
			t.Errorf("account access (%x) failed: %v", account, err)
		} else if !bytes.Equal(blob, data) {
			t.Errorf("account access (%x) mismatch: have %x, want %x", account, blob, data)
		}
	}
	assertAccount(accNoModNoCache, accNoModNoCache[:])
	assertAccount(accNoModCache, accNoModCache[:])
	assertAccount(accModNoCache, reverse(accModNoCache[:]))
	assertAccount(accModCache, reverse(accModCache[:]))
	assertAccount(accDelNoCache, nil)
	assertAccount(accDelCache, nil)

	// assertStorage ensures that a storage slot matches the given blob.
	assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
		t.Helper()
		blob, err := base.Storage(account, slot)
		if err != nil {
			t.Errorf("storage access (%x:%x) failed: %v", account, slot, err)
		} else if !bytes.Equal(blob, data) {
			t.Errorf("storage access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
		}
	}
	assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
	assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
	assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
	assertStorage(conDelCache, conDelCacheSlot, nil)
	assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
	assertStorage(conNukeCache, conNukeCacheSlot, nil)

	// Retrieve all the data directly from the database and validate it

	// assertDatabaseAccount ensures that an account from the database matches the given blob.
	assertDatabaseAccount := func(account common.Hash, data []byte) {
		t.Helper()
		if blob := rawdb.ReadAccountSnapshot(db, account); !bytes.Equal(blob, data) {
			t.Errorf("account database access (%x) mismatch: have %x, want %x", account, blob, data)
		}
	}
	assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
	assertDatabaseAccount(accNoModCache, accNoModCache[:])
	assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
	assertDatabaseAccount(accModCache, reverse(accModCache[:]))
	assertDatabaseAccount(accDelNoCache, nil)
	assertDatabaseAccount(accDelCache, nil)

	// assertDatabaseStorage ensures that a storage slot from the database matches the given blob.
	assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
		t.Helper()
		if blob := rawdb.ReadStorageSnapshot(db, account, slot); !bytes.Equal(blob, data) {
			t.Errorf("storage database access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
		}
	}
	assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
	assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
	assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
	assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
	assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
	assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
func TestDiskPartialMerge(t *testing.T) {
	// Iterate the test a few times to ensure we pick various internal orderings
	// for the data slots as well as the progress marker.
	for i := 0; i < 1024; i++ {
		// Create some accounts in the disk layer
		db := memorydb.New()

		var (
			accNoModNoCache     = randomHash()
			accNoModCache       = randomHash()
			accModNoCache       = randomHash()
			accModCache         = randomHash()
			accDelNoCache       = randomHash()
			accDelCache         = randomHash()
			conNoModNoCache     = randomHash()
			conNoModNoCacheSlot = randomHash()
			conNoModCache       = randomHash()
			conNoModCacheSlot   = randomHash()
			conModNoCache       = randomHash()
			conModNoCacheSlot   = randomHash()
			conModCache         = randomHash()
			conModCacheSlot     = randomHash()
			conDelNoCache       = randomHash()
			conDelNoCacheSlot   = randomHash()
			conDelCache         = randomHash()
			conDelCacheSlot     = randomHash()
			conNukeNoCache      = randomHash()
			conNukeNoCacheSlot  = randomHash()
			conNukeCache        = randomHash()
			conNukeCacheSlot    = randomHash()
			baseRoot            = randomHash()
			diffRoot            = randomHash()
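			// genMarker is a random 64-byte generation position (account hash
			// followed by a storage slot hash); anything sorting after it counts
			// as not yet covered by the disk snapshot.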
			genMarker           = append(randomHash().Bytes(), randomHash().Bytes()...)
		)

		// insertAccount injects an account into the database if it's already
		// covered by the generation marker (i.e. sorts at or before it), and
		// drops the op otherwise. This is needed to seed the database with a
		// valid starting snapshot.
		insertAccount := func(account common.Hash, data []byte) {
			if bytes.Compare(account[:], genMarker) <= 0 {
				rawdb.WriteAccountSnapshot(db, account, data[:])
			}
		}
		insertAccount(accNoModNoCache, accNoModNoCache[:])
		insertAccount(accNoModCache, accNoModCache[:])
		insertAccount(accModNoCache, accModNoCache[:])
		insertAccount(accModCache, accModCache[:])
		insertAccount(accDelNoCache, accDelNoCache[:])
		insertAccount(accDelCache, accDelCache[:])

		// insertStorage injects a storage slot into the database if it's already
		// covered by the generation marker (i.e. sorts at or before it), and
		// drops the op otherwise. This is needed to seed the database with a
		// valid starting snapshot.
		insertStorage := func(account common.Hash, slot common.Hash, data []byte) {
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 {
				rawdb.WriteStorageSnapshot(db, account, slot, data[:])
			}
		}
		insertAccount(conNoModNoCache, conNoModNoCache[:])
		insertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		insertAccount(conNoModCache, conNoModCache[:])
		insertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		insertAccount(conModNoCache, conModNoCache[:])
		insertStorage(conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
		insertAccount(conModCache, conModCache[:])
		insertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
		insertAccount(conDelNoCache, conDelNoCache[:])
		insertStorage(conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
		insertAccount(conDelCache, conDelCache[:])
		insertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])

		insertAccount(conNukeNoCache, conNukeNoCache[:])
		insertStorage(conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
		insertAccount(conNukeCache, conNukeCache[:])
		insertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

		rawdb.WriteSnapshotRoot(db, baseRoot)

		// Create a disk layer based on the above using a random progress marker
		// and cache in some data.
		snaps := &Tree{
			layers: map[common.Hash]snapshot{
				baseRoot: &diskLayer{
					diskdb: db,
					cache:  fastcache.New(500 * 1024),
					root:   baseRoot,
				},
			},
		}
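		// Mark the disk layer as only partially generated, up to genMarker.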
		snaps.layers[baseRoot].(*diskLayer).genMarker = genMarker
		base := snaps.Snapshot(baseRoot)

		// assertAccount ensures that an account matches the given blob if it's
		// already covered by the disk snapshot, and errors out otherwise.
		assertAccount := func(account common.Hash, data []byte) {
			t.Helper()
			blob, err := base.AccountRLP(account)
			if bytes.Compare(account[:], genMarker) > 0 && err != ErrNotCoveredYet {
				t.Fatalf("test %d: post-marker (%x) account access (%x) succeeded: %x", i, genMarker, account, blob)
			}
			if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) account access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
			}
		}
		assertAccount(accNoModCache, accNoModCache[:])
		assertAccount(accModCache, accModCache[:])
		assertAccount(accDelCache, accDelCache[:])

		// assertStorage ensures that a storage slot matches the given blob if
		// it's already covered by the disk snapshot, and errors out otherwise.
		assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
			t.Helper()
			blob, err := base.Storage(account, slot)
			if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && err != ErrNotCoveredYet {
				t.Fatalf("test %d: post-marker (%x) storage access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
			}
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) storage access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
			}
		}
		assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
		assertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])
		assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

		// Modify or delete some accounts, flatten everything onto disk
		if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
			accDelNoCache:  {},
			accDelCache:    {},
			conNukeNoCache: {},
			conNukeCache:   {},
		}, map[common.Hash][]byte{
			accModNoCache: reverse(accModNoCache[:]),
			accModCache:   reverse(accModCache[:]),
		}, map[common.Hash]map[common.Hash][]byte{
			conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
			conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
			conDelNoCache: {conDelNoCacheSlot: nil},
			conDelCache:   {conDelCacheSlot: nil},
		}); err != nil {
			t.Fatalf("test %d: failed to update snapshot tree: %v", i, err)
		}
		if err := snaps.Cap(diffRoot, 0); err != nil {
			t.Fatalf("test %d: failed to flatten snapshot tree: %v", i, err)
		}
		// Retrieve all the data through the disk layer and validate it
		base = snaps.Snapshot(diffRoot)
		if _, ok := base.(*diskLayer); !ok {
			t.Fatalf("test %d: update not flattened into the disk layer", i)
		}
		assertAccount(accNoModNoCache, accNoModNoCache[:])
		assertAccount(accNoModCache, accNoModCache[:])
		assertAccount(accModNoCache, reverse(accModNoCache[:]))
		assertAccount(accModCache, reverse(accModCache[:]))
		assertAccount(accDelNoCache, nil)
		assertAccount(accDelCache, nil)

		assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
		assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
		assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
		assertStorage(conDelCache, conDelCacheSlot, nil)
		assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
		assertStorage(conNukeCache, conNukeCacheSlot, nil)

		// Retrieve all the data directly from the database and validate it

		// assertDatabaseAccount ensures that an account inside the database matches
		// the given blob if it's already covered by the disk snapshot, and does not
		// exist otherwise.
		assertDatabaseAccount := func(account common.Hash, data []byte) {
			t.Helper()
			blob := rawdb.ReadAccountSnapshot(db, account)
			if bytes.Compare(account[:], genMarker) > 0 && blob != nil {
				t.Fatalf("test %d: post-marker (%x) account database access (%x) succeeded: %x", i, genMarker, account, blob)
			}
			if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) account database access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
			}
		}
		assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
		assertDatabaseAccount(accNoModCache, accNoModCache[:])
		assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
		assertDatabaseAccount(accModCache, reverse(accModCache[:]))
		assertDatabaseAccount(accDelNoCache, nil)
		assertDatabaseAccount(accDelCache, nil)

		// assertDatabaseStorage ensures that a storage slot inside the database
		// matches the given blob if it's already covered by the disk snapshot,
		// and does not exist otherwise.
		assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
			t.Helper()
			blob := rawdb.ReadStorageSnapshot(db, account, slot)
			if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && blob != nil {
				t.Fatalf("test %d: post-marker (%x) storage database access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
			}
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) storage database access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
			}
		}
		assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
		assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
		assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
		assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
		assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
		assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
	}
}

// Tests that when the bottom-most diff layer is merged into the disk layer,
// the corresponding generator is persisted correctly.
func TestDiskGeneratorPersistence(t *testing.T) {
	var (
		accOne        = randomHash()
		accTwo        = randomHash()
		accOneSlotOne = randomHash()
		accOneSlotTwo = randomHash()

		accThree     = randomHash()
		accThreeSlot = randomHash()
		baseRoot     = randomHash()
		diffRoot     = randomHash()
		diffTwoRoot  = randomHash()
		genMarker    = append(randomHash().Bytes(), randomHash().Bytes()...)
	)
	// Test scenario 1: the disk layer is still under construction.
	db := rawdb.NewMemoryDatabase()

	rawdb.WriteAccountSnapshot(db, accOne, accOne[:])
	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotOne, accOneSlotOne[:])
	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotTwo, accOneSlotTwo[:])
	rawdb.WriteSnapshotRoot(db, baseRoot)

	// Create a disk layer based on all above updates
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			baseRoot: &diskLayer{
				diskdb:    db,
				cache:     fastcache.New(500 * 1024),
				root:      baseRoot,
				genMarker: genMarker,
			},
		},
	}
	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
		accTwo: accTwo[:],
	}, nil); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	if err := snaps.Cap(diffRoot, 0); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
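	// The disk layer is still generating, so flattening must persist the
	// unchanged generation marker alongside the merged data.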
	blob := rawdb.ReadSnapshotGenerator(db)
	var generator journalGenerator
	if err := rlp.DecodeBytes(blob, &generator); err != nil {
		t.Fatalf("failed to decode snapshot generator: %v", err)
	}
	if !bytes.Equal(generator.Marker, genMarker) {
		t.Fatalf("generator marker mismatch: have %x, want %x", generator.Marker, genMarker)
	}
	// Test scenario 2: the disk layer is fully generated.
	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{
		accThree: accThree.Bytes(),
	}, map[common.Hash]map[common.Hash][]byte{
		accThree: {accThreeSlot: accThreeSlot.Bytes()},
	}); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer)
	diskLayer.genMarker = nil // Construction finished
	if err := snaps.Cap(diffTwoRoot, 0); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
	blob = rawdb.ReadSnapshotGenerator(db)
	if err := rlp.DecodeBytes(blob, &generator); err != nil {
		t.Fatalf("failed to decode snapshot generator: %v", err)
	}
	if len(generator.Marker) != 0 {
		t.Fatalf("failed to update snapshot generator: marker not cleared")
	}
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
//
// This test case is a tiny specialized case of TestDiskPartialMerge, which tests
// some very specific corner cases that random tests won't ever trigger.
func TestDiskMidAccountPartialMerge(t *testing.T) {
	// TODO(@karalabe) ?
}

// TestDiskSeek tests that seek operations work on the disk layer.
func TestDiskSeek(t *testing.T) {
	// Create some accounts in the disk layer
	diskdb, err := leveldb.New(t.TempDir(), 256, 0, "", false)
	if err != nil {
		t.Fatal(err)
	}
	db := rawdb.NewDatabase(diskdb)
	defer db.Close()

	// Fill even keys [0,2,4...]
	for i := 0; i < 0xff; i += 2 {
		acc := common.Hash{byte(i)}
		rawdb.WriteAccountSnapshot(db, acc, acc[:])
	}
	// Add a 'higher' key with an incorrect (higher) prefix, so iteration must
	// not run past the account-snapshot key space.
	highKey := []byte{rawdb.SnapshotAccountPrefix[0] + 1}
	db.Put(highKey, []byte{0xff, 0xff})

	baseRoot := randomHash()
	rawdb.WriteSnapshotRoot(db, baseRoot)

	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			baseRoot: &diskLayer{
				diskdb: db,
				cache:  fastcache.New(500 * 1024),
				root:   baseRoot,
			},
		},
	}
	// Test some different seek positions
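	// The iterator is expected to start at the first account hash at or after
	// the requested seek position.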
	type testcase struct {
		pos    byte
		expkey byte
	}
	var cases = []testcase{
		{0xff, 0x55}, // this should exit immediately without checking key
		{0x01, 0x02},
		{0xfe, 0xfe},
		{0xfd, 0xfe},
		{0x00, 0x00},
	}
	for i, tc := range cases {
		it, err := snaps.AccountIterator(baseRoot, common.Hash{tc.pos})
		if err != nil {
			t.Fatalf("case %d, error: %v", i, err)
		}
		count := 0
		for it.Next() {
			k, v, err := it.Hash()[0], it.Account()[0], it.Error()
			if err != nil {
				t.Fatalf("test %d, item %d, error: %v", i, count, err)
			}
			// First item in iterator should have the expected key
			if count == 0 && k != tc.expkey {
				t.Fatalf("test %d, item %d, got %v exp %v", i, count, k, tc.expkey)
			}
			count++
			if v != k {
				t.Fatalf("test %d, item %d, value wrong, got %v exp %v", i, count, v, k)
			}
		}
	}
}