gitee.com/liu-zhao234568/cntest@v1.0.0/core/state/snapshot/disklayer_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"io/ioutil"
	"os"
	"testing"

	"gitee.com/liu-zhao234568/cntest/common"
	"gitee.com/liu-zhao234568/cntest/core/rawdb"
	"gitee.com/liu-zhao234568/cntest/ethdb"
	"gitee.com/liu-zhao234568/cntest/ethdb/leveldb"
	"gitee.com/liu-zhao234568/cntest/ethdb/memorydb"
	"gitee.com/liu-zhao234568/cntest/rlp"
	"github.com/VictoriaMetrics/fastcache"
)

// reverse reverses the contents of a byte slice. It's used to update random
// accounts with deterministic changes.
func reverse(blob []byte) []byte {
	res := make([]byte, len(blob))
	for i, b := range blob {
		res[len(blob)-1-i] = b
	}
	return res
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values.
func TestDiskMerge(t *testing.T) {
	// Create some accounts in the disk layer
	db := memorydb.New()

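	// The hash names below encode the scenario each entry exercises: acc* is a
	// plain account while con* is a contract with one storage slot; NoMod/Mod/Del
	// means the entry is left untouched, modified or deleted by the diff layer,
	// Nuke means the whole contract is destructed; Cache/NoCache marks whether the
	// value is read (and thus cached) from the disk layer before the merge.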
	var (
		accNoModNoCache     = common.Hash{0x1}
		accNoModCache       = common.Hash{0x2}
		accModNoCache       = common.Hash{0x3}
		accModCache         = common.Hash{0x4}
		accDelNoCache       = common.Hash{0x5}
		accDelCache         = common.Hash{0x6}
		conNoModNoCache     = common.Hash{0x7}
		conNoModNoCacheSlot = common.Hash{0x70}
		conNoModCache       = common.Hash{0x8}
		conNoModCacheSlot   = common.Hash{0x80}
		conModNoCache       = common.Hash{0x9}
		conModNoCacheSlot   = common.Hash{0x90}
		conModCache         = common.Hash{0xa}
		conModCacheSlot     = common.Hash{0xa0}
		conDelNoCache       = common.Hash{0xb}
		conDelNoCacheSlot   = common.Hash{0xb0}
		conDelCache         = common.Hash{0xc}
		conDelCacheSlot     = common.Hash{0xc0}
		conNukeNoCache      = common.Hash{0xd}
		conNukeNoCacheSlot  = common.Hash{0xd0}
		conNukeCache        = common.Hash{0xe}
		conNukeCacheSlot    = common.Hash{0xe0}
		baseRoot            = randomHash()
		diffRoot            = randomHash()
	)

	rawdb.WriteAccountSnapshot(db, accNoModNoCache, accNoModNoCache[:])
	rawdb.WriteAccountSnapshot(db, accNoModCache, accNoModCache[:])
	rawdb.WriteAccountSnapshot(db, accModNoCache, accModNoCache[:])
	rawdb.WriteAccountSnapshot(db, accModCache, accModCache[:])
	rawdb.WriteAccountSnapshot(db, accDelNoCache, accDelNoCache[:])
	rawdb.WriteAccountSnapshot(db, accDelCache, accDelCache[:])

	rawdb.WriteAccountSnapshot(db, conNoModNoCache, conNoModNoCache[:])
	rawdb.WriteStorageSnapshot(db, conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conNoModCache, conNoModCache[:])
	rawdb.WriteStorageSnapshot(db, conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conModNoCache, conModNoCache[:])
	rawdb.WriteStorageSnapshot(db, conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conModCache, conModCache[:])
	rawdb.WriteStorageSnapshot(db, conModCache, conModCacheSlot, conModCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conDelNoCache, conDelNoCache[:])
	rawdb.WriteStorageSnapshot(db, conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conDelCache, conDelCache[:])
	rawdb.WriteStorageSnapshot(db, conDelCache, conDelCacheSlot, conDelCacheSlot[:])

	rawdb.WriteAccountSnapshot(db, conNukeNoCache, conNukeNoCache[:])
	rawdb.WriteStorageSnapshot(db, conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conNukeCache, conNukeCache[:])
	rawdb.WriteStorageSnapshot(db, conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

	rawdb.WriteSnapshotRoot(db, baseRoot)

	// Create a disk layer based on the above and cache in some data
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			baseRoot: &diskLayer{
				diskdb: db,
				cache:  fastcache.New(500 * 1024),
				root:   baseRoot,
			},
		},
	}
	base := snaps.Snapshot(baseRoot)
	base.AccountRLP(accNoModCache)
	base.AccountRLP(accModCache)
	base.AccountRLP(accDelCache)
	base.Storage(conNoModCache, conNoModCacheSlot)
	base.Storage(conModCache, conModCacheSlot)
	base.Storage(conDelCache, conDelCacheSlot)
	base.Storage(conNukeCache, conNukeCacheSlot)

	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
		accDelNoCache:  {},
		accDelCache:    {},
		conNukeNoCache: {},
		conNukeCache:   {},
	}, map[common.Hash][]byte{
		accModNoCache: reverse(accModNoCache[:]),
		accModCache:   reverse(accModCache[:]),
	}, map[common.Hash]map[common.Hash][]byte{
		conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
		conDelNoCache: {conDelNoCacheSlot: nil},
		conDelCache:   {conDelCacheSlot: nil},
	}); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
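	// Cap with zero allowed diff layers merges the diff layer straight into the
	// disk layer, persisting its content to the database.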
	if err := snaps.Cap(diffRoot, 0); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
	// Retrieve all the data through the disk layer and validate it
	base = snaps.Snapshot(diffRoot)
	if _, ok := base.(*diskLayer); !ok {
		t.Fatalf("update not flattened into the disk layer")
	}

	// assertAccount ensures that an account matches the given blob.
	assertAccount := func(account common.Hash, data []byte) {
		t.Helper()
		blob, err := base.AccountRLP(account)
		if err != nil {
			t.Errorf("account access (%x) failed: %v", account, err)
		} else if !bytes.Equal(blob, data) {
			t.Errorf("account access (%x) mismatch: have %x, want %x", account, blob, data)
		}
	}
	assertAccount(accNoModNoCache, accNoModNoCache[:])
	assertAccount(accNoModCache, accNoModCache[:])
	assertAccount(accModNoCache, reverse(accModNoCache[:]))
	assertAccount(accModCache, reverse(accModCache[:]))
	assertAccount(accDelNoCache, nil)
	assertAccount(accDelCache, nil)

	// assertStorage ensures that a storage slot matches the given blob.
	assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
		t.Helper()
		blob, err := base.Storage(account, slot)
		if err != nil {
			t.Errorf("storage access (%x:%x) failed: %v", account, slot, err)
		} else if !bytes.Equal(blob, data) {
			t.Errorf("storage access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
		}
	}
	assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
	assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
	assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
	assertStorage(conDelCache, conDelCacheSlot, nil)
	assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
	assertStorage(conNukeCache, conNukeCacheSlot, nil)

	// Retrieve all the data directly from the database and validate it

	// assertDatabaseAccount ensures that an account from the database matches the given blob.
	assertDatabaseAccount := func(account common.Hash, data []byte) {
		t.Helper()
		if blob := rawdb.ReadAccountSnapshot(db, account); !bytes.Equal(blob, data) {
			t.Errorf("account database access (%x) mismatch: have %x, want %x", account, blob, data)
		}
	}
	assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
	assertDatabaseAccount(accNoModCache, accNoModCache[:])
	assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
	assertDatabaseAccount(accModCache, reverse(accModCache[:]))
	assertDatabaseAccount(accDelNoCache, nil)
	assertDatabaseAccount(accDelCache, nil)

	// assertDatabaseStorage ensures that a storage slot from the database matches the given blob.
	assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
		t.Helper()
		if blob := rawdb.ReadStorageSnapshot(db, account, slot); !bytes.Equal(blob, data) {
			t.Errorf("storage database access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
		}
	}
	assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
	assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
	assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
	assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
	assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
	assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
func TestDiskPartialMerge(t *testing.T) {
	// Iterate the test a few times to ensure we pick various internal orderings
	// for the data slots as well as the progress marker.
	for i := 0; i < 1024; i++ {
		// Create some accounts in the disk layer
		db := memorydb.New()

		var (
			accNoModNoCache     = randomHash()
			accNoModCache       = randomHash()
			accModNoCache       = randomHash()
			accModCache         = randomHash()
			accDelNoCache       = randomHash()
			accDelCache         = randomHash()
			conNoModNoCache     = randomHash()
			conNoModNoCacheSlot = randomHash()
			conNoModCache       = randomHash()
			conNoModCacheSlot   = randomHash()
			conModNoCache       = randomHash()
			conModNoCacheSlot   = randomHash()
			conModCache         = randomHash()
			conModCacheSlot     = randomHash()
			conDelNoCache       = randomHash()
			conDelNoCacheSlot   = randomHash()
			conDelCache         = randomHash()
			conDelCacheSlot     = randomHash()
			conNukeNoCache      = randomHash()
			conNukeNoCacheSlot  = randomHash()
			conNukeCache        = randomHash()
			conNukeCacheSlot    = randomHash()
			baseRoot            = randomHash()
			diffRoot            = randomHash()
			genMarker           = append(randomHash().Bytes(), randomHash().Bytes()...)
		)
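		// genMarker mimics an in-progress generation cursor: an account hash followed
		// by a storage slot hash (64 bytes in total). Only keys sorting at or before
		// the marker count as already generated on disk.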

		// insertAccount injects an account into the database if it is covered by the
		// generation marker (i.e. its hash sorts at or before the marker), and drops
		// the op otherwise. This is needed to seed the database with a valid starting
		// snapshot.
		insertAccount := func(account common.Hash, data []byte) {
			if bytes.Compare(account[:], genMarker) <= 0 {
				rawdb.WriteAccountSnapshot(db, account, data[:])
			}
		}
		insertAccount(accNoModNoCache, accNoModNoCache[:])
		insertAccount(accNoModCache, accNoModCache[:])
		insertAccount(accModNoCache, accModNoCache[:])
		insertAccount(accModCache, accModCache[:])
		insertAccount(accDelNoCache, accDelNoCache[:])
		insertAccount(accDelCache, accDelCache[:])

		// insertStorage injects a storage slot into the database if it is covered by
		// the generation marker (i.e. the account+slot key sorts at or before the
		// marker), and drops the op otherwise. This is needed to seed the database
		// with a valid starting snapshot.
		insertStorage := func(account common.Hash, slot common.Hash, data []byte) {
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 {
				rawdb.WriteStorageSnapshot(db, account, slot, data[:])
			}
		}
		insertAccount(conNoModNoCache, conNoModNoCache[:])
		insertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		insertAccount(conNoModCache, conNoModCache[:])
		insertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		insertAccount(conModNoCache, conModNoCache[:])
		insertStorage(conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
		insertAccount(conModCache, conModCache[:])
		insertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
		insertAccount(conDelNoCache, conDelNoCache[:])
		insertStorage(conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
		insertAccount(conDelCache, conDelCache[:])
		insertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])

		insertAccount(conNukeNoCache, conNukeNoCache[:])
		insertStorage(conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
		insertAccount(conNukeCache, conNukeCache[:])
		insertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

		rawdb.WriteSnapshotRoot(db, baseRoot)

		// Create a disk layer based on the above using a random progress marker
		// and cache in some data.
		snaps := &Tree{
			layers: map[common.Hash]snapshot{
				baseRoot: &diskLayer{
					diskdb: db,
					cache:  fastcache.New(500 * 1024),
					root:   baseRoot,
				},
			},
		}
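		// Attach the generation marker so the disk layer reports anything sorting
		// beyond it as not yet covered (ErrNotCoveredYet).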
		snaps.layers[baseRoot].(*diskLayer).genMarker = genMarker
		base := snaps.Snapshot(baseRoot)

		// assertAccount ensures that an account matches the given blob if it's
		// already covered by the disk snapshot, and errors out otherwise.
		assertAccount := func(account common.Hash, data []byte) {
			t.Helper()
			blob, err := base.AccountRLP(account)
			if bytes.Compare(account[:], genMarker) > 0 && err != ErrNotCoveredYet {
				t.Fatalf("test %d: post-marker (%x) account access (%x) succeeded: %x", i, genMarker, account, blob)
			}
			if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) account access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
			}
		}
		assertAccount(accNoModCache, accNoModCache[:])
		assertAccount(accModCache, accModCache[:])
		assertAccount(accDelCache, accDelCache[:])

		// assertStorage ensures that a storage slot matches the given blob if
		// it's already covered by the disk snapshot, and errors out otherwise.
		assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
			t.Helper()
			blob, err := base.Storage(account, slot)
			if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && err != ErrNotCoveredYet {
				t.Fatalf("test %d: post-marker (%x) storage access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
			}
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) storage access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
			}
		}
		assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
		assertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])
		assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

		// Modify or delete some accounts, flatten everything onto disk
		if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
			accDelNoCache:  {},
			accDelCache:    {},
			conNukeNoCache: {},
			conNukeCache:   {},
		}, map[common.Hash][]byte{
			accModNoCache: reverse(accModNoCache[:]),
			accModCache:   reverse(accModCache[:]),
		}, map[common.Hash]map[common.Hash][]byte{
			conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
			conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
			conDelNoCache: {conDelNoCacheSlot: nil},
			conDelCache:   {conDelCacheSlot: nil},
		}); err != nil {
			t.Fatalf("test %d: failed to update snapshot tree: %v", i, err)
		}
		if err := snaps.Cap(diffRoot, 0); err != nil {
			t.Fatalf("test %d: failed to flatten snapshot tree: %v", i, err)
		}
		// Retrieve all the data through the disk layer and validate it
		base = snaps.Snapshot(diffRoot)
		if _, ok := base.(*diskLayer); !ok {
			t.Fatalf("test %d: update not flattened into the disk layer", i)
		}
		assertAccount(accNoModNoCache, accNoModNoCache[:])
		assertAccount(accNoModCache, accNoModCache[:])
		assertAccount(accModNoCache, reverse(accModNoCache[:]))
		assertAccount(accModCache, reverse(accModCache[:]))
		assertAccount(accDelNoCache, nil)
		assertAccount(accDelCache, nil)

		assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
		assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
		assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
		assertStorage(conDelCache, conDelCacheSlot, nil)
		assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
		assertStorage(conNukeCache, conNukeCacheSlot, nil)

		// Retrieve all the data directly from the database and validate it

		// assertDatabaseAccount ensures that an account inside the database matches
		// the given blob if it's already covered by the disk snapshot, and does not
		// exist otherwise.
		assertDatabaseAccount := func(account common.Hash, data []byte) {
			t.Helper()
			blob := rawdb.ReadAccountSnapshot(db, account)
			if bytes.Compare(account[:], genMarker) > 0 && blob != nil {
				t.Fatalf("test %d: post-marker (%x) account database access (%x) succeeded: %x", i, genMarker, account, blob)
			}
			if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) account database access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
			}
		}
		assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
		assertDatabaseAccount(accNoModCache, accNoModCache[:])
		assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
		assertDatabaseAccount(accModCache, reverse(accModCache[:]))
		assertDatabaseAccount(accDelNoCache, nil)
		assertDatabaseAccount(accDelCache, nil)

		// assertDatabaseStorage ensures that a storage slot inside the database
		// matches the given blob if it's already covered by the disk snapshot,
		// and does not exist otherwise.
		assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
			t.Helper()
			blob := rawdb.ReadStorageSnapshot(db, account, slot)
			if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && blob != nil {
				t.Fatalf("test %d: post-marker (%x) storage database access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
			}
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) storage database access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
			}
		}
		assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
		assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
		assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
		assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
		assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
		assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
	}
}

// Tests that when the bottom-most diff layer is merged into the disk layer,
// the corresponding generator is persisted correctly.
func TestDiskGeneratorPersistence(t *testing.T) {
	var (
		accOne        = randomHash()
		accTwo        = randomHash()
		accOneSlotOne = randomHash()
		accOneSlotTwo = randomHash()

		accThree     = randomHash()
		accThreeSlot = randomHash()
		baseRoot     = randomHash()
		diffRoot     = randomHash()
		diffTwoRoot  = randomHash()
		genMarker    = append(randomHash().Bytes(), randomHash().Bytes()...)
	)
	// Test scenario 1: the disk layer is still under construction.
	db := rawdb.NewMemoryDatabase()

	rawdb.WriteAccountSnapshot(db, accOne, accOne[:])
	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotOne, accOneSlotOne[:])
	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotTwo, accOneSlotTwo[:])
	rawdb.WriteSnapshotRoot(db, baseRoot)

	// Create a disk layer based on all above updates
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			baseRoot: &diskLayer{
				diskdb:    db,
				cache:     fastcache.New(500 * 1024),
				root:      baseRoot,
				genMarker: genMarker,
			},
		},
	}
	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
		accTwo: accTwo[:],
	}, nil); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	if err := snaps.Cap(diffRoot, 0); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
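	// The flatten above happened while generation was still in progress, so the
	// persisted generator entry should still carry the in-progress marker.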
	blob := rawdb.ReadSnapshotGenerator(db)
	var generator journalGenerator
	if err := rlp.DecodeBytes(blob, &generator); err != nil {
		t.Fatalf("failed to decode snapshot generator: %v", err)
	}
	if !bytes.Equal(generator.Marker, genMarker) {
		t.Fatalf("generator marker mismatch: have %x, want %x", generator.Marker, genMarker)
	}
	// Test scenario 2: the disk layer is fully generated.
	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{
		accThree: accThree.Bytes(),
	}, map[common.Hash]map[common.Hash][]byte{
		accThree: {accThreeSlot: accThreeSlot.Bytes()},
	}); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer)
	diskLayer.genMarker = nil // Construction finished
	if err := snaps.Cap(diffTwoRoot, 0); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
	blob = rawdb.ReadSnapshotGenerator(db)
	if err := rlp.DecodeBytes(blob, &generator); err != nil {
		t.Fatalf("failed to decode snapshot generator: %v", err)
	}
	if len(generator.Marker) != 0 {
		t.Fatalf("failed to clear the generator marker after generation finished")
	}
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
//
// This test case is a tiny specialized case of TestDiskPartialMerge, which tests
// some very specific corner cases that random tests won't ever trigger.
func TestDiskMidAccountPartialMerge(t *testing.T) {
	// TODO(@karalabe) ?
}

// TestDiskSeek tests that seek operations work on the disk layer
func TestDiskSeek(t *testing.T) {
	// Create some accounts in the disk layer
	var db ethdb.Database

	if dir, err := ioutil.TempDir("", "disklayer-test"); err != nil {
		t.Fatal(err)
	} else {
		defer os.RemoveAll(dir)
		diskdb, err := leveldb.New(dir, 256, 0, "", false)
		if err != nil {
			t.Fatal(err)
		}
		db = rawdb.NewDatabase(diskdb)
	}
	// Fill the database with the even keys [0, 2, 4, ...]
	for i := 0; i < 0xff; i += 2 {
		acc := common.Hash{byte(i)}
		rawdb.WriteAccountSnapshot(db, acc, acc[:])
	}
	// Add a 'higher' key, with an incorrect (higher) prefix
	highKey := []byte{rawdb.SnapshotAccountPrefix[0] + 1}
	db.Put(highKey, []byte{0xff, 0xff})

	baseRoot := randomHash()
	rawdb.WriteSnapshotRoot(db, baseRoot)

	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			baseRoot: &diskLayer{
				diskdb: db,
				cache:  fastcache.New(500 * 1024),
				root:   baseRoot,
			},
		},
	}
	// Test some different seek positions
	type testcase struct {
		pos    byte
		expkey byte
	}
	var cases = []testcase{
		{0xff, 0x55}, // this should exit immediately without checking key
		{0x01, 0x02},
		{0xfe, 0xfe},
		{0xfd, 0xfe},
		{0x00, 0x00},
	}
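	// Each case seeks the account iterator to pos and checks the first key it
	// yields. Only even keys exist, so an odd position should land on the next
	// even key, while seeking past 0xfe should yield no items at all.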
	for i, tc := range cases {
		it, err := snaps.AccountIterator(baseRoot, common.Hash{tc.pos})
		if err != nil {
			t.Fatalf("case %d, error: %v", i, err)
		}
		count := 0
		for it.Next() {
			k, v, err := it.Hash()[0], it.Account()[0], it.Error()
			if err != nil {
				t.Fatalf("test %d, item %d, error: %v", i, count, err)
			}
			// First item in iterator should have the expected key
			if count == 0 && k != tc.expkey {
				t.Fatalf("test %d, item %d, got %v exp %v", i, count, k, tc.expkey)
			}
			count++
			if v != k {
				t.Fatalf("test %d, item %d, value wrong, got %v exp %v", i, count, v, k)
			}
		}
	}
}