github.com/core-coin/go-core/v2@v2.1.9/core/state/snapshot/disklayer_test.go

// Copyright 2019 by the Authors
// This file is part of the go-core library.
//
// The go-core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"io/ioutil"
	"os"
	"testing"

	"github.com/VictoriaMetrics/fastcache"

	"github.com/core-coin/go-core/v2/xcbdb"
	"github.com/core-coin/go-core/v2/xcbdb/leveldb"
	"github.com/core-coin/go-core/v2/xcbdb/memorydb"

	"github.com/core-coin/go-core/v2/common"
	"github.com/core-coin/go-core/v2/core/rawdb"
	"github.com/core-coin/go-core/v2/rlp"
)

// reverse reverses the contents of a byte slice. It's used to update random
// accounts with deterministic changes.
func reverse(blob []byte) []byte {
	res := make([]byte, len(blob))
	for i, b := range blob {
		res[len(blob)-1-i] = b
	}
	return res
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values.
func TestDiskMerge(t *testing.T) {
	// Create some accounts in the disk layer
	db := memorydb.New()

	var (
		accNoModNoCache     = common.Hash{0x1}
		accNoModCache       = common.Hash{0x2}
		accModNoCache       = common.Hash{0x3}
		accModCache         = common.Hash{0x4}
		accDelNoCache       = common.Hash{0x5}
		accDelCache         = common.Hash{0x6}
		conNoModNoCache     = common.Hash{0x7}
		conNoModNoCacheSlot = common.Hash{0x70}
		conNoModCache       = common.Hash{0x8}
		conNoModCacheSlot   = common.Hash{0x80}
		conModNoCache       = common.Hash{0x9}
		conModNoCacheSlot   = common.Hash{0x90}
		conModCache         = common.Hash{0xa}
		conModCacheSlot     = common.Hash{0xa0}
		conDelNoCache       = common.Hash{0xb}
		conDelNoCacheSlot   = common.Hash{0xb0}
		conDelCache         = common.Hash{0xc}
		conDelCacheSlot     = common.Hash{0xc0}
		conNukeNoCache      = common.Hash{0xd}
		conNukeNoCacheSlot  = common.Hash{0xd0}
		conNukeCache        = common.Hash{0xe}
		conNukeCacheSlot    = common.Hash{0xe0}
		baseRoot            = randomHash()
		diffRoot            = randomHash()
	)
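	// Fixture naming: "acc" entries are plain accounts and "con" entries are
	// contracts with a single storage slot. NoMod/Mod/Del/Nuke describes how the
	// diff layer below treats them (untouched, modified, deleted, or destructed
	// with all storage), while Cache/NoCache marks whether the value is read
	// into the clean cache before the merge.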

	rawdb.WriteAccountSnapshot(db, accNoModNoCache, accNoModNoCache[:])
	rawdb.WriteAccountSnapshot(db, accNoModCache, accNoModCache[:])
	rawdb.WriteAccountSnapshot(db, accModNoCache, accModNoCache[:])
	rawdb.WriteAccountSnapshot(db, accModCache, accModCache[:])
	rawdb.WriteAccountSnapshot(db, accDelNoCache, accDelNoCache[:])
	rawdb.WriteAccountSnapshot(db, accDelCache, accDelCache[:])

	rawdb.WriteAccountSnapshot(db, conNoModNoCache, conNoModNoCache[:])
	rawdb.WriteStorageSnapshot(db, conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conNoModCache, conNoModCache[:])
	rawdb.WriteStorageSnapshot(db, conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conModNoCache, conModNoCache[:])
	rawdb.WriteStorageSnapshot(db, conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conModCache, conModCache[:])
	rawdb.WriteStorageSnapshot(db, conModCache, conModCacheSlot, conModCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conDelNoCache, conDelNoCache[:])
	rawdb.WriteStorageSnapshot(db, conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conDelCache, conDelCache[:])
	rawdb.WriteStorageSnapshot(db, conDelCache, conDelCacheSlot, conDelCacheSlot[:])

	rawdb.WriteAccountSnapshot(db, conNukeNoCache, conNukeNoCache[:])
	rawdb.WriteStorageSnapshot(db, conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
	rawdb.WriteAccountSnapshot(db, conNukeCache, conNukeCache[:])
	rawdb.WriteStorageSnapshot(db, conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

	rawdb.WriteSnapshotRoot(db, baseRoot)

	// Create a disk layer based on the above and cache in some data
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			baseRoot: &diskLayer{
				diskdb: db,
				cache:  fastcache.New(500 * 1024),
				root:   baseRoot,
			},
		},
	}
	base := snaps.Snapshot(baseRoot)
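	// Warm the clean cache with the values the merge below is expected to
	// invalidate, so stale cache entries would be caught by the assertions.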
	base.AccountRLP(accNoModCache)
	base.AccountRLP(accModCache)
	base.AccountRLP(accDelCache)
	base.Storage(conNoModCache, conNoModCacheSlot)
	base.Storage(conModCache, conModCacheSlot)
	base.Storage(conDelCache, conDelCacheSlot)
	base.Storage(conNukeCache, conNukeCacheSlot)

	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
		accDelNoCache:  {},
		accDelCache:    {},
		conNukeNoCache: {},
		conNukeCache:   {},
	}, map[common.Hash][]byte{
		accModNoCache: reverse(accModNoCache[:]),
		accModCache:   reverse(accModCache[:]),
	}, map[common.Hash]map[common.Hash][]byte{
		conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
		conDelNoCache: {conDelNoCacheSlot: nil},
		conDelCache:   {conDelCacheSlot: nil},
	}); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
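	// Capping with zero retained diff layers flattens the diff straight into
	// the disk layer, persisting the changes into the backing database.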
	if err := snaps.Cap(diffRoot, 0); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
	// Retrieve all the data through the disk layer and validate it
	base = snaps.Snapshot(diffRoot)
	if _, ok := base.(*diskLayer); !ok {
		t.Fatalf("update not flattened into the disk layer")
	}

	// assertAccount ensures that an account matches the given blob.
	assertAccount := func(account common.Hash, data []byte) {
		t.Helper()
		blob, err := base.AccountRLP(account)
		if err != nil {
			t.Errorf("account access (%x) failed: %v", account, err)
		} else if !bytes.Equal(blob, data) {
			t.Errorf("account access (%x) mismatch: have %x, want %x", account, blob, data)
		}
	}
	assertAccount(accNoModNoCache, accNoModNoCache[:])
	assertAccount(accNoModCache, accNoModCache[:])
	assertAccount(accModNoCache, reverse(accModNoCache[:]))
	assertAccount(accModCache, reverse(accModCache[:]))
	assertAccount(accDelNoCache, nil)
	assertAccount(accDelCache, nil)

	// assertStorage ensures that a storage slot matches the given blob.
	assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
		t.Helper()
		blob, err := base.Storage(account, slot)
		if err != nil {
			t.Errorf("storage access (%x:%x) failed: %v", account, slot, err)
		} else if !bytes.Equal(blob, data) {
			t.Errorf("storage access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
		}
	}
	assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
	assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
	assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
	assertStorage(conDelCache, conDelCacheSlot, nil)
	assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
	assertStorage(conNukeCache, conNukeCacheSlot, nil)

	// Retrieve all the data directly from the database and validate it

	// assertDatabaseAccount ensures that an account from the database matches the given blob.
	assertDatabaseAccount := func(account common.Hash, data []byte) {
		t.Helper()
		if blob := rawdb.ReadAccountSnapshot(db, account); !bytes.Equal(blob, data) {
			t.Errorf("account database access (%x) mismatch: have %x, want %x", account, blob, data)
		}
	}
	assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
	assertDatabaseAccount(accNoModCache, accNoModCache[:])
	assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
	assertDatabaseAccount(accModCache, reverse(accModCache[:]))
	assertDatabaseAccount(accDelNoCache, nil)
	assertDatabaseAccount(accDelCache, nil)

	// assertDatabaseStorage ensures that a storage slot from the database matches the given blob.
	assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
		t.Helper()
		if blob := rawdb.ReadStorageSnapshot(db, account, slot); !bytes.Equal(blob, data) {
			t.Errorf("storage database access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
		}
	}
	assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
	assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
	assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
	assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
	assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
	assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
	assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
	assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
func TestDiskPartialMerge(t *testing.T) {
	// Iterate the test a few times to ensure we pick various internal orderings
	// for the data slots as well as the progress marker.
	for i := 0; i < 1024; i++ {
		// Create some accounts in the disk layer
		db := memorydb.New()

		var (
			accNoModNoCache     = randomHash()
			accNoModCache       = randomHash()
			accModNoCache       = randomHash()
			accModCache         = randomHash()
			accDelNoCache       = randomHash()
			accDelCache         = randomHash()
			conNoModNoCache     = randomHash()
			conNoModNoCacheSlot = randomHash()
			conNoModCache       = randomHash()
			conNoModCacheSlot   = randomHash()
			conModNoCache       = randomHash()
			conModNoCacheSlot   = randomHash()
			conModCache         = randomHash()
			conModCacheSlot     = randomHash()
			conDelNoCache       = randomHash()
			conDelNoCacheSlot   = randomHash()
			conDelCache         = randomHash()
			conDelCacheSlot     = randomHash()
			conNukeNoCache      = randomHash()
			conNukeNoCacheSlot  = randomHash()
			conNukeCache        = randomHash()
			conNukeCacheSlot    = randomHash()
			baseRoot            = randomHash()
			diffRoot            = randomHash()
			genMarker           = append(randomHash().Bytes(), randomHash().Bytes()...)
		)
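		// The generation marker above spans an account hash plus a storage slot
		// hash, placing the simulated in-progress generation at a random point
		// in the snapshot key space.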

		// insertAccount injects an account into the database if it falls at or
		// before the generation marker, dropping the op otherwise. This is needed
		// to seed the database with a valid starting snapshot.
		insertAccount := func(account common.Hash, data []byte) {
			if bytes.Compare(account[:], genMarker) <= 0 {
				rawdb.WriteAccountSnapshot(db, account, data[:])
			}
		}
		insertAccount(accNoModNoCache, accNoModNoCache[:])
		insertAccount(accNoModCache, accNoModCache[:])
		insertAccount(accModNoCache, accModNoCache[:])
		insertAccount(accModCache, accModCache[:])
		insertAccount(accDelNoCache, accDelNoCache[:])
		insertAccount(accDelCache, accDelCache[:])

		// insertStorage injects a storage slot into the database if it falls at
		// or before the generation marker, dropping the op otherwise. This is
		// needed to seed the database with a valid starting snapshot.
		insertStorage := func(account common.Hash, slot common.Hash, data []byte) {
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 {
				rawdb.WriteStorageSnapshot(db, account, slot, data[:])
			}
		}
		insertAccount(conNoModNoCache, conNoModNoCache[:])
		insertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		insertAccount(conNoModCache, conNoModCache[:])
		insertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		insertAccount(conModNoCache, conModNoCache[:])
		insertStorage(conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
		insertAccount(conModCache, conModCache[:])
		insertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
		insertAccount(conDelNoCache, conDelNoCache[:])
		insertStorage(conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
		insertAccount(conDelCache, conDelCache[:])
		insertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])

		insertAccount(conNukeNoCache, conNukeNoCache[:])
		insertStorage(conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
		insertAccount(conNukeCache, conNukeCache[:])
		insertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

		rawdb.WriteSnapshotRoot(db, baseRoot)

		// Create a disk layer based on the above using a random progress marker
		// and cache in some data.
		snaps := &Tree{
			layers: map[common.Hash]snapshot{
				baseRoot: &diskLayer{
					diskdb: db,
					cache:  fastcache.New(500 * 1024),
					root:   baseRoot,
				},
			},
		}
		snaps.layers[baseRoot].(*diskLayer).genMarker = genMarker
		base := snaps.Snapshot(baseRoot)

		// assertAccount ensures that an account matches the given blob if it's
		// already covered by the disk snapshot, and errors out otherwise.
		assertAccount := func(account common.Hash, data []byte) {
			t.Helper()
			blob, err := base.AccountRLP(account)
			if bytes.Compare(account[:], genMarker) > 0 && err != ErrNotCoveredYet {
				t.Fatalf("test %d: post-marker (%x) account access (%x) succeeded: %x", i, genMarker, account, blob)
			}
			if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) account access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
			}
		}
		assertAccount(accNoModCache, accNoModCache[:])
		assertAccount(accModCache, accModCache[:])
		assertAccount(accDelCache, accDelCache[:])

		// assertStorage ensures that a storage slot matches the given blob if
		// it's already covered by the disk snapshot, and errors out otherwise.
		assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
			t.Helper()
			blob, err := base.Storage(account, slot)
			if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && err != ErrNotCoveredYet {
				t.Fatalf("test %d: post-marker (%x) storage access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
			}
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) storage access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
			}
		}
		assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
		assertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])
		assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

		// Modify or delete some accounts, flatten everything onto disk
		if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
			accDelNoCache:  {},
			accDelCache:    {},
			conNukeNoCache: {},
			conNukeCache:   {},
		}, map[common.Hash][]byte{
			accModNoCache: reverse(accModNoCache[:]),
			accModCache:   reverse(accModCache[:]),
		}, map[common.Hash]map[common.Hash][]byte{
			conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
			conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
			conDelNoCache: {conDelNoCacheSlot: nil},
			conDelCache:   {conDelCacheSlot: nil},
		}); err != nil {
			t.Fatalf("test %d: failed to update snapshot tree: %v", i, err)
		}
		if err := snaps.Cap(diffRoot, 0); err != nil {
			t.Fatalf("test %d: failed to flatten snapshot tree: %v", i, err)
		}
		// Retrieve all the data through the disk layer and validate it
		base = snaps.Snapshot(diffRoot)
		if _, ok := base.(*diskLayer); !ok {
			t.Fatalf("test %d: update not flattened into the disk layer", i)
		}
		assertAccount(accNoModNoCache, accNoModNoCache[:])
		assertAccount(accNoModCache, accNoModCache[:])
		assertAccount(accModNoCache, reverse(accModNoCache[:]))
		assertAccount(accModCache, reverse(accModCache[:]))
		assertAccount(accDelNoCache, nil)
		assertAccount(accDelCache, nil)

		assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
		assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
		assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
		assertStorage(conDelCache, conDelCacheSlot, nil)
		assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
		assertStorage(conNukeCache, conNukeCacheSlot, nil)

		// Retrieve all the data directly from the database and validate it

		// assertDatabaseAccount ensures that an account inside the database matches
		// the given blob if it's already covered by the disk snapshot, and does not
		// exist otherwise.
		assertDatabaseAccount := func(account common.Hash, data []byte) {
			t.Helper()
			blob := rawdb.ReadAccountSnapshot(db, account)
			if bytes.Compare(account[:], genMarker) > 0 && blob != nil {
				t.Fatalf("test %d: post-marker (%x) account database access (%x) succeeded: %x", i, genMarker, account, blob)
			}
			if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) account database access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
			}
		}
		assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
		assertDatabaseAccount(accNoModCache, accNoModCache[:])
		assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
		assertDatabaseAccount(accModCache, reverse(accModCache[:]))
		assertDatabaseAccount(accDelNoCache, nil)
		assertDatabaseAccount(accDelCache, nil)

		// assertDatabaseStorage ensures that a storage slot inside the database
		// matches the given blob if it's already covered by the disk snapshot,
		// and does not exist otherwise.
		assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
			t.Helper()
			blob := rawdb.ReadStorageSnapshot(db, account, slot)
			if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && blob != nil {
				t.Fatalf("test %d: post-marker (%x) storage database access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
			}
			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
				t.Fatalf("test %d: pre-marker (%x) storage database access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
			}
		}
		assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
		assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
		assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
		assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
		assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
		assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
		assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
		assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
	}
}

// Tests that the corresponding generator is persisted correctly when the
// bottom-most diff layer is merged into the disk layer.
func TestDiskGeneratorPersistence(t *testing.T) {
	var (
		accOne        = randomHash()
		accTwo        = randomHash()
		accOneSlotOne = randomHash()
		accOneSlotTwo = randomHash()

		accThree     = randomHash()
		accThreeSlot = randomHash()
		baseRoot     = randomHash()
		diffRoot     = randomHash()
		diffTwoRoot  = randomHash()
		genMarker    = append(randomHash().Bytes(), randomHash().Bytes()...)
	)
	// Testing scenario 1, the disk layer is still under construction.
	db := rawdb.NewMemoryDatabase()

	rawdb.WriteAccountSnapshot(db, accOne, accOne[:])
	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotOne, accOneSlotOne[:])
	rawdb.WriteStorageSnapshot(db, accOne, accOneSlotTwo, accOneSlotTwo[:])
	rawdb.WriteSnapshotRoot(db, baseRoot)

	// Create a disk layer based on all above updates
	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			baseRoot: &diskLayer{
				diskdb:    db,
				cache:     fastcache.New(500 * 1024),
				root:      baseRoot,
				genMarker: genMarker,
			},
		},
	}
	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
		accTwo: accTwo[:],
	}, nil); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	if err := snaps.Cap(diffRoot, 0); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
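	// The flatten should have persisted a generator entry that still carries
	// the unfinished generation marker.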
	blob := rawdb.ReadSnapshotGenerator(db)
	var generator journalGenerator
	if err := rlp.DecodeBytes(blob, &generator); err != nil {
		t.Fatalf("Failed to decode snapshot generator: %v", err)
	}
	if !bytes.Equal(generator.Marker, genMarker) {
		t.Fatalf("Generator marker mismatch: have %x, want %x", generator.Marker, genMarker)
	}
	// Testing scenario 2, the disk layer is fully generated.
	// Modify or delete some accounts, flatten everything onto disk
	if err := snaps.Update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{
		accThree: accThree.Bytes(),
	}, map[common.Hash]map[common.Hash][]byte{
		accThree: {accThreeSlot: accThreeSlot.Bytes()},
	}); err != nil {
		t.Fatalf("failed to update snapshot tree: %v", err)
	}
	diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer)
	diskLayer.genMarker = nil // Construction finished
	if err := snaps.Cap(diffTwoRoot, 0); err != nil {
		t.Fatalf("failed to flatten snapshot tree: %v", err)
	}
	blob = rawdb.ReadSnapshotGenerator(db)
	if err := rlp.DecodeBytes(blob, &generator); err != nil {
		t.Fatalf("Failed to decode snapshot generator: %v", err)
	}
	if len(generator.Marker) != 0 {
		t.Fatalf("Failed to update snapshot generator")
	}
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
//
// This test case is a tiny specialized version of TestDiskPartialMerge, covering
// a few very specific corner cases that the randomized test won't reliably trigger.
func TestDiskMidAccountPartialMerge(t *testing.T) {
	// TODO(@raisty) ?
}

// TestDiskSeek tests that seek operations work on the disk layer
func TestDiskSeek(t *testing.T) {
	// Create some accounts in the disk layer
	var db xcbdb.Database

	if dir, err := ioutil.TempDir("", "disklayer-test"); err != nil {
		t.Fatal(err)
	} else {
		defer os.RemoveAll(dir)
		diskdb, err := leveldb.New(dir, 256, 0, "")
		if err != nil {
			t.Fatal(err)
		}
		db = rawdb.NewDatabase(diskdb)
	}
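	// Note: unlike the tests above, this one backs the layer with an on-disk
	// leveldb database rather than an in-memory one.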
	// Fill even keys [0,2,4...]
	for i := 0; i < 0xff; i += 2 {
		acc := common.Hash{byte(i)}
		rawdb.WriteAccountSnapshot(db, acc, acc[:])
	}
	// Add a 'higher' key, with incorrect (higher) prefix
	highKey := []byte{rawdb.SnapshotAccountPrefix[0] + 1}
	db.Put(highKey, []byte{0xff, 0xff})

	baseRoot := randomHash()
	rawdb.WriteSnapshotRoot(db, baseRoot)

	snaps := &Tree{
		layers: map[common.Hash]snapshot{
			baseRoot: &diskLayer{
				diskdb: db,
				cache:  fastcache.New(500 * 1024),
				root:   baseRoot,
			},
		},
	}
	// Test some different seek positions
	type testcase struct {
		pos    byte
		expkey byte
	}
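	// Each case seeks the account iterator to `pos` and expects the first item
	// returned (if any) to carry the leading key byte `expkey`.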
	var cases = []testcase{
		{0xff, 0x55}, // this should exit immediately without checking key
		{0x01, 0x02},
		{0xfe, 0xfe},
		{0xfd, 0xfe},
		{0x00, 0x00},
	}
	for i, tc := range cases {
		it, err := snaps.AccountIterator(baseRoot, common.Hash{tc.pos})
		if err != nil {
			t.Fatalf("case %d, error: %v", i, err)
		}
		count := 0
		for it.Next() {
			k, v, err := it.Hash()[0], it.Account()[0], it.Error()
			if err != nil {
				t.Fatalf("test %d, item %d, error: %v", i, count, err)
			}
			// First item in iterator should have the expected key
			if count == 0 && k != tc.expkey {
				t.Fatalf("test %d, item %d, got %v exp %v", i, count, k, tc.expkey)
			}
			count++
			if v != k {
				t.Fatalf("test %d, item %d, value wrong, got %v exp %v", i, count, v, k)
			}
		}
	}
}