github.com/ethereum/go-ethereum@v1.16.1/triedb/pathdb/database_test.go (about)

     1  // Copyright 2022 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package pathdb
    18  
    19  import (
    20  	"bytes"
    21  	"errors"
    22  	"fmt"
    23  	"math/rand"
    24  	"testing"
    25  
    26  	"github.com/ethereum/go-ethereum/common"
    27  	"github.com/ethereum/go-ethereum/core/rawdb"
    28  	"github.com/ethereum/go-ethereum/core/types"
    29  	"github.com/ethereum/go-ethereum/crypto"
    30  	"github.com/ethereum/go-ethereum/internal/testrand"
    31  	"github.com/ethereum/go-ethereum/rlp"
    32  	"github.com/ethereum/go-ethereum/trie"
    33  	"github.com/ethereum/go-ethereum/trie/trienode"
    34  	"github.com/holiman/uint256"
    35  )
    36  
    37  func updateTrie(db *Database, stateRoot common.Hash, addrHash common.Hash, root common.Hash, dirties map[common.Hash][]byte) (common.Hash, *trienode.NodeSet) {
    38  	var id *trie.ID
    39  	if addrHash == (common.Hash{}) {
    40  		id = trie.StateTrieID(stateRoot)
    41  	} else {
    42  		id = trie.StorageTrieID(stateRoot, addrHash, root)
    43  	}
    44  	tr, err := trie.New(id, db)
    45  	if err != nil {
    46  		panic(fmt.Errorf("failed to load trie, err: %w", err))
    47  	}
    48  	for key, val := range dirties {
    49  		if len(val) == 0 {
    50  			tr.Delete(key.Bytes())
    51  		} else {
    52  			tr.Update(key.Bytes(), val)
    53  		}
    54  	}
    55  	return tr.Commit(false)
    56  }
    57  
    58  func generateAccount(storageRoot common.Hash) types.StateAccount {
    59  	return types.StateAccount{
    60  		Nonce:    uint64(rand.Intn(100)),
    61  		Balance:  uint256.NewInt(rand.Uint64()),
    62  		CodeHash: testrand.Bytes(32),
    63  		Root:     storageRoot,
    64  	}
    65  }
    66  
// The set of state mutation operations applied while generating a random
// state transition; opLen is the operation count used for random selection.
const (
	createAccountOp int = iota // create a brand new account with fresh storage
	modifyAccountOp            // mutate an existing account and part of its storage
	deleteAccountOp            // delete an existing account and clear its storage
	opLen                      // number of supported operations (rand.Intn bound)
)
    73  
// genctx accumulates the transient data of a single generated state
// transition: the mutated accounts/storage slots, their original values
// prior to the transition, and the aggregated dirty trie nodes.
type genctx struct {
	stateRoot     common.Hash                               // Root of the parent state the transition is built on
	accounts      map[common.Hash][]byte                    // Keyed by the hash of account address
	storages      map[common.Hash]map[common.Hash][]byte    // Keyed by the hash of account address and the hash of storage key
	accountOrigin map[common.Address][]byte                 // Keyed by the account address
	storageOrigin map[common.Address]map[common.Hash][]byte // Keyed by the account address and the hash of storage key
	nodes         *trienode.MergedNodeSet
}
    82  
    83  func newCtx(stateRoot common.Hash) *genctx {
    84  	return &genctx{
    85  		stateRoot:     stateRoot,
    86  		accounts:      make(map[common.Hash][]byte),
    87  		storages:      make(map[common.Hash]map[common.Hash][]byte),
    88  		accountOrigin: make(map[common.Address][]byte),
    89  		storageOrigin: make(map[common.Address]map[common.Hash][]byte),
    90  		nodes:         trienode.NewMergedNodeSet(),
    91  	}
    92  }
    93  
    94  func (ctx *genctx) storageOriginSet(rawStorageKey bool, t *tester) map[common.Address]map[common.Hash][]byte {
    95  	if !rawStorageKey {
    96  		return ctx.storageOrigin
    97  	}
    98  	set := make(map[common.Address]map[common.Hash][]byte)
    99  	for addr, storage := range ctx.storageOrigin {
   100  		subset := make(map[common.Hash][]byte)
   101  		for hash, val := range storage {
   102  			key := t.hashPreimage(hash)
   103  			subset[key] = val
   104  		}
   105  		set[addr] = subset
   106  	}
   107  	return set
   108  }
   109  
// tester wraps a path database together with a locally maintained mirror of
// the expected state content, used to cross-check the database's behavior.
type tester struct {
	db        *Database
	roots     []common.Hash          // Roots of all generated state transitions, oldest first
	preimages map[common.Hash][]byte // Preimages of hashed account addresses and storage keys

	// current state set
	accounts map[common.Hash][]byte                 // Keyed by the hash of account address
	storages map[common.Hash]map[common.Hash][]byte // Keyed by the hash of account address and the hash of storage key

	// state snapshots, captured before each transition is committed
	snapAccounts map[common.Hash]map[common.Hash][]byte                 // Keyed by the state root, then the hash of account address
	snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte // Keyed by the state root, the hash of account address and the hash of storage key
}
   123  
// newTester constructs a path database backed by an in-memory key-value store
// (with an ancient freezer rooted in a temp dir) and populates it with the
// given number of randomly generated state layers. historyLimit bounds the
// number of retained state histories (0 means unlimited), isVerkle toggles
// verkle mode and enableIndex turns on state history indexing.
func newTester(t *testing.T, historyLimit uint64, isVerkle bool, layers int, enableIndex bool) *tester {
	var (
		disk, _ = rawdb.Open(rawdb.NewMemoryDatabase(), rawdb.OpenOptions{Ancient: t.TempDir()})
		db      = New(disk, &Config{
			StateHistory:        historyLimit,
			EnableStateIndexing: enableIndex,
			TrieCleanSize:       256 * 1024,
			StateCleanSize:      256 * 1024,
			WriteBufferSize:     256 * 1024,
			NoAsyncFlush:        true, // flush synchronously so tests observe deterministic disk state
		}, isVerkle)

		obj = &tester{
			db:           db,
			preimages:    make(map[common.Hash][]byte),
			accounts:     make(map[common.Hash][]byte),
			storages:     make(map[common.Hash]map[common.Hash][]byte),
			snapAccounts: make(map[common.Hash]map[common.Hash][]byte),
			snapStorages: make(map[common.Hash]map[common.Hash]map[common.Hash][]byte),
		}
	)
	for i := 0; i < layers; i++ {
		var parent = types.EmptyRootHash
		if len(obj.roots) != 0 {
			parent = obj.roots[len(obj.roots)-1]
		}
		// Later layers (i > 6) use raw storage keys for the origin set,
		// exercising both key flavours of the state set.
		root, nodes, states := obj.generate(parent, i > 6)

		if err := db.Update(root, parent, uint64(i), nodes, states); err != nil {
			panic(fmt.Errorf("failed to update state changes, err: %w", err))
		}
		obj.roots = append(obj.roots, root)
	}
	return obj
}
   159  
   160  func (t *tester) accountPreimage(hash common.Hash) common.Address {
   161  	return common.BytesToAddress(t.preimages[hash])
   162  }
   163  
   164  func (t *tester) hashPreimage(hash common.Hash) common.Hash {
   165  	return common.BytesToHash(t.preimages[hash])
   166  }
   167  
   168  func (t *tester) extend(layers int) {
   169  	for i := 0; i < layers; i++ {
   170  		var parent = types.EmptyRootHash
   171  		if len(t.roots) != 0 {
   172  			parent = t.roots[len(t.roots)-1]
   173  		}
   174  		root, nodes, states := t.generate(parent, true)
   175  		if err := t.db.Update(root, parent, uint64(i), nodes, states); err != nil {
   176  			panic(fmt.Errorf("failed to update state changes, err: %w", err))
   177  		}
   178  		t.roots = append(t.roots, root)
   179  	}
   180  }
   181  
// release tears the tester down: the path database is closed first, then the
// underlying disk store (including the freezer) is closed as well.
func (t *tester) release() {
	t.db.Close()
	t.db.diskdb.Close()
}
   186  
// randAccount returns an arbitrary live account along with its address,
// relying on Go's randomized map iteration order for the selection. The
// zero address and a nil blob are returned if no accounts exist.
func (t *tester) randAccount() (common.Address, []byte) {
	for addrHash, account := range t.accounts {
		return t.accountPreimage(addrHash), account
	}
	return common.Address{}, nil
}
   193  
   194  func (t *tester) generateStorage(ctx *genctx, addr common.Address) common.Hash {
   195  	var (
   196  		addrHash = crypto.Keccak256Hash(addr.Bytes())
   197  		storage  = make(map[common.Hash][]byte)
   198  		origin   = make(map[common.Hash][]byte)
   199  	)
   200  	for i := 0; i < 10; i++ {
   201  		v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
   202  		key := testrand.Bytes(32)
   203  		hash := crypto.Keccak256Hash(key)
   204  		t.preimages[hash] = key
   205  
   206  		storage[hash] = v
   207  		origin[hash] = nil
   208  	}
   209  	root, set := updateTrie(t.db, ctx.stateRoot, addrHash, types.EmptyRootHash, storage)
   210  
   211  	ctx.storages[addrHash] = storage
   212  	ctx.storageOrigin[addr] = origin
   213  	ctx.nodes.Merge(set)
   214  	return root
   215  }
   216  
   217  func (t *tester) mutateStorage(ctx *genctx, addr common.Address, root common.Hash) common.Hash {
   218  	var (
   219  		addrHash = crypto.Keccak256Hash(addr.Bytes())
   220  		storage  = make(map[common.Hash][]byte)
   221  		origin   = make(map[common.Hash][]byte)
   222  	)
   223  	for hash, val := range t.storages[addrHash] {
   224  		origin[hash] = val
   225  		storage[hash] = nil
   226  
   227  		if len(origin) == 3 {
   228  			break
   229  		}
   230  	}
   231  	for i := 0; i < 3; i++ {
   232  		v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
   233  		key := testrand.Bytes(32)
   234  		hash := crypto.Keccak256Hash(key)
   235  		t.preimages[hash] = key
   236  
   237  		storage[hash] = v
   238  		origin[hash] = nil
   239  	}
   240  	root, set := updateTrie(t.db, ctx.stateRoot, crypto.Keccak256Hash(addr.Bytes()), root, storage)
   241  
   242  	ctx.storages[addrHash] = storage
   243  	ctx.storageOrigin[addr] = origin
   244  	ctx.nodes.Merge(set)
   245  	return root
   246  }
   247  
   248  func (t *tester) clearStorage(ctx *genctx, addr common.Address, root common.Hash) common.Hash {
   249  	var (
   250  		addrHash = crypto.Keccak256Hash(addr.Bytes())
   251  		storage  = make(map[common.Hash][]byte)
   252  		origin   = make(map[common.Hash][]byte)
   253  	)
   254  	for hash, val := range t.storages[addrHash] {
   255  		origin[hash] = val
   256  		storage[hash] = nil
   257  	}
   258  	root, set := updateTrie(t.db, ctx.stateRoot, addrHash, root, storage)
   259  	if root != types.EmptyRootHash {
   260  		panic("failed to clear storage trie")
   261  	}
   262  	ctx.storages[addrHash] = storage
   263  	ctx.storageOrigin[addr] = origin
   264  	ctx.nodes.Merge(set)
   265  	return root
   266  }
   267  
// generate builds one random state transition on top of the given parent
// root: twenty account-level operations (creations, mutations, deletions)
// together with their storage changes. It returns the new state root, the
// aggregated dirty trie nodes and the state set (with original values) to
// be fed into Database.Update. If rawStorageKey is true, the storage origin
// set is keyed by raw storage keys instead of their hashes.
//
// As a side effect, a snapshot of the pre-transition state is stored under
// the parent root and the tester's live account/storage mirror is advanced
// to the post-transition state.
func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash, *trienode.MergedNodeSet, *StateSetWithOrigin) {
	var (
		ctx     = newCtx(parent)
		dirties = make(map[common.Hash]struct{}) // accounts already touched in this transition
	)
	for i := 0; i < 20; i++ {
		// Start with account creation always
		op := createAccountOp
		if i > 0 {
			op = rand.Intn(opLen)
		}
		switch op {
		case createAccountOp:
			// account creation
			addr := testrand.Address()
			addrHash := crypto.Keccak256Hash(addr.Bytes())

			// Short circuit if the account was already existent
			if _, ok := t.accounts[addrHash]; ok {
				continue
			}
			// Short circuit if the account has been modified within the same transition
			if _, ok := dirties[addrHash]; ok {
				continue
			}
			dirties[addrHash] = struct{}{}

			root := t.generateStorage(ctx, addr)
			ctx.accounts[addrHash] = types.SlimAccountRLP(generateAccount(root))
			ctx.accountOrigin[addr] = nil // no previous value for a fresh account
			t.preimages[addrHash] = addr.Bytes()

		case modifyAccountOp:
			// account mutation
			addr, account := t.randAccount()
			if addr == (common.Address{}) {
				continue
			}
			addrHash := crypto.Keccak256Hash(addr.Bytes())

			// short circuit if the account has been modified within the same transition
			if _, ok := dirties[addrHash]; ok {
				continue
			}
			dirties[addrHash] = struct{}{}

			acct, _ := types.FullAccount(account)
			stRoot := t.mutateStorage(ctx, addr, acct.Root)
			newAccount := types.SlimAccountRLP(generateAccount(stRoot))

			ctx.accounts[addrHash] = newAccount
			ctx.accountOrigin[addr] = account

		case deleteAccountOp:
			// account deletion
			addr, account := t.randAccount()
			if addr == (common.Address{}) {
				continue
			}
			addrHash := crypto.Keccak256Hash(addr.Bytes())

			// short circuit if the account has been modified within the same transition
			if _, ok := dirties[addrHash]; ok {
				continue
			}
			dirties[addrHash] = struct{}{}

			acct, _ := types.FullAccount(account)
			// Clear the account's storage trie first if it owns a non-empty one
			if acct.Root != types.EmptyRootHash {
				t.clearStorage(ctx, addr, acct.Root)
			}
			ctx.accounts[addrHash] = nil // nil marks deletion
			ctx.accountOrigin[addr] = account
		}
	}
	// Fold the account changes into the account trie (empty addrHash selects
	// the state trie in updateTrie).
	root, set := updateTrie(t.db, parent, common.Hash{}, parent, ctx.accounts)
	ctx.nodes.Merge(set)

	// Save state snapshot before commit
	t.snapAccounts[parent] = copyAccounts(t.accounts)
	t.snapStorages[parent] = copyStorages(t.storages)

	// Commit all changes to live state set
	for addrHash, account := range ctx.accounts {
		if len(account) == 0 {
			delete(t.accounts, addrHash)
		} else {
			t.accounts[addrHash] = account
		}
	}
	for addrHash, slots := range ctx.storages {
		if _, ok := t.storages[addrHash]; !ok {
			t.storages[addrHash] = make(map[common.Hash][]byte)
		}
		for sHash, slot := range slots {
			if len(slot) == 0 {
				delete(t.storages[addrHash], sHash)
			} else {
				t.storages[addrHash][sHash] = slot
			}
		}
		// Drop the account's storage set entirely if it became empty
		if len(t.storages[addrHash]) == 0 {
			delete(t.storages, addrHash)
		}
	}
	storageOrigin := ctx.storageOriginSet(rawStorageKey, t)
	return root, ctx.nodes, NewStateSetWithOrigin(ctx.accounts, ctx.storages, ctx.accountOrigin, storageOrigin, rawStorageKey)
}
   376  
   377  // lastHash returns the latest root hash, or empty if nothing is cached.
   378  func (t *tester) lastHash() common.Hash {
   379  	if len(t.roots) == 0 {
   380  		return common.Hash{}
   381  	}
   382  	return t.roots[len(t.roots)-1]
   383  }
   384  
   385  func (t *tester) verifyState(root common.Hash) error {
   386  	tr, err := trie.New(trie.StateTrieID(root), t.db)
   387  	if err != nil {
   388  		return err
   389  	}
   390  	for addrHash, account := range t.snapAccounts[root] {
   391  		blob, err := tr.Get(addrHash.Bytes())
   392  		if err != nil || !bytes.Equal(blob, account) {
   393  			return fmt.Errorf("account is mismatched: %w", err)
   394  		}
   395  	}
   396  	for addrHash, slots := range t.snapStorages[root] {
   397  		blob := t.snapAccounts[root][addrHash]
   398  		if len(blob) == 0 {
   399  			return fmt.Errorf("account %x is missing", addrHash)
   400  		}
   401  		account := new(types.StateAccount)
   402  		if err := rlp.DecodeBytes(blob, account); err != nil {
   403  			return err
   404  		}
   405  		storageIt, err := trie.New(trie.StorageTrieID(root, addrHash, account.Root), t.db)
   406  		if err != nil {
   407  			return err
   408  		}
   409  		for hash, slot := range slots {
   410  			blob, err := storageIt.Get(hash.Bytes())
   411  			if err != nil || !bytes.Equal(blob, slot) {
   412  				return fmt.Errorf("slot is mismatched: %w", err)
   413  			}
   414  		}
   415  	}
   416  	return nil
   417  }
   418  
// verifyHistory checks that exactly the states at or below the current disk
// layer have a state history stored in the freezer (histories are 1-indexed,
// hence i+1), and that each history's root/parent metadata matches the
// locally tracked chain of roots.
func (t *tester) verifyHistory() error {
	bottom := t.bottomIndex()
	for i, root := range t.roots {
		// The state history related to the state above disk layer should not exist.
		if i > bottom {
			_, err := readHistory(t.db.freezer, uint64(i+1))
			if err == nil {
				return errors.New("unexpected state history")
			}
			continue
		}
		// The state history related to the state below or equal to the disk layer
		// should exist.
		obj, err := readHistory(t.db.freezer, uint64(i+1))
		if err != nil {
			return err
		}
		parent := types.EmptyRootHash
		if i != 0 {
			parent = t.roots[i-1]
		}
		if obj.meta.parent != parent {
			return fmt.Errorf("unexpected parent, want: %x, got: %x", parent, obj.meta.parent)
		}
		if obj.meta.root != root {
			return fmt.Errorf("unexpected root, want: %x, got: %x", root, obj.meta.root)
		}
	}
	return nil
}
   449  
   450  // bottomIndex returns the index of current disk layer.
   451  func (t *tester) bottomIndex() int {
   452  	bottom := t.db.tree.bottom()
   453  	for i := 0; i < len(t.roots); i++ {
   454  		if t.roots[i] == bottom.rootHash() {
   455  			return i
   456  		}
   457  	}
   458  	return -1
   459  }
   460  
// TestDatabaseRollback verifies that the database can be reverted state by
// state via the recorded histories, from the current disk layer all the way
// back to the initial state, validating each intermediate state on the way.
func TestDatabaseRollback(t *testing.T) {
	// Redefine the diff layer depth allowance for faster testing.
	maxDiffLayers = 4
	defer func() {
		maxDiffLayers = 128
	}()

	// Verify state histories
	tester := newTester(t, 0, false, 32, false)
	defer tester.release()

	if err := tester.verifyHistory(); err != nil {
		t.Fatalf("Invalid state history, err: %v", err)
	}
	// Revert database from top to bottom
	for i := tester.bottomIndex(); i >= 0; i-- {
		parent := types.EmptyRootHash
		if i > 0 {
			parent = tester.roots[i-1]
		}
		if err := tester.db.Recover(parent); err != nil {
			t.Fatalf("Failed to revert db, err: %v", err)
		}
		// The initial (empty) state has no snapshot to validate against.
		if i > 0 {
			if err := tester.verifyState(parent); err != nil {
				t.Fatalf("Failed to verify state, err: %v", err)
			}
		}
	}
	if tester.db.tree.len() != 1 {
		t.Fatal("Only disk layer is expected")
	}
}
   494  
// TestDatabaseRecoverable checks the Recoverable verdict for a range of
// target roots: only states strictly below the current disk layer (i.e.
// reachable by replaying histories backwards) are reported recoverable.
func TestDatabaseRecoverable(t *testing.T) {
	// Redefine the diff layer depth allowance for faster testing.
	maxDiffLayers = 4
	defer func() {
		maxDiffLayers = 128
	}()

	var (
		tester = newTester(t, 0, false, 12, false)
		index  = tester.bottomIndex()
	)
	defer tester.release()

	var cases = []struct {
		root   common.Hash
		expect bool
	}{
		// Unknown state should be unrecoverable
		{common.Hash{0x1}, false},

		// Initial state should be recoverable
		{types.EmptyRootHash, true},

		// common.Hash{} is not a valid state root for revert
		{common.Hash{}, false},

		// Layers below current disk layer are recoverable
		{tester.roots[index-1], true},

		// Disklayer itself is not recoverable, since it's
		// available for accessing.
		{tester.roots[index], false},

		// Layers above current disk layer are not recoverable
		// since they are available for accessing.
		{tester.roots[index+1], false},
	}
	for i, c := range cases {
		result := tester.db.Recoverable(c.root)
		if result != c.expect {
			t.Fatalf("case: %d, unexpected result, want %t, got %t", i, c.expect, result)
		}
	}
}
   539  
// TestDisable verifies that the path database can be deactivated and then
// re-enabled only with the root matching the persisted disk state, and that
// the journal and all state histories are wiped in the process.
func TestDisable(t *testing.T) {
	// Redefine the diff layer depth allowance for faster testing.
	maxDiffLayers = 4
	defer func() {
		maxDiffLayers = 128
	}()

	tester := newTester(t, 0, false, 32, false)
	defer tester.release()

	// The root of the persisted disk state (hash of the account trie root node).
	stored := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil))
	if err := tester.db.Disable(); err != nil {
		t.Fatalf("Failed to deactivate database: %v", err)
	}
	if err := tester.db.Enable(types.EmptyRootHash); err == nil {
		t.Fatal("Invalid activation should be rejected")
	}
	if err := tester.db.Enable(stored); err != nil {
		t.Fatalf("Failed to activate database: %v", err)
	}

	// Ensure journal is deleted from disk
	if blob := rawdb.ReadTrieJournal(tester.db.diskdb); len(blob) != 0 {
		t.Fatal("Failed to clean journal")
	}
	// Ensure all trie histories are removed
	n, err := tester.db.freezer.Ancients()
	if err != nil {
		t.Fatal("Failed to clean state history")
	}
	if n != 0 {
		t.Fatal("Failed to clean state history")
	}
	// Verify layer tree structure, single disk layer is expected
	if tester.db.tree.len() != 1 {
		t.Fatalf("Extra layer kept %d", tester.db.tree.len())
	}
	if tester.db.tree.bottom().rootHash() != stored {
		t.Fatalf("Root hash is not matched exp %x got %x", stored, tester.db.tree.bottom().rootHash())
	}
}
   581  
// TestCommit verifies that committing the head layer flattens the whole
// layer tree into a single disk layer whose state content and histories
// stay consistent with the locally tracked mirror.
func TestCommit(t *testing.T) {
	// Redefine the diff layer depth allowance for faster testing.
	maxDiffLayers = 4
	defer func() {
		maxDiffLayers = 128
	}()

	tester := newTester(t, 0, false, 12, false)
	defer tester.release()

	if err := tester.db.Commit(tester.lastHash(), false); err != nil {
		t.Fatalf("Failed to cap database, err: %v", err)
	}
	// Verify layer tree structure, single disk layer is expected
	if tester.db.tree.len() != 1 {
		t.Fatal("Layer tree structure is invalid")
	}
	if tester.db.tree.bottom().rootHash() != tester.lastHash() {
		t.Fatal("Layer tree structure is invalid")
	}
	// Verify states
	if err := tester.verifyState(tester.lastHash()); err != nil {
		t.Fatalf("State is invalid, err: %v", err)
	}
	// Verify state histories
	if err := tester.verifyHistory(); err != nil {
		t.Fatalf("State history is invalid, err: %v", err)
	}
}
   611  
// TestJournal checks that journaling the layer tree and reopening the
// database restores every layer from the disk layer upwards, while states
// below the disk layer become unavailable.
func TestJournal(t *testing.T) {
	// Redefine the diff layer depth allowance for faster testing.
	maxDiffLayers = 4
	defer func() {
		maxDiffLayers = 128
	}()

	tester := newTester(t, 0, false, 12, false)
	defer tester.release()

	if err := tester.db.Journal(tester.lastHash()); err != nil {
		t.Errorf("Failed to journal, err: %v", err)
	}
	// Restart the database on the same disk store; the journal should be
	// loaded and the full layer tree reconstructed.
	tester.db.Close()
	tester.db = New(tester.db.diskdb, nil, false)

	// Verify states including disk layer and all diff on top.
	for i := 0; i < len(tester.roots); i++ {
		if i >= tester.bottomIndex() {
			if err := tester.verifyState(tester.roots[i]); err != nil {
				t.Fatalf("Invalid state, err: %v", err)
			}
			continue
		}
		if err := tester.verifyState(tester.roots[i]); err == nil {
			t.Fatal("Unexpected state")
		}
	}
}
   641  
// TestCorruptedJournal verifies that a tampered journal is rejected when
// the database is reopened: all un-persisted diff layers are discarded and
// only the state persisted on disk stays accessible.
func TestCorruptedJournal(t *testing.T) {
	// Redefine the diff layer depth allowance for faster testing.
	maxDiffLayers = 4
	defer func() {
		maxDiffLayers = 128
	}()

	tester := newTester(t, 0, false, 12, false)
	defer tester.release()

	if err := tester.db.Journal(tester.lastHash()); err != nil {
		t.Errorf("Failed to journal, err: %v", err)
	}
	tester.db.Close()
	// The root of the state persisted on disk (hash of the account trie root node).
	root := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil))

	// Mutate the journal in disk, it should be regarded as invalid
	blob := rawdb.ReadTrieJournal(tester.db.diskdb)
	blob[0] = 0xa
	rawdb.WriteTrieJournal(tester.db.diskdb, blob)

	// Verify states, all not-yet-written states should be discarded
	tester.db = New(tester.db.diskdb, nil, false)
	for i := 0; i < len(tester.roots); i++ {
		if tester.roots[i] == root {
			if err := tester.verifyState(root); err != nil {
				t.Fatalf("Disk state is corrupted, err: %v", err)
			}
			continue
		}
		if err := tester.verifyState(tester.roots[i]); err == nil {
			t.Fatal("Unexpected state")
		}
	}
}
   677  
// TestTailTruncateHistory function is designed to test a specific edge case where,
// when history objects are removed from the end, it should trigger a state flush
// if the ID of the new tail object is even higher than the persisted state ID.
//
// For example, let's say the ID of the persistent state is 10, and the current
// history objects range from ID(5) to ID(15). As we accumulate six more objects,
// the history will expand to cover ID(11) to ID(21). ID(11) then becomes the
// oldest history object, and its ID is even higher than the stored state.
//
// In this scenario, it is mandatory to update the persistent state before
// truncating the tail histories. This ensures that the ID of the persistent state
// always falls within the range of [oldest-history-id, latest-history-id].
func TestTailTruncateHistory(t *testing.T) {
	// Redefine the diff layer depth allowance for faster testing.
	maxDiffLayers = 4
	defer func() {
		maxDiffLayers = 128
	}()

	tester := newTester(t, 10, false, 12, false)
	defer tester.release()

	// Reopen the database with the same history limit; tail truncation is
	// expected to be performed during startup.
	tester.db.Close()
	tester.db = New(tester.db.diskdb, &Config{StateHistory: 10}, false)

	head, err := tester.db.freezer.Ancients()
	if err != nil {
		t.Fatalf("Failed to obtain freezer head")
	}
	// The persisted state ID must match the freezer head after truncation.
	stored := rawdb.ReadPersistentStateID(tester.db.diskdb)
	if head != stored {
		t.Fatalf("Failed to truncate excess history object above, stored: %d, head: %d", stored, head)
	}
}
   712  
   713  // copyAccounts returns a deep-copied account set of the provided one.
   714  func copyAccounts(set map[common.Hash][]byte) map[common.Hash][]byte {
   715  	copied := make(map[common.Hash][]byte, len(set))
   716  	for key, val := range set {
   717  		copied[key] = common.CopyBytes(val)
   718  	}
   719  	return copied
   720  }
   721  
   722  // copyStorages returns a deep-copied storage set of the provided one.
   723  func copyStorages(set map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
   724  	copied := make(map[common.Hash]map[common.Hash][]byte, len(set))
   725  	for addrHash, subset := range set {
   726  		copied[addrHash] = make(map[common.Hash][]byte, len(subset))
   727  		for key, val := range subset {
   728  			copied[addrHash][key] = common.CopyBytes(val)
   729  		}
   730  	}
   731  	return copied
   732  }