github.com/ethereum/go-ethereum@v1.14.3/triedb/pathdb/database_test.go

// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package pathdb

import (
	"bytes"
	"errors"
	"fmt"
	"math/rand"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/internal/testrand"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie/trienode"
	"github.com/ethereum/go-ethereum/trie/triestate"
	"github.com/holiman/uint256"
)

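// updateTrie applies the given dirty key/value pairs on top of the trie
// identified by addrHash and root, using the clean set as backing data, and
// returns the new root along with the collected dirty trie nodes.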
func updateTrie(addrHash common.Hash, root common.Hash, dirties, cleans map[common.Hash][]byte) (common.Hash, *trienode.NodeSet) {
	h, err := newTestHasher(addrHash, root, cleans)
	if err != nil {
		panic(fmt.Errorf("failed to create hasher, err: %w", err))
	}
	for key, val := range dirties {
		if len(val) == 0 {
			h.Delete(key.Bytes())
		} else {
			h.Update(key.Bytes(), val)
		}
	}
	root, nodes, err := h.Commit(false)
	if err != nil {
		panic(fmt.Errorf("failed to commit hasher, err: %w", err))
	}
	return root, nodes
}

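// generateAccount constructs a state account with a random nonce, balance and
// code hash, rooted at the given storage root.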
func generateAccount(storageRoot common.Hash) types.StateAccount {
	return types.StateAccount{
		Nonce:    uint64(rand.Intn(100)),
		Balance:  uint256.NewInt(rand.Uint64()),
		CodeHash: testrand.Bytes(32),
		Root:     storageRoot,
	}
}

const (
	createAccountOp int = iota
	modifyAccountOp
	deleteAccountOp
	opLen
)

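// genctx carries the dirty state produced while generating a single round of
// random state transitions: the mutated accounts and storage slots, their
// original values, and the aggregated dirty trie nodes.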
type genctx struct {
	accounts      map[common.Hash][]byte
	storages      map[common.Hash]map[common.Hash][]byte
	accountOrigin map[common.Address][]byte
	storageOrigin map[common.Address]map[common.Hash][]byte
	nodes         *trienode.MergedNodeSet
}

func newCtx() *genctx {
	return &genctx{
		accounts:      make(map[common.Hash][]byte),
		storages:      make(map[common.Hash]map[common.Hash][]byte),
		accountOrigin: make(map[common.Address][]byte),
		storageOrigin: make(map[common.Address]map[common.Hash][]byte),
		nodes:         trienode.NewMergedNodeSet(),
	}
}

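// tester wraps a path database together with the flat state it is expected to
// hold, tracking the live accounts and storages as well as per-root snapshots
// for later verification.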
type tester struct {
	db        *Database
	roots     []common.Hash
	preimages map[common.Hash]common.Address
	accounts  map[common.Hash][]byte
	storages  map[common.Hash]map[common.Hash][]byte

	// state snapshots
	snapAccounts map[common.Hash]map[common.Hash][]byte
	snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte
}

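// newTester initializes a fresh path database backed by an in-memory key-value
// store and a temporary freezer, then applies eight rounds of randomly
// generated state transitions on top of it.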
func newTester(t *testing.T, historyLimit uint64) *tester {
	var (
		disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
		db      = New(disk, &Config{
			StateHistory:   historyLimit,
			CleanCacheSize: 16 * 1024,
			DirtyCacheSize: 16 * 1024,
		}, false)
		obj = &tester{
			db:           db,
			preimages:    make(map[common.Hash]common.Address),
			accounts:     make(map[common.Hash][]byte),
			storages:     make(map[common.Hash]map[common.Hash][]byte),
			snapAccounts: make(map[common.Hash]map[common.Hash][]byte),
			snapStorages: make(map[common.Hash]map[common.Hash]map[common.Hash][]byte),
		}
	)
	for i := 0; i < 8; i++ {
		var parent = types.EmptyRootHash
		if len(obj.roots) != 0 {
			parent = obj.roots[len(obj.roots)-1]
		}
		root, nodes, states := obj.generate(parent)
		if err := db.Update(root, parent, uint64(i), nodes, states); err != nil {
			panic(fmt.Errorf("failed to update state changes, err: %w", err))
		}
		obj.roots = append(obj.roots, root)
	}
	return obj
}

func (t *tester) release() {
	t.db.Close()
	t.db.diskdb.Close()
}

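// randAccount returns an arbitrary existing account by relying on Go's random
// map iteration order, or the zero address if no account is present.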
func (t *tester) randAccount() (common.Address, []byte) {
	for addrHash, account := range t.accounts {
		return t.preimages[addrHash], account
	}
	return common.Address{}, nil
}

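// generateStorage fills a brand-new storage trie for the given account with
// ten random slots and returns the resulting storage root.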
func (t *tester) generateStorage(ctx *genctx, addr common.Address) common.Hash {
	var (
		addrHash = crypto.Keccak256Hash(addr.Bytes())
		storage  = make(map[common.Hash][]byte)
		origin   = make(map[common.Hash][]byte)
	)
	for i := 0; i < 10; i++ {
		v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
		hash := testrand.Hash()

		storage[hash] = v
		origin[hash] = nil
	}
	root, set := updateTrie(addrHash, types.EmptyRootHash, storage, nil)

	ctx.storages[addrHash] = storage
	ctx.storageOrigin[addr] = origin
	ctx.nodes.Merge(set)
	return root
}

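// mutateStorage deletes up to three existing slots of the given account's
// storage trie and inserts three fresh ones, returning the new storage root.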
func (t *tester) mutateStorage(ctx *genctx, addr common.Address, root common.Hash) common.Hash {
	var (
		addrHash = crypto.Keccak256Hash(addr.Bytes())
		storage  = make(map[common.Hash][]byte)
		origin   = make(map[common.Hash][]byte)
	)
	for hash, val := range t.storages[addrHash] {
		origin[hash] = val
		storage[hash] = nil

		if len(origin) == 3 {
			break
		}
	}
	for i := 0; i < 3; i++ {
		v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
		hash := testrand.Hash()

		storage[hash] = v
		origin[hash] = nil
	}
	root, set := updateTrie(crypto.Keccak256Hash(addr.Bytes()), root, storage, t.storages[addrHash])

	ctx.storages[addrHash] = storage
	ctx.storageOrigin[addr] = origin
	ctx.nodes.Merge(set)
	return root
}

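// clearStorage removes every slot of the given account's storage trie and
// asserts that the resulting trie is empty.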
func (t *tester) clearStorage(ctx *genctx, addr common.Address, root common.Hash) common.Hash {
	var (
		addrHash = crypto.Keccak256Hash(addr.Bytes())
		storage  = make(map[common.Hash][]byte)
		origin   = make(map[common.Hash][]byte)
	)
	for hash, val := range t.storages[addrHash] {
		origin[hash] = val
		storage[hash] = nil
	}
	root, set := updateTrie(addrHash, root, storage, t.storages[addrHash])
	if root != types.EmptyRootHash {
		panic("failed to clear storage trie")
	}
	ctx.storages[addrHash] = storage
	ctx.storageOrigin[addr] = origin
	ctx.nodes.Merge(set)
	return root
}

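// generate performs twenty random account operations (creation, mutation or
// deletion) on top of the given parent state, snapshots the pre-change flat
// state, applies the changes to the live sets and returns the new state root,
// the dirty trie nodes and the original values of everything touched.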
func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNodeSet, *triestate.Set) {
	var (
		ctx     = newCtx()
		dirties = make(map[common.Hash]struct{})
	)
	for i := 0; i < 20; i++ {
		switch rand.Intn(opLen) {
		case createAccountOp:
			// account creation
			addr := testrand.Address()
			addrHash := crypto.Keccak256Hash(addr.Bytes())
			if _, ok := t.accounts[addrHash]; ok {
				continue
			}
			if _, ok := dirties[addrHash]; ok {
				continue
			}
			dirties[addrHash] = struct{}{}

			root := t.generateStorage(ctx, addr)
			ctx.accounts[addrHash] = types.SlimAccountRLP(generateAccount(root))
			ctx.accountOrigin[addr] = nil
			t.preimages[addrHash] = addr

		case modifyAccountOp:
			// account mutation
			addr, account := t.randAccount()
			if addr == (common.Address{}) {
				continue
			}
			addrHash := crypto.Keccak256Hash(addr.Bytes())
			if _, ok := dirties[addrHash]; ok {
				continue
			}
			dirties[addrHash] = struct{}{}

			acct, _ := types.FullAccount(account)
			stRoot := t.mutateStorage(ctx, addr, acct.Root)
			newAccount := types.SlimAccountRLP(generateAccount(stRoot))

			ctx.accounts[addrHash] = newAccount
			ctx.accountOrigin[addr] = account

		case deleteAccountOp:
			// account deletion
			addr, account := t.randAccount()
			if addr == (common.Address{}) {
				continue
			}
			addrHash := crypto.Keccak256Hash(addr.Bytes())
			if _, ok := dirties[addrHash]; ok {
				continue
			}
			dirties[addrHash] = struct{}{}

			acct, _ := types.FullAccount(account)
			if acct.Root != types.EmptyRootHash {
				t.clearStorage(ctx, addr, acct.Root)
			}
			ctx.accounts[addrHash] = nil
			ctx.accountOrigin[addr] = account
		}
	}
	root, set := updateTrie(common.Hash{}, parent, ctx.accounts, t.accounts)
	ctx.nodes.Merge(set)

	// Save state snapshot before commit
	t.snapAccounts[parent] = copyAccounts(t.accounts)
	t.snapStorages[parent] = copyStorages(t.storages)

	// Commit all changes to live state set
	for addrHash, account := range ctx.accounts {
		if len(account) == 0 {
			delete(t.accounts, addrHash)
		} else {
			t.accounts[addrHash] = account
		}
	}
	for addrHash, slots := range ctx.storages {
		if _, ok := t.storages[addrHash]; !ok {
			t.storages[addrHash] = make(map[common.Hash][]byte)
		}
		for sHash, slot := range slots {
			if len(slot) == 0 {
				delete(t.storages[addrHash], sHash)
			} else {
				t.storages[addrHash][sHash] = slot
			}
		}
	}
	return root, ctx.nodes, triestate.New(ctx.accountOrigin, ctx.storageOrigin)
}

// lastHash returns the latest root hash, or empty if nothing is cached.
func (t *tester) lastHash() common.Hash {
	if len(t.roots) == 0 {
		return common.Hash{}
	}
	return t.roots[len(t.roots)-1]
}

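// verifyState checks that the trie nodes of every account and storage slot
// recorded in the snapshot for the given root can still be resolved from the
// database and match the snapshotted values.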
func (t *tester) verifyState(root common.Hash) error {
	reader, err := t.db.Reader(root)
	if err != nil {
		return err
	}
	_, err = reader.Node(common.Hash{}, nil, root)
	if err != nil {
		return errors.New("root node is not available")
	}
	for addrHash, account := range t.snapAccounts[root] {
		path := crypto.Keccak256(addrHash.Bytes())
		blob, err := reader.Node(common.Hash{}, path, crypto.Keccak256Hash(account))
		if err != nil || !bytes.Equal(blob, account) {
			return fmt.Errorf("account is mismatched: %w", err)
		}
	}
	for addrHash, slots := range t.snapStorages[root] {
		for hash, slot := range slots {
			path := crypto.Keccak256(hash.Bytes())
			blob, err := reader.Node(addrHash, path, crypto.Keccak256Hash(slot))
			if err != nil || !bytes.Equal(blob, slot) {
				return fmt.Errorf("slot is mismatched: %w", err)
			}
		}
	}
	return nil
}

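// verifyHistory checks that state histories exist in the freezer for every
// root at or below the current disk layer, that they reference the expected
// parent and root hashes, and that no history exists for the layers above.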
func (t *tester) verifyHistory() error {
	bottom := t.bottomIndex()
	for i, root := range t.roots {
		// State histories for the layers above the disk layer should not exist.
		if i > bottom {
			_, err := readHistory(t.db.freezer, uint64(i+1))
			if err == nil {
				return errors.New("unexpected state history")
			}
			continue
		}
		// State histories for the layers at or below the disk layer should exist.
		obj, err := readHistory(t.db.freezer, uint64(i+1))
		if err != nil {
			return err
		}
		parent := types.EmptyRootHash
		if i != 0 {
			parent = t.roots[i-1]
		}
		if obj.meta.parent != parent {
			return fmt.Errorf("unexpected parent, want: %x, got: %x", parent, obj.meta.parent)
		}
		if obj.meta.root != root {
			return fmt.Errorf("unexpected root, want: %x, got: %x", root, obj.meta.root)
		}
	}
	return nil
}

// bottomIndex returns the index of the current disk layer.
func (t *tester) bottomIndex() int {
	bottom := t.db.tree.bottom()
	for i := 0; i < len(t.roots); i++ {
		if t.roots[i] == bottom.rootHash() {
			return i
		}
	}
	return -1
}

func TestDatabaseRollback(t *testing.T) {
	// Redefine the diff layer depth allowance for faster testing.
	maxDiffLayers = 4
	defer func() {
		maxDiffLayers = 128
	}()

	// Verify state histories
	tester := newTester(t, 0)
	defer tester.release()

	if err := tester.verifyHistory(); err != nil {
		t.Fatalf("Invalid state history, err: %v", err)
	}
	// Revert database from top to bottom
	for i := tester.bottomIndex(); i >= 0; i-- {
		root := tester.roots[i]
		parent := types.EmptyRootHash
		if i > 0 {
			parent = tester.roots[i-1]
		}
		loader := newHashLoader(tester.snapAccounts[root], tester.snapStorages[root])
		if err := tester.db.Recover(parent, loader); err != nil {
			t.Fatalf("Failed to revert db, err: %v", err)
		}
		if i > 0 {
			if err := tester.verifyState(parent); err != nil {
				t.Fatalf("Failed to verify state, err: %v", err)
			}
		}
	}
	if tester.db.tree.len() != 1 {
		t.Fatal("Only disk layer is expected")
	}
}

func TestDatabaseRecoverable(t *testing.T) {
	// Redefine the diff layer depth allowance for faster testing.
	maxDiffLayers = 4
	defer func() {
		maxDiffLayers = 128
	}()

	var (
		tester = newTester(t, 0)
		index  = tester.bottomIndex()
	)
	defer tester.release()

	var cases = []struct {
		root   common.Hash
		expect bool
	}{
		// Unknown state should be unrecoverable
		{common.Hash{0x1}, false},

		// Initial state should be recoverable
		{types.EmptyRootHash, true},

		// Initial state should be recoverable
		{common.Hash{}, true},

		// Layers below the current disk layer are recoverable
		{tester.roots[index-1], true},

		// The disk layer itself is not recoverable, since it is
		// still accessible.
		{tester.roots[index], false},

		// Layers above the current disk layer are not recoverable,
		// since they are still accessible.
		{tester.roots[index+1], false},
	}
	for i, c := range cases {
		result := tester.db.Recoverable(c.root)
		if result != c.expect {
			t.Fatalf("case: %d, unexpected result, want %t, got %t", i, c.expect, result)
		}
	}
}

func TestDisable(t *testing.T) {
	// Redefine the diff layer depth allowance for faster testing.
	maxDiffLayers = 4
	defer func() {
		maxDiffLayers = 128
	}()

	tester := newTester(t, 0)
	defer tester.release()

	stored := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil))
	if err := tester.db.Disable(); err != nil {
		t.Fatalf("Failed to deactivate database: %v", err)
	}
	if err := tester.db.Enable(types.EmptyRootHash); err == nil {
		t.Fatal("Invalid activation should be rejected")
	}
	if err := tester.db.Enable(stored); err != nil {
		t.Fatalf("Failed to activate database: %v", err)
	}

	// Ensure journal is deleted from disk
	if blob := rawdb.ReadTrieJournal(tester.db.diskdb); len(blob) != 0 {
		t.Fatal("Failed to clean journal")
	}
	// Ensure all trie histories are removed
	n, err := tester.db.freezer.Ancients()
	if err != nil {
		t.Fatal("Failed to clean state history")
	}
	if n != 0 {
		t.Fatal("Failed to clean state history")
	}
	// Verify layer tree structure, single disk layer is expected
	if tester.db.tree.len() != 1 {
		t.Fatalf("Extra layer kept %d", tester.db.tree.len())
	}
	if tester.db.tree.bottom().rootHash() != stored {
		t.Fatalf("Root hash is not matched exp %x got %x", stored, tester.db.tree.bottom().rootHash())
	}
}

func TestCommit(t *testing.T) {
	// Redefine the diff layer depth allowance for faster testing.
	maxDiffLayers = 4
	defer func() {
		maxDiffLayers = 128
	}()

	tester := newTester(t, 0)
	defer tester.release()

	if err := tester.db.Commit(tester.lastHash(), false); err != nil {
		t.Fatalf("Failed to cap database, err: %v", err)
	}
	// Verify layer tree structure, single disk layer is expected
	if tester.db.tree.len() != 1 {
		t.Fatal("Layer tree structure is invalid")
	}
	if tester.db.tree.bottom().rootHash() != tester.lastHash() {
		t.Fatal("Layer tree structure is invalid")
	}
	// Verify states
	if err := tester.verifyState(tester.lastHash()); err != nil {
		t.Fatalf("State is invalid, err: %v", err)
	}
	// Verify state histories
	if err := tester.verifyHistory(); err != nil {
		t.Fatalf("State history is invalid, err: %v", err)
	}
}

func TestJournal(t *testing.T) {
	// Redefine the diff layer depth allowance for faster testing.
	maxDiffLayers = 4
	defer func() {
		maxDiffLayers = 128
	}()

	tester := newTester(t, 0)
	defer tester.release()

	if err := tester.db.Journal(tester.lastHash()); err != nil {
		t.Errorf("Failed to journal, err: %v", err)
	}
	tester.db.Close()
	tester.db = New(tester.db.diskdb, nil, false)

	// Verify states, including the disk layer and all diff layers on top.
	for i := 0; i < len(tester.roots); i++ {
		if i >= tester.bottomIndex() {
			if err := tester.verifyState(tester.roots[i]); err != nil {
				t.Fatalf("Invalid state, err: %v", err)
			}
			continue
		}
		if err := tester.verifyState(tester.roots[i]); err == nil {
			t.Fatal("Unexpected state")
		}
	}
}

func TestCorruptedJournal(t *testing.T) {
	// Redefine the diff layer depth allowance for faster testing.
	maxDiffLayers = 4
	defer func() {
		maxDiffLayers = 128
	}()

	tester := newTester(t, 0)
	defer tester.release()

	if err := tester.db.Journal(tester.lastHash()); err != nil {
		t.Errorf("Failed to journal, err: %v", err)
	}
	tester.db.Close()
	root := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil))

	// Mutate the journal on disk; it should then be regarded as invalid.
	blob := rawdb.ReadTrieJournal(tester.db.diskdb)
	blob[0] = 0xa
	rawdb.WriteTrieJournal(tester.db.diskdb, blob)

	// Verify states; all not-yet-written states should be discarded.
	tester.db = New(tester.db.diskdb, nil, false)
	for i := 0; i < len(tester.roots); i++ {
		if tester.roots[i] == root {
			if err := tester.verifyState(root); err != nil {
				t.Fatalf("Disk state is corrupted, err: %v", err)
			}
			continue
		}
		if err := tester.verifyState(tester.roots[i]); err == nil {
			t.Fatal("Unexpected state")
		}
	}
}

// TestTailTruncateHistory is designed to test a specific edge case where,
// when history objects are removed from the tail, a state flush should be
// triggered if the ID of the new tail object is higher than the persisted
// state ID.
//
// For example, say the ID of the persistent state is 10 and the current
// history objects range from ID(5) to ID(15). As we accumulate six more
// objects, the history will expand to cover ID(11) to ID(21). ID(11) then
// becomes the oldest history object, and its ID is higher than that of the
// persisted state.
//
// In this scenario, it is mandatory to update the persistent state before
// truncating the tail histories. This ensures that the ID of the persistent
// state always falls within the range [oldest-history-id, latest-history-id].
func TestTailTruncateHistory(t *testing.T) {
	// Redefine the diff layer depth allowance for faster testing.
	maxDiffLayers = 4
	defer func() {
		maxDiffLayers = 128
	}()

	tester := newTester(t, 10)
	defer tester.release()

	tester.db.Close()
	tester.db = New(tester.db.diskdb, &Config{StateHistory: 10}, false)

	head, err := tester.db.freezer.Ancients()
	if err != nil {
		t.Fatalf("Failed to obtain freezer head")
	}
	stored := rawdb.ReadPersistentStateID(tester.db.diskdb)
	if head != stored {
		t.Fatalf("Failed to truncate excess history object above, stored: %d, head: %d", stored, head)
	}
}

// copyAccounts returns a deep-copied account set of the provided one.
func copyAccounts(set map[common.Hash][]byte) map[common.Hash][]byte {
	copied := make(map[common.Hash][]byte, len(set))
	for key, val := range set {
		copied[key] = common.CopyBytes(val)
	}
	return copied
}

// copyStorages returns a deep-copied storage set of the provided one.
func copyStorages(set map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
	copied := make(map[common.Hash]map[common.Hash][]byte, len(set))
	for addrHash, subset := range set {
		copied[addrHash] = make(map[common.Hash][]byte, len(subset))
		for key, val := range subset {
			copied[addrHash][key] = common.CopyBytes(val)
		}
	}
	return copied
}