github.com/dim4egster/coreth@v0.10.2/plugin/evm/atomic_syncer_test.go

// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package evm

import (
	"context"
	"fmt"
	"math/rand"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/dim4egster/qmallgo/database/memdb"
	"github.com/dim4egster/qmallgo/database/versiondb"

	"github.com/dim4egster/coreth/ethdb/memorydb"
	"github.com/dim4egster/coreth/plugin/evm/message"
	syncclient "github.com/dim4egster/coreth/sync/client"
	"github.com/dim4egster/coreth/sync/handlers"
	handlerstats "github.com/dim4egster/coreth/sync/handlers/stats"
	"github.com/dim4egster/coreth/trie"
	"github.com/ethereum/go-ethereum/common"
)

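// commitInterval is the commit interval passed to the atomic backend under test;
// testAtomicSyncer expects a committed root at every multiple of this height.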
const commitInterval = 1024

type atomicSyncTestCheckpoint struct {
	expectedNumLeavesSynced int64       // expected number of leaves to have synced at this checkpoint
	leafCutoff              int         // number of leaves to sync before cutting off responses
	targetRoot              common.Hash // root of the trie to resume syncing from after stopping
	targetHeight            uint64      // height to sync to after stopping
}

// testAtomicSyncer creates a leafs request handler backed by [serverTrieDB] and tests that the atomic syncer
// syncs correctly starting at [targetRoot], stopping and resuming at each of the [checkpoints].
func testAtomicSyncer(t *testing.T, serverTrieDB *trie.Database, targetHeight uint64, targetRoot common.Hash, checkpoints []atomicSyncTestCheckpoint, finalExpectedNumLeaves int64) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	numLeaves := 0
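	// The mock client serves leaves from [serverTrieDB] through a leafs request handler, so no network is needed.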
	mockClient := syncclient.NewMockClient(
		message.Codec,
		handlers.NewLeafsRequestHandler(serverTrieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats()),
		nil,
		nil,
	)

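	// Set up the client side: an atomic tx repository and an atomic backend over an in-memory versioned database.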
	clientDB := versiondb.New(memdb.New())
	repo, err := NewAtomicTxRepository(clientDB, message.Codec, 0, nil, nil, nil)
	if err != nil {
		t.Fatal("could not initialize atomic tx repository", err)
	}
	atomicBackend, err := NewAtomicBackend(clientDB, testSharedMemory(), nil, repo, 0, common.Hash{}, commitInterval)
	if err != nil {
		t.Fatal("could not initialize atomic backend", err)
	}
	atomicTrie := atomicBackend.AtomicTrie()
	// For each checkpoint, replace the GetLeafsIntercept to cut off the syncer at the checkpoint's leaf cutoff,
	// then resume syncing from the checkpoint's target root and height.
	for i, checkpoint := range checkpoints {
		// Create syncer targeting the current [targetRoot].
		syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight)
		if err != nil {
			t.Fatal(err)
		}
		mockClient.GetLeafsIntercept = func(_ message.LeafsRequest, leafsResponse message.LeafsResponse) (message.LeafsResponse, error) {
			// If this request exceeds the desired number of leaves, intercept the request with an error
			if numLeaves+len(leafsResponse.Keys) > checkpoint.leafCutoff {
				return message.LeafsResponse{}, fmt.Errorf("intercept cut off responses after %d leaves", checkpoint.leafCutoff)
			}

			// Increment the number of leaves and return the original response
			numLeaves += len(leafsResponse.Keys)
			return leafsResponse, nil
		}

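		// Start the syncer and expect it to stop with an error once the intercept cuts off responses.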
		syncer.Start(ctx)
		if err := <-syncer.Done(); err == nil {
			t.Fatalf("Expected syncer to fail at checkpoint with numLeaves %d", numLeaves)
		}

		assert.Equal(t, checkpoint.expectedNumLeavesSynced, int64(numLeaves), "unexpected number of leaves received at checkpoint %d", i)
		// Replace the target root and height for the next checkpoint
		targetRoot = checkpoint.targetRoot
		targetHeight = checkpoint.targetHeight
	}

	// Create syncer targeting the current [targetRoot].
	syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight)
	if err != nil {
		t.Fatal(err)
	}

	// Update intercept to only count the leaves
	mockClient.GetLeafsIntercept = func(_ message.LeafsRequest, leafsResponse message.LeafsResponse) (message.LeafsResponse, error) {
		// Increment the number of leaves and return the original response
		numLeaves += len(leafsResponse.Keys)
		return leafsResponse, nil
	}

	syncer.Start(ctx)
	if err := <-syncer.Done(); err != nil {
		t.Fatalf("Expected syncer to finish successfully but failed due to %s", err)
	}

	assert.Equal(t, finalExpectedNumLeaves, int64(numLeaves), "unexpected total number of leaves received")

	// Assert the client trie is consistent with the server trie. Any issues with unflushed writes are caught
	// here, since this check only passes if all trie nodes have been written to the underlying DB.
	clientTrieDB := atomicTrie.TrieDB()
	trie.AssertTrieConsistency(t, targetRoot, serverTrieDB, clientTrieDB, nil)

	// check that a committed root exists at every commit interval up to [targetHeight]
	for height := uint64(commitInterval); height <= targetHeight; height += commitInterval {
		root, err := atomicTrie.Root(height)
		assert.NoError(t, err)
		assert.NotZero(t, root)
	}
}

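// TestAtomicSyncer syncs the atomic trie in a single run with no checkpoints, i.e. without interruption.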
func TestAtomicSyncer(t *testing.T) {
	rand.Seed(1)
	targetHeight := 10 * uint64(commitInterval)
	serverTrieDB := trie.NewDatabase(memorydb.New())
	root, _, _ := trie.GenerateTrie(t, serverTrieDB, int(targetHeight), atomicKeyLength)

	testAtomicSyncer(t, serverTrieDB, targetHeight, root, nil, int64(targetHeight))
}

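// TestAtomicSyncerResume interrupts the sync at a checkpoint and resumes it against the same target root.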
func TestAtomicSyncerResume(t *testing.T) {
	rand.Seed(1)
	targetHeight := 10 * uint64(commitInterval)
	serverTrieDB := trie.NewDatabase(memorydb.New())
	numTrieKeys := int(targetHeight) - 1 // no atomic ops for genesis
	root, _, _ := trie.GenerateTrie(t, serverTrieDB, numTrieKeys, atomicKeyLength)

	testAtomicSyncer(t, serverTrieDB, targetHeight, root, []atomicSyncTestCheckpoint{
		{
			targetRoot:              root,
			targetHeight:            targetHeight,
			leafCutoff:              commitInterval*5 - 1,
			expectedNumLeavesSynced: commitInterval * 4,
		},
	}, int64(targetHeight)+commitInterval-1) // we will resync the last commitInterval - 1 leaves
}

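// TestAtomicSyncerResumeNewRootCheckpoint interrupts the sync at a checkpoint and resumes it against a new
// target root at a greater height.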
func TestAtomicSyncerResumeNewRootCheckpoint(t *testing.T) {
	rand.Seed(1)
	targetHeight1 := 10 * uint64(commitInterval)
	serverTrieDB := trie.NewDatabase(memorydb.New())
	numTrieKeys1 := int(targetHeight1) - 1 // no atomic ops for genesis
	root1, _, _ := trie.GenerateTrie(t, serverTrieDB, numTrieKeys1, atomicKeyLength)

	rand.Seed(1) // seed rand again to get the same leaves in GenerateTrie
	targetHeight2 := 20 * uint64(commitInterval)
	numTrieKeys2 := int(targetHeight2) - 1 // no atomic ops for genesis
	root2, _, _ := trie.GenerateTrie(t, serverTrieDB, numTrieKeys2, atomicKeyLength)

	testAtomicSyncer(t, serverTrieDB, targetHeight1, root1, []atomicSyncTestCheckpoint{
		{
			targetRoot:              root2,
			targetHeight:            targetHeight2,
			leafCutoff:              commitInterval*5 - 1,
			expectedNumLeavesSynced: commitInterval * 4,
		},
	}, int64(targetHeight2)+commitInterval-1) // we will resync the last commitInterval - 1 leaves
}