github.com/tacshi/go-ethereum@v0.0.0-20230616113857-84a434e20921/trie/sync_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"bytes"
	"fmt"
	"testing"

	"github.com/tacshi/go-ethereum/common"
	"github.com/tacshi/go-ethereum/core/rawdb"
	"github.com/tacshi/go-ethereum/core/types"
	"github.com/tacshi/go-ethereum/crypto"
	"github.com/tacshi/go-ethereum/ethdb/memorydb"
)

// makeTestTrie creates a sample test trie to test node-wise reconstruction.
func makeTestTrie() (*Database, *StateTrie, map[string][]byte) {
	// Create an empty trie
	triedb := NewDatabase(rawdb.NewMemoryDatabase())
	trie, _ := NewStateTrie(TrieID(common.Hash{}), triedb)

	// Fill it with some arbitrary data
	content := make(map[string][]byte)
	for i := byte(0); i < 255; i++ {
		// Map the same data under multiple keys
		key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
		content[string(key)] = val
		trie.Update(key, val)

		key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
		content[string(key)] = val
		trie.Update(key, val)

		// Add some other data to inflate the trie
		for j := byte(3); j < 13; j++ {
			key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
			content[string(key)] = val
			trie.Update(key, val)
		}
	}
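	// Commit the dirty nodes and flush them into the backing trie database,
	// so the sync tests below can serve them by hash.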
	root, nodes := trie.Commit(false)
	if err := triedb.Update(NewWithNodeSet(nodes)); err != nil {
		panic(fmt.Errorf("failed to commit db: %v", err))
	}
	// Re-create the trie based on the new state
	trie, _ = NewStateTrie(TrieID(root), triedb)
	return triedb, trie, content
}

// checkTrieContents cross-references a reconstructed trie with an expected data
// content map.
func checkTrieContents(t *testing.T, db *Database, root []byte, content map[string][]byte) {
	// Check root availability and trie contents
	trie, err := NewStateTrie(TrieID(common.BytesToHash(root)), db)
	if err != nil {
		t.Fatalf("failed to create trie at %x: %v", root, err)
	}
	if err := checkTrieConsistency(db, common.BytesToHash(root)); err != nil {
		t.Fatalf("inconsistent trie at %x: %v", root, err)
	}
	for key, val := range content {
		if have := trie.Get([]byte(key)); !bytes.Equal(have, val) {
			t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
		}
	}
}

// checkTrieConsistency checks that all nodes in a trie are indeed present.
func checkTrieConsistency(db *Database, root common.Hash) error {
	// Create and iterate a trie rooted in a subnode
	trie, err := NewStateTrie(TrieID(root), db)
	if err != nil {
		return nil // Consider a non-existent state consistent
	}
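	// Fully descend the iterator: every node must be resolved from the
	// database, so a single missing node surfaces through it.Error().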
	it := trie.NodeIterator(nil)
	for it.Next(true) {
	}
	return it.Error()
}

// trieElement represents an element in the state trie (bytecode or trie node).
type trieElement struct {
	path     string
	hash     common.Hash
	syncPath SyncPath
}

// Tests that an empty trie is not scheduled for syncing.
func TestEmptySync(t *testing.T) {
	dbA := NewDatabase(rawdb.NewMemoryDatabase())
	dbB := NewDatabase(rawdb.NewMemoryDatabase())
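	// Both the zero hash and the canonical empty root hash denote an empty trie.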
	emptyA, _ := New(TrieID(common.Hash{}), dbA)
	emptyB, _ := New(TrieID(types.EmptyRootHash), dbB)

	for i, trie := range []*Trie{emptyA, emptyB} {
		sync := NewSync(trie.Hash(), memorydb.New(), nil, []*Database{dbA, dbB}[i].Scheme())
		if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 {
			t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, paths, nodes, codes)
		}
	}
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeSyncIndividual(t *testing.T)       { testIterativeSync(t, 1, false) }
func TestIterativeSyncBatched(t *testing.T)          { testIterativeSync(t, 100, false) }
func TestIterativeSyncIndividualByPath(t *testing.T) { testIterativeSync(t, 1, true) }
func TestIterativeSyncBatchedByPath(t *testing.T)    { testIterativeSync(t, 100, true) }

func testIterativeSync(t *testing.T, count int, bypath bool) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// in the test trie.
	paths, nodes, _ := sched.Missing(count)
	var elements []trieElement
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
	}
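	// Iterate the sync: feed the retrieved nodes back, commit them to disk
	// and reschedule until nothing is reported missing.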
	for len(elements) > 0 {
		results := make([]NodeSyncResult, len(elements))
		if !bypath {
			for i, element := range elements {
				data, err := srcDb.Node(element.hash)
				if err != nil {
					t.Fatalf("failed to retrieve node data for hash %x: %v", element.hash, err)
				}
				results[i] = NodeSyncResult{element.path, data}
			}
		} else {
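			// Retrieve by path instead of hash; the last element of the
			// sync path addresses the node within its (sub)trie.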
			for i, element := range elements {
				data, _, err := srcTrie.TryGetNode(element.syncPath[len(element.syncPath)-1])
				if err != nil {
					t.Fatalf("failed to retrieve node data for path %x: %v", element.path, err)
				}
				results[i] = NodeSyncResult{element.path, data}
			}
		}
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(count)
		elements = elements[:0]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
		}
	}
	// Cross-check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, with the others delivered only later.
func TestIterativeDelayedSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// in the test trie.
	paths, nodes, _ := sched.Missing(10000)
	var elements []trieElement
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
	}
	for len(elements) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]NodeSyncResult, len(elements)/2+1)
		for i, element := range elements[:len(results)] {
			data, err := srcDb.Node(element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			results[i] = NodeSyncResult{element.path, data}
		}
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(10000)
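		// Carry over the tasks not delivered this round and append the
		// newly revealed ones.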
		elements = elements[len(results):]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
		}
	}
	// Cross-check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, albeit in a
// random order.
func TestIterativeRandomSyncIndividual(t *testing.T) { testIterativeRandomSync(t, 1) }
func TestIterativeRandomSyncBatched(t *testing.T)    { testIterativeRandomSync(t, 100) }

func testIterativeRandomSync(t *testing.T, count int) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// in the test trie.
	paths, nodes, _ := sched.Missing(count)
	queue := make(map[string]trieElement)
	for i, path := range paths {
		queue[path] = trieElement{
			path:     path,
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(path)),
		}
	}
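	// Go's map iteration order is randomized, which is what provides the
	// random retrieval order exercised below.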
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]NodeSyncResult, 0, len(queue))
		for path, element := range queue {
			data, err := srcDb.Node(element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			results = append(results, NodeSyncResult{path, data})
		}
		// Feed the retrieved results back and queue new tasks
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(count)
		queue = make(map[string]trieElement)
		for i, path := range paths {
			queue[path] = trieElement{
				path:     path,
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(path)),
			}
		}
	}
	// Cross-check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned (even in random order), with the others delivered
// only later.
func TestIterativeRandomDelayedSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// in the test trie.
	paths, nodes, _ := sched.Missing(10000)
	queue := make(map[string]trieElement)
	for i, path := range paths {
		queue[path] = trieElement{
			path:     path,
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(path)),
		}
	}
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes, in random order
		results := make([]NodeSyncResult, 0, len(queue)/2+1)
		for path, element := range queue {
			data, err := srcDb.Node(element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			results = append(results, NodeSyncResult{path, data})

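			// Stop once roughly half of the queued tasks have been delivered,
			// simulating a partial response.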
			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()
		for _, result := range results {
			delete(queue, result.Path)
		}
		paths, nodes, _ = sched.Missing(10000)
		for i, path := range paths {
			queue[path] = trieElement{
				path:     path,
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(path)),
			}
		}
	}
	// Cross-check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that a trie sync will not request nodes multiple times, even if the
// same node is referenced through several paths.
func TestDuplicateAvoidanceSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// in the test trie.
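	// A limit of 0 means no cap, so every currently pending retrieval is
	// returned in one go.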
	paths, nodes, _ := sched.Missing(0)
	var elements []trieElement
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
	}
	requested := make(map[common.Hash]struct{})

	for len(elements) > 0 {
		results := make([]NodeSyncResult, len(elements))
		for i, element := range elements {
			data, err := srcDb.Node(element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			if _, ok := requested[element.hash]; ok {
				t.Errorf("hash %x already requested once", element.hash)
			}
			requested[element.hash] = struct{}{}

			results[i] = NodeSyncResult{element.path, data}
		}
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(0)
		elements = elements[:0]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
		}
	}
	// Cross-check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
func TestIncompleteSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, _ := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	diskdb := rawdb.NewMemoryDatabase()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// in the test trie.
	var (
		added    []common.Hash
		elements []trieElement
		root     = srcTrie.Hash()
	)
	paths, nodes, _ := sched.Missing(1)
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
	}
	for len(elements) > 0 {
		// Fetch a batch of trie nodes
		results := make([]NodeSyncResult, len(elements))
		for i, element := range elements {
			data, err := srcDb.Node(element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			results[i] = NodeSyncResult{element.path, data}
		}
		// Process each of the trie nodes
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		for _, result := range results {
			hash := crypto.Keccak256Hash(result.Data)
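			// Track every synced node except the root: deleting the root
			// would make the trie look non-existent, which
			// checkTrieConsistency deliberately treats as consistent.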
			if hash != root {
				added = append(added, hash)
			}
			// Check that all known sub-tries in the synced trie are complete
			if err := checkTrieConsistency(triedb, hash); err != nil {
				t.Fatalf("trie inconsistent: %v", err)
			}
		}
		// Fetch the next batch to retrieve
		paths, nodes, _ = sched.Missing(1)
		elements = elements[:0]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
		}
	}
	// Sanity check that removing any node from the database is detected
	for _, hash := range added {
		value, _ := diskdb.Get(hash.Bytes())
		diskdb.Delete(hash.Bytes())
		if err := checkTrieConsistency(triedb, root); err == nil {
			t.Fatalf("trie inconsistency not caught, missing: %x", hash)
		}
		diskdb.Put(hash.Bytes(), value)
	}
}

// Tests that trie nodes get scheduled lexicographically when they have the
// same depth.
func TestSyncOrdering(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler, tracking the requests
	diskdb := rawdb.NewMemoryDatabase()
	triedb := NewDatabase(diskdb)
	sched := NewSync(srcTrie.Hash(), diskdb, nil, srcDb.Scheme())

	// The code requests are ignored here since there is no code
	// in the test trie.
	var (
		reqs     []SyncPath
		elements []trieElement
	)
	paths, nodes, _ := sched.Missing(1)
	for i := 0; i < len(paths); i++ {
		elements = append(elements, trieElement{
			path:     paths[i],
			hash:     nodes[i],
			syncPath: NewSyncPath([]byte(paths[i])),
		})
		reqs = append(reqs, NewSyncPath([]byte(paths[i])))
	}

	for len(elements) > 0 {
		results := make([]NodeSyncResult, len(elements))
		for i, element := range elements {
			data, err := srcDb.Node(element.hash)
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
			}
			results[i] = NodeSyncResult{element.path, data}
		}
		for _, result := range results {
			if err := sched.ProcessNode(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch()
		if err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		paths, nodes, _ = sched.Missing(1)
		elements = elements[:0]
		for i := 0; i < len(paths); i++ {
			elements = append(elements, trieElement{
				path:     paths[i],
				hash:     nodes[i],
				syncPath: NewSyncPath([]byte(paths[i])),
			})
			reqs = append(reqs, NewSyncPath([]byte(paths[i])))
		}
	}
	// Cross-check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)

	// Check that the trie nodes have been requested path-ordered
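	// SyncPath elements are in compact (hex-prefix) encoding, so convert them
	// back to hex nibbles before comparing lexicographically.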
	for i := 0; i < len(reqs)-1; i++ {
		if len(reqs[i]) > 1 || len(reqs[i+1]) > 1 {
			// In the case of the trie tests, there's no storage so the tuples
			// must always be single items. 2-tuples should be tested in state.
			t.Errorf("Invalid request tuples: len(%v) or len(%v) > 1", reqs[i], reqs[i+1])
		}
		if bytes.Compare(compactToHex(reqs[i][0]), compactToHex(reqs[i+1][0])) > 0 {
			t.Errorf("Invalid request order: %v before %v", compactToHex(reqs[i][0]), compactToHex(reqs[i+1][0]))
		}
	}
}