github.com/klaytn/klaytn@v1.12.1/storage/statedb/sync_test.go

// Modifications Copyright 2018 The klaytn Authors
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//
// This file is derived from trie/sync_test.go (2018/06/04).
// Modified and improved for the klaytn development.

package statedb

import (
	"bytes"
	"testing"

	"github.com/alecthomas/units"
	lru "github.com/hashicorp/golang-lru"

	"github.com/klaytn/klaytn/common"
	"github.com/klaytn/klaytn/crypto"
	"github.com/klaytn/klaytn/storage/database"
)

// makeTestTrie creates a sample test trie to test node-wise reconstruction.
func makeTestTrie() (*Database, *SecureTrie, map[string][]byte) {
	// Create an empty trie
	triedb := NewDatabase(database.NewMemoryDBManager())
	trie, _ := NewSecureTrie(common.Hash{}, triedb, nil)

	// Fill it with some arbitrary data
	content := make(map[string][]byte)
	for i := byte(0); i < 255; i++ {
		// Map the same data under multiple keys
		key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
		content[string(key)] = val
		trie.Update(key, val)

		key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
		content[string(key)] = val
		trie.Update(key, val)

		// Add some other data to inflate the trie
		for j := byte(3); j < 13; j++ {
			key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
			content[string(key)] = val
			trie.Update(key, val)
		}
	}
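	// Commit the accumulated changes so the nodes become retrievable through
	// the trie database. Each of the 255 iterations above inserts 12 keys,
	// so the resulting trie holds 3060 entries.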
	trie.Commit(nil)

	// Return the generated trie
	return triedb, trie, content
}

// checkTrieContents cross-references a reconstructed trie with an expected data
// content map.
func checkTrieContents(t *testing.T, db *Database, root []byte, content map[string][]byte) {
	// Check root availability and trie contents
	trie, err := NewSecureTrie(common.BytesToHash(root), db, nil)
	if err != nil {
		t.Fatalf("failed to create trie at %x: %v", root, err)
	}
	if err := checkTrieConsistency(db, common.BytesToHash(root)); err != nil {
		t.Fatalf("inconsistent trie at %x: %v", root, err)
	}
	for key, val := range content {
		if have := trie.Get([]byte(key)); !bytes.Equal(have, val) {
			t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
		}
	}
}

// checkTrieConsistency checks that all nodes in a trie are indeed present.
func checkTrieConsistency(db *Database, root common.Hash) error {
	// Create and iterate a trie rooted in a subnode
	trie, err := NewSecureTrie(root, db, nil)
	if err != nil {
		return nil // Consider a non-existent state consistent
	}
	it := trie.NodeIterator(nil)
	for it.Next(true) {
	}
	return it.Error()
}

// Tests that an empty trie is not scheduled for syncing.
func TestEmptyTrieSync(t *testing.T) {
	memDBManagerA := database.NewMemoryDBManager()
	memDBManagerB := database.NewMemoryDBManager()
	dbA := NewDatabase(memDBManagerA)
	dbB := NewDatabase(memDBManagerB)
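	// Both a trie created at the zero hash and one created at the canonical
	// empty-root hash (emptyRoot) represent an empty state; neither should
	// yield any sync requests.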
	emptyA, _ := NewTrie(common.Hash{}, dbA, nil)
	emptyB, _ := NewTrie(emptyRoot, dbB, nil)

	for i, trie := range []*Trie{emptyA, emptyB} {
		sync := NewTrieSync(trie.Hash(), database.NewMemoryDBManager(), nil, NewSyncBloom(1, database.NewMemDB()), nil)
		if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
			t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, nodes, paths, codes)
		}
	}
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeSyncIndividual(t *testing.T)       { testIterativeTrieSync(t, 1, false) }
func TestIterativeSyncBatched(t *testing.T)          { testIterativeTrieSync(t, 100, false) }
func TestIterativeSyncIndividualByPath(t *testing.T) { testIterativeTrieSync(t, 1, true) }
func TestIterativeSyncBatchedByPath(t *testing.T)    { testIterativeTrieSync(t, 100, true) }

func trieSyncLoop(t *testing.T, count int, srcTrie *SecureTrie, sched *TrieSync, srcDB *Database, diskDB database.Database, bypath bool) {
	nodes, paths, codes := sched.Missing(count)
	var (
		hashQueue []common.Hash
		pathQueue []SyncPath
	)
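	// In hash mode, both trie nodes and contract codes are requested by hash.
	// In path mode, trie nodes are requested by path instead, while codes are
	// still fetched by hash since they have no path inside the trie.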
	if !bypath {
		hashQueue = append(append(hashQueue[:0], nodes...), codes...)
	} else {
		hashQueue = append(hashQueue[:0], codes...)
		pathQueue = append(pathQueue[:0], paths...)
	}
	for len(hashQueue)+len(pathQueue) > 0 {
		results := make([]SyncResult, len(hashQueue)+len(pathQueue))
		for i, hash := range hashQueue {
			data, err := srcDB.Node(hash.ExtendZero())
			if err != nil {
				t.Fatalf("failed to retrieve node data for hash %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data, nil}
		}
		for i, path := range pathQueue {
			data, _, err := srcTrie.TryGetNode(path[0])
			if err != nil {
				t.Fatalf("failed to retrieve node data for path %x: %v", path, err)
			}
			results[len(hashQueue)+i] = SyncResult{crypto.Keccak256Hash(data), data, nil}
		}
		for index, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result #%d: %v", index, err)
			}
		}
		batch := diskDB.NewBatch()
		if index, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		batch.Write()

		nodes, paths, codes = sched.Missing(count)
		if !bypath {
			hashQueue = append(append(hashQueue[:0], nodes...), codes...)
		} else {
			hashQueue = append(hashQueue[:0], codes...)
			pathQueue = append(pathQueue[:0], paths...)
		}
	}
}

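// testIterativeTrieSync runs the same sync loop under four scheduler
// configurations: a bloom filter only, an LRU cache only, neither, and both.
// The reconstructed trie must match the source contents in every case.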
func testIterativeTrieSync(t *testing.T, count int, bypath bool) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Test with a bloom filter only
	{
		memDBManager := database.NewMemoryDBManager()
		diskdb := memDBManager.GetMemDB()
		triedb := NewDatabase(memDBManager)
		sched := NewTrieSync(srcTrie.Hash(), memDBManager, nil, NewSyncBloom(1, diskdb), nil)

		trieSyncLoop(t, count, srcTrie, sched, srcDb, diskdb, bypath)
		// Cross check that the two tries are in sync
		checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
	}

	// Test with an LRU cache only
	{
		memDBManager := database.NewMemoryDBManager()
		diskdb := memDBManager.GetMemDB()
		triedb := NewDatabase(memDBManager)
		lruCache, _ := lru.New(int(1 * units.MB / common.HashLength))
		sched := NewTrieSync(srcTrie.Hash(), memDBManager, nil, nil, lruCache)

		trieSyncLoop(t, count, srcTrie, sched, srcDb, diskdb, bypath)
		// Cross check that the two tries are in sync
		checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
	}

	// Test with neither a bloom filter nor an LRU cache
	{
		memDBManager := database.NewMemoryDBManager()
		diskdb := memDBManager.GetMemDB()
		triedb := NewDatabase(memDBManager)
		sched := NewTrieSync(srcTrie.Hash(), memDBManager, nil, nil, nil)

		trieSyncLoop(t, count, srcTrie, sched, srcDb, diskdb, bypath)
		// Cross check that the two tries are in sync
		checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
	}

	// Test with both a bloom filter and an LRU cache
	{
		memDBManager := database.NewMemoryDBManager()
		diskdb := memDBManager.GetMemDB()
		triedb := NewDatabase(memDBManager)
		bloom := NewSyncBloom(1, diskdb)
		lruCache, _ := lru.New(int(1 * units.MB / common.HashLength))
		sched := NewTrieSync(srcTrie.Hash(), memDBManager, nil, bloom, lruCache)

		trieSyncLoop(t, count, srcTrie, sched, srcDb, diskdb, bypath)
		// Cross check that the two tries are in sync
		checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
	}
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, with the rest sent only later.
func TestIterativeDelayedTrieSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	memDBManager := database.NewMemoryDBManager()
	diskdb := memDBManager.GetMemDB()
	triedb := NewDatabase(memDBManager)
	sched := NewTrieSync(srcTrie.Hash(), memDBManager, nil, NewSyncBloom(1, diskdb), nil)

	nodes, _, codes := sched.Missing(10000)
	queue := append(append([]common.Hash{}, nodes...), codes...)
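	// Deliver only about half of each scheduled batch; the +1 below guarantees
	// progress even when a single request remains, and the scheduler must keep
	// the undelivered half queued for later rounds.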
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes
		results := make([]SyncResult, len(queue)/2+1)
		for i, hash := range queue[:len(results)] {
			data, err := srcDb.Node(hash.ExtendZero())
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data, nil}
		}
		for index, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result #%d: %v", index, err)
			}
		}
		batch := diskdb.NewBatch()
		if index, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		batch.Write()
		nodes, _, codes = sched.Missing(10000)
		queue = append(append(queue[len(results):], nodes...), codes...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, albeit in
// random order.
func TestIterativeRandomTrieSyncIndividual(t *testing.T) { testIterativeRandomTrieSync(t, 1) }
func TestIterativeRandomTrieSyncBatched(t *testing.T)    { testIterativeRandomTrieSync(t, 100) }

func testIterativeRandomTrieSync(t *testing.T, count int) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	memDBManager := database.NewMemoryDBManager()
	diskdb := memDBManager.GetMemDB()
	triedb := NewDatabase(memDBManager)
	sched := NewTrieSync(srcTrie.Hash(), memDBManager, nil, NewSyncBloom(1, diskdb), nil)

	queue := make(map[common.Hash]struct{})
	nodes, _, codes := sched.Missing(count)
	for _, hash := range append(nodes, codes...) {
		queue[hash] = struct{}{}
	}
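	// Go's map iteration order is randomized, so draining the queue map
	// effectively delivers the scheduled requests in a random order.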
	for len(queue) > 0 {
		// Fetch all the queued nodes in a random order
		results := make([]SyncResult, 0, len(queue))
		for hash := range queue {
			data, err := srcDb.Node(hash.ExtendZero())
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results = append(results, SyncResult{hash, data, nil})
		}
		// Feed the retrieved results back and queue new tasks
		for index, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result #%d: %v", index, err)
			}
		}
		batch := diskdb.NewBatch()
		if index, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		batch.Write()
		queue = make(map[common.Hash]struct{})
		nodes, _, codes = sched.Missing(count)
		for _, hash := range append(nodes, codes...) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned (even in random order), with the rest sent only later.
func TestIterativeRandomDelayedTrieSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	memDBManager := database.NewMemoryDBManager()
	diskdb := memDBManager.GetMemDB()
	triedb := NewDatabase(memDBManager)
	sched := NewTrieSync(srcTrie.Hash(), memDBManager, nil, NewSyncBloom(1, diskdb), nil)

	queue := make(map[common.Hash]struct{})
	nodes, _, codes := sched.Missing(10000)
	for _, hash := range append(nodes, codes...) {
		queue[hash] = struct{}{}
	}
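	// Combine both adversarial behaviours: random delivery order (via map
	// iteration) and partial delivery (only half of each batch, capped by
	// the results slice capacity below).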
	for len(queue) > 0 {
		// Sync only half of the scheduled nodes, in a random order
		results := make([]SyncResult, 0, len(queue)/2+1)
		for hash := range queue {
			data, err := srcDb.Node(hash.ExtendZero())
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results = append(results, SyncResult{hash, data, nil})

			if len(results) >= cap(results) {
				break
			}
		}
		// Feed the retrieved results back and queue new tasks
		for index, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result #%d: %v", index, err)
			}
		}
		batch := diskdb.NewBatch()
		if index, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		batch.Write()
		for _, result := range results {
			delete(queue, result.Hash)
		}
		nodes, _, codes = sched.Missing(10000)
		for _, hash := range append(nodes, codes...) {
			queue[hash] = struct{}{}
		}
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that a trie sync will not request nodes multiple times, even if
// multiple sub-tries reference them.
func TestDuplicateAvoidanceTrieSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	memDBManager := database.NewMemoryDBManager()
	diskdb := memDBManager.GetMemDB()
	triedb := NewDatabase(memDBManager)
	sched := NewTrieSync(srcTrie.Hash(), memDBManager, nil, NewSyncBloom(1, diskdb), nil)

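	// A batch size of 0 asks the scheduler for every currently pending
	// request at once; the requested set then records each hash handed out
	// so that any repeated request is flagged as an error.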
	nodes, _, codes := sched.Missing(0)
	queue := append(append([]common.Hash{}, nodes...), codes...)
	requested := make(map[common.Hash]struct{})

	for len(queue) > 0 {
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash.ExtendZero())
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			if _, ok := requested[hash]; ok {
				t.Errorf("hash %x already requested once", hash)
			}
			requested[hash] = struct{}{}

			results[i] = SyncResult{hash, data, nil}
		}
		for index, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result #%d: %v", index, err)
			}
		}
		batch := diskdb.NewBatch()
		if index, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		batch.Write()
		nodes, _, codes = sched.Missing(0)
		queue = append(append(queue[:0], nodes...), codes...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}

// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
func TestIncompleteTrieSync(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, _ := makeTestTrie()

	// Create a destination trie and sync with the scheduler
	dbm := database.NewMemoryDBManager()
	triedb := NewDatabase(dbm)
	sched := NewTrieSync(srcTrie.Hash(), dbm, nil, NewSyncBloom(1, dbm.GetMemDB()), nil)

	var added []common.Hash
	nodes, _, codes := sched.Missing(1)
	queue := append(append([]common.Hash{}, nodes...), codes...)
	for len(queue) > 0 {
		// Fetch a batch of trie nodes
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash.ExtendZero())
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data, nil}
		}
		// Process each of the trie nodes
		for index, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result #%d: %v", index, err)
			}
		}
		batch := dbm.NewBatch(database.StateTrieDB)
		if index, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data #%d: %v", index, err)
		}
		batch.Write()
		for _, result := range results {
			added = append(added, result.Hash)
		}
		// Check that all known sub-tries in the synced trie are complete
		for _, root := range added {
			if err := checkTrieConsistency(triedb, root); err != nil {
				t.Fatalf("trie inconsistent: %v", err)
			}
		}
		// Fetch the next batch to retrieve
		nodes, _, codes = sched.Missing(1)
		queue = append(append(queue[:0], nodes...), codes...)
	}
	// Sanity check that removing any node from the database is detected
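	// The root (added[0]) is skipped: checkTrieConsistency treats a missing
	// root as a non-existent, and therefore trivially consistent, trie, so
	// deleting it would not be detected.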
	for _, hash := range added[1:] {
		nodehash := hash.ExtendZero()
		value, _ := dbm.ReadTrieNode(nodehash)

		dbm.DeleteTrieNode(nodehash)
		if err := checkTrieConsistency(triedb, added[0]); err == nil {
			t.Fatalf("trie inconsistency not caught, missing: %x", hash)
		}
		dbm.WriteTrieNode(nodehash, value)
	}
}

// Tests that trie nodes get scheduled lexicographically when they share the
// same depth.
func TestSyncOrdering(t *testing.T) {
	// Create a random trie to copy
	srcDb, srcTrie, srcData := makeTestTrie()

	// Create a destination trie and sync with the scheduler, tracking the requests
	diskdb := database.NewMemoryDBManager()
	triedb := NewDatabase(diskdb)
	sched := NewTrieSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb.GetMemDB()), nil)

	nodes, paths, _ := sched.Missing(1)
	queue := append([]common.Hash{}, nodes...)
	reqs := append([]SyncPath{}, paths...)

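	// reqs accumulates every requested path in scheduling order so that the
	// lexicographic (path) ordering can be verified once the sync completes.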
	for len(queue) > 0 {
		results := make([]SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Node(hash.ExtendZero())
			if err != nil {
				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
			}
			results[i] = SyncResult{hash, data, nil}
		}
		for _, result := range results {
			if err := sched.Process(result); err != nil {
				t.Fatalf("failed to process result %v", err)
			}
		}
		batch := diskdb.NewBatch(database.StateTrieDB)
		if _, err := sched.Commit(batch); err != nil {
			t.Fatalf("failed to commit data: %v", err)
		}
		batch.Write()

		nodes, paths, _ = sched.Missing(1)
		queue = append(queue[:0], nodes...)
		reqs = append(reqs, paths...)
	}
	// Cross check that the two tries are in sync
	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)

	// Check that the trie nodes have been requested path-ordered
	for i := 0; i < len(reqs)-1; i++ {
		if len(reqs[i]) > 1 || len(reqs[i+1]) > 1 {
			// In the case of the trie tests, there's no storage so the tuples
			// must always be single items. 2-tuples should be tested in state.
			t.Errorf("Invalid request tuples: len(%v) or len(%v) > 1", reqs[i], reqs[i+1])
		}
		if bytes.Compare(compactToHex(reqs[i][0]), compactToHex(reqs[i+1][0])) > 0 {
			t.Errorf("Invalid request order: %v before %v", compactToHex(reqs[i][0]), compactToHex(reqs[i+1][0]))
		}
	}
}