github.com/core-coin/go-core/v2@v2.1.9/les/handler_test.go

// Copyright 2016 by the Authors
// This file is part of the go-core library.
//
// The go-core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"encoding/binary"
	"math/big"
	"math/rand"
	"testing"
	"time"

	"github.com/core-coin/go-core/v2/consensus/cryptore"

	"github.com/core-coin/go-core/v2/common"
	"github.com/core-coin/go-core/v2/common/mclock"
	"github.com/core-coin/go-core/v2/core"
	"github.com/core-coin/go-core/v2/core/rawdb"
	"github.com/core-coin/go-core/v2/core/types"
	"github.com/core-coin/go-core/v2/crypto"
	"github.com/core-coin/go-core/v2/light"
	"github.com/core-coin/go-core/v2/p2p"
	"github.com/core-coin/go-core/v2/params"
	"github.com/core-coin/go-core/v2/rlp"
	"github.com/core-coin/go-core/v2/trie"
	"github.com/core-coin/go-core/v2/xcb/downloader"
)

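// expectResponse reads the next message from r and verifies that it matches
// the LES reply envelope for the given request: the message code, the request
// ID being answered, the server's remaining flow-control buffer value (BV) and
// the payload itself. p2p.ExpectMsg compares RLP encodings, so the expected
// data must be structurally identical to what the server sends.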
func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}) error {
	type resp struct {
		ReqID, BV uint64
		Data      interface{}
	}
	return p2p.ExpectMsg(r, msgcode, resp{reqID, bv, data})
}

// Tests that block headers can be retrieved from a remote chain based on user queries.
func TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) }
func TestGetBlockHeadersLes3(t *testing.T) { testGetBlockHeaders(t, 3) }

func testGetBlockHeaders(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, downloader.MaxHashFetch+15, protocol, nil, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Create a "random" unknown hash for testing
	var unknown common.Hash
	for i := range unknown {
		unknown[i] = byte(i)
	}
	// Create a batch of tests for various scenarios
	limit := uint64(MaxHeaderFetch)
	tests := []struct {
		query  *getBlockHeadersData // The query to execute for header retrieval
		expect []common.Hash        // The hashes of the blocks whose headers are expected
	}{
		// A single random block should be retrievable by hash as well as by number
		{
			&getBlockHeadersData{Origin: hashOrNumber{Hash: bc.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},
		},
		// Multiple headers should be retrievable in both directions
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 + 1).Hash(),
				bc.GetBlockByNumber(limit/2 + 2).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 - 1).Hash(),
				bc.GetBlockByNumber(limit/2 - 2).Hash(),
			},
		},
		// Multiple headers with skip lists should be retrievable
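		// (Skip: 3 returns every fourth header, i.e. N, N+4, N+8)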
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 + 4).Hash(),
				bc.GetBlockByNumber(limit/2 + 8).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 - 4).Hash(),
				bc.GetBlockByNumber(limit/2 - 8).Hash(),
			},
		},
		// The chain endpoints should be retrievable
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(0).Hash()},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64()}, Amount: 1},
			[]common.Hash{bc.CurrentBlock().Hash()},
		},
		// Ensure protocol limits are honored
		//{
		//	&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true},
		//	[]common.Hash{},
		//},
		// Check that requesting more than available is handled gracefully
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(),
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64()).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(4).Hash(),
				bc.GetBlockByNumber(0).Hash(),
			},
		},
		// Check that requesting more than available is handled gracefully, even if mid skip
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(),
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(4).Hash(),
				bc.GetBlockByNumber(1).Hash(),
			},
		},
		// Check that non-existent headers aren't returned
		{
			&getBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1},
			[]common.Hash{},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() + 1}, Amount: 1},
			[]common.Hash{},
		},
	}
	// Run each of the tests and verify the results against the chain
	var reqID uint64
	for i, tt := range tests {
		// Collect the headers to expect in the response
		var headers []*types.Header
		for _, hash := range tt.expect {
			headers = append(headers, bc.GetHeaderByHash(hash))
		}
		// Send the hash request and verify the response
		reqID++

		sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, tt.query)
		if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil {
			t.Errorf("test %d: headers mismatch: %v", i, err)
		}
	}
}

// Tests that block contents can be retrieved from a remote chain based on their hashes.
func TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) }
func TestGetBlockBodiesLes3(t *testing.T) { testGetBlockBodies(t, 3) }

func testGetBlockBodies(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, downloader.MaxBlockFetch+15, protocol, nil, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Create a batch of tests for various scenarios
	limit := MaxBodyFetch
	tests := []struct {
		random    int           // Number of blocks to fetch randomly from the chain
		explicit  []common.Hash // Explicitly requested blocks
		available []bool        // Availability of explicitly requested blocks
		expected  int           // Total number of existing blocks to expect
	}{
		{1, nil, nil, 1},         // A single random block should be retrievable
		{10, nil, nil, 10},       // Multiple random blocks should be retrievable
		{limit, nil, nil, limit}, // The maximum possible blocks should be retrievable
		//{limit + 1, nil, nil, limit},                                  // No more than the possible block count should be returned
		{0, []common.Hash{bc.Genesis().Hash()}, []bool{true}, 1},      // The genesis block should be retrievable
		{0, []common.Hash{bc.CurrentBlock().Hash()}, []bool{true}, 1}, // The chain's head block should be retrievable
		{0, []common.Hash{{}}, []bool{false}, 0},                      // A non-existent block should not be returned

		// Existing and non-existing blocks interleaved should not cause problems
		{0, []common.Hash{
			{},
			bc.GetBlockByNumber(1).Hash(),
			{},
			bc.GetBlockByNumber(10).Hash(),
			{},
			bc.GetBlockByNumber(100).Hash(),
			{},
		}, []bool{false, true, false, true, false, true, false}, 3},
	}
	// Run each of the tests and verify the results against the chain
	var reqID uint64
	for i, tt := range tests {
		// Collect the hashes to request, and the response to expect
		var hashes []common.Hash
		seen := make(map[int64]bool)
		var bodies []*types.Body

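		// Pick tt.random distinct block numbers; rand.Int63n samples [0, head),
		// so the head block itself is never chosen at random.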
		for j := 0; j < tt.random; j++ {
			for {
				num := rand.Int63n(int64(bc.CurrentBlock().NumberU64()))
				if !seen[num] {
					seen[num] = true

					block := bc.GetBlockByNumber(uint64(num))
					hashes = append(hashes, block.Hash())
					if len(bodies) < tt.expected {
						bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})
					}
					break
				}
			}
		}
		for j, hash := range tt.explicit {
			hashes = append(hashes, hash)
			if tt.available[j] && len(bodies) < tt.expected {
				block := bc.GetBlockByHash(hash)
				bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})
			}
		}
		reqID++

		// Send the hash request and verify the response
		sendRequest(server.peer.app, GetBlockBodiesMsg, reqID, hashes)
		if err := expectResponse(server.peer.app, BlockBodiesMsg, reqID, testBufLimit, bodies); err != nil {
			t.Errorf("test %d: bodies mismatch: %v", i, err)
		}
	}
}

// Tests that the contract codes can be retrieved based on account addresses.
func TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) }
func TestGetCodeLes3(t *testing.T) { testGetCode(t, 3) }

func testGetCode(t *testing.T, protocol int) {
	// Assemble the test environment
	server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
	defer tearDown()
	bc := server.handler.blockchain

	var codereqs []*CodeReq
	var codes [][]byte
	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
		header := bc.GetHeaderByNumber(i)
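		// Code requests are keyed by the SHA3 hash of the account address
		// (the secure state-trie key), not by the raw address.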
		req := &CodeReq{
			BHash:  header.Hash(),
			AccKey: crypto.SHA3(testContractAddr[:]),
		}
		codereqs = append(codereqs, req)
		if i >= testContractDeployed {
			codes = append(codes, testContractCodeDeployed)
		}
	}

	sendRequest(server.peer.app, GetCodeMsg, 42, codereqs)
	if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, codes); err != nil {
		t.Errorf("codes mismatch: %v", err)
	}
}

// Tests that stale contract code can't be retrieved based on account addresses.
func TestGetStaleCodeLes2(t *testing.T) { testGetStaleCode(t, 2) }
func TestGetStaleCodeLes3(t *testing.T) { testGetStaleCode(t, 3) }

func testGetStaleCode(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, core.TriesInMemory+4, protocol, nil, false, true, 0)
	defer tearDown()
	bc := server.handler.blockchain

	check := func(number uint64, expected [][]byte) {
		req := &CodeReq{
			BHash:  bc.GetHeaderByNumber(number).Hash(),
			AccKey: crypto.SHA3(testContractAddr[:]),
		}
		sendRequest(server.peer.app, GetCodeMsg, 42, []*CodeReq{req})
		if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, expected); err != nil {
			t.Errorf("codes mismatch: %v", err)
		}
	}
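	// State older than core.TriesInMemory blocks is pruned on the server side,
	// so code lookups against those historical blocks should come back empty.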
	check(0, [][]byte{})                                                          // Non-existent contract
	check(testContractDeployed, [][]byte{})                                       // Stale contract
	check(bc.CurrentHeader().Number.Uint64(), [][]byte{testContractCodeDeployed}) // Fresh contract
}

// Tests that the transaction receipts can be retrieved based on hashes.
func TestGetReceiptLes2(t *testing.T) { testGetReceipt(t, 2) }
func TestGetReceiptLes3(t *testing.T) { testGetReceipt(t, 3) }

func testGetReceipt(t *testing.T, protocol int) {
	// Assemble the test environment
	server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Collect the hashes to request, and the response to expect
	var receipts []types.Receipts
	var hashes []common.Hash
	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
		block := bc.GetBlockByNumber(i)

		hashes = append(hashes, block.Hash())
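		// ReadRawReceipts returns the receipts exactly as stored, without deriving
		// block-dependent fields, which should match what the server sends on the wire.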
		receipts = append(receipts, rawdb.ReadRawReceipts(server.db, block.Hash(), block.NumberU64()))
	}
	// Send the hash request and verify the response
	sendRequest(server.peer.app, GetReceiptsMsg, 42, hashes)
	if err := expectResponse(server.peer.app, ReceiptsMsg, 42, testBufLimit, receipts); err != nil {
		t.Errorf("receipts mismatch: %v", err)
	}
}

// Tests that trie Merkle proofs can be retrieved.
func TestGetProofsLes2(t *testing.T) { testGetProofs(t, 2) }
func TestGetProofsLes3(t *testing.T) { testGetProofs(t, 3) }

func testGetProofs(t *testing.T, protocol int) {
	// Assemble the test environment
	server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	var proofreqs []ProofReq
	proofsV2 := light.NewNodeSet()

	accounts := []common.Address{bankKey.Address(), userKey1.Address(), userKey2.Address(), signerKey.Address(), {}}
	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
		header := bc.GetHeaderByNumber(i)
		tr, _ := trie.New(header.Root, trie.NewDatabase(server.db))

		for _, acc := range accounts {
			req := ProofReq{
				BHash: header.Hash(),
				Key:   crypto.SHA3(acc[:]),
			}
			proofreqs = append(proofreqs, req)
			tr.Prove(crypto.SHA3(acc[:]), 0, proofsV2)
		}
	}
	// Send the proof request and verify the response
	sendRequest(server.peer.app, GetProofsV2Msg, 42, proofreqs)
	if err := expectResponse(server.peer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
		t.Errorf("proofs mismatch: %v", err)
	}
}

// Tests that stale Merkle proofs can't be retrieved based on account addresses.
func TestGetStaleProofLes2(t *testing.T) { testGetStaleProof(t, 2) }
func TestGetStaleProofLes3(t *testing.T) { testGetStaleProof(t, 3) }

func testGetStaleProof(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, core.TriesInMemory+4, protocol, nil, false, true, 0)
	defer tearDown()
	bc := server.handler.blockchain

	check := func(number uint64, wantOK bool) {
		var (
			header  = bc.GetHeaderByNumber(number)
			account = crypto.SHA3(userKey1.Address().Bytes())
		)
		req := &ProofReq{
			BHash: header.Hash(),
			Key:   account,
		}
		sendRequest(server.peer.app, GetProofsV2Msg, 42, []*ProofReq{req})

		var expected []rlp.RawValue
		if wantOK {
			proofsV2 := light.NewNodeSet()
			tr, _ := trie.New(header.Root, trie.NewDatabase(server.db))
			tr.Prove(account, 0, proofsV2)
			expected = proofsV2.NodeList()
		}
		if err := expectResponse(server.peer.app, ProofsV2Msg, 42, testBufLimit, expected); err != nil {
			t.Errorf("proofs mismatch: %v", err)
		}
	}
	check(0, false)                                 // Non-existent proof
	check(2, false)                                 // Stale proof
	check(bc.CurrentHeader().Number.Uint64(), true) // Fresh proof
}

// Tests that CHT proofs can be correctly retrieved.
func TestGetCHTProofsLes2(t *testing.T) { testGetCHTProofs(t, 2) }
func TestGetCHTProofsLes3(t *testing.T) { testGetCHTProofs(t, 3) }

func testGetCHTProofs(t *testing.T, protocol int) {
	config := light.TestServerIndexerConfig

	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
		for {
			cs, _, _ := cIndexer.Sections()
			if cs >= 1 {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	}
	server, tearDown := newServerEnv(t, int(config.ChtSize+config.ChtConfirms), protocol, waitIndexers, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Assemble the proofs from the different protocols
	header := bc.GetHeaderByNumber(config.ChtSize - 1)
	rlpHeader, _ := rlp.EncodeToBytes(header)

	key := make([]byte, 8)
	binary.BigEndian.PutUint64(key, config.ChtSize-1)

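	// A CHT entry is keyed by the 8-byte big-endian block number; the reply carries
	// the trie proof plus the RLP-encoded header as auxiliary data.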
	proofsV2 := HelperTrieResps{
		AuxData: [][]byte{rlpHeader},
	}
	root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash())
	tr, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
	tr.Prove(key, 0, &proofsV2.Proofs)
	// Assemble the requests for the different protocols
	requestsV2 := []HelperTrieReq{{
		Type:    htCanonical,
		TrieIdx: 0,
		Key:     key,
		AuxReq:  auxHeader,
	}}
	// Send the proof request and verify the response
	sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, requestsV2)
	if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
		t.Errorf("proofs mismatch: %v", err)
	}
}

// Tests that bloombits proofs can be correctly retrieved.
func TestGetBloombitsProofsLes2(t *testing.T) { testGetBloombitsProofs(t, 2) }
func TestGetBloombitsProofsLes3(t *testing.T) { testGetBloombitsProofs(t, 3) }

func testGetBloombitsProofs(t *testing.T, protocol int) {
	config := light.TestServerIndexerConfig

	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
		for {
			bts, _, _ := btIndexer.Sections()
			if bts >= 1 {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	}
	server, tearDown := newServerEnv(t, int(config.BloomTrieSize+config.BloomTrieConfirms), protocol, waitIndexers, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Request and verify each bit of the bloom bits proofs
	for bit := 0; bit < 2048; bit++ {
		// Assemble the request and proofs for the bloombits
		key := make([]byte, 10)

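		// The bloom trie key is the 2-byte bit index followed by the 8-byte section index.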
		binary.BigEndian.PutUint16(key[:2], uint16(bit))
		// Only the first bloom section has data.
		binary.BigEndian.PutUint64(key[2:], 0)

		requests := []HelperTrieReq{{
			Type:    htBloomBits,
			TrieIdx: 0,
			Key:     key,
		}}
		var proofs HelperTrieResps

		root := light.GetBloomTrieRoot(server.db, 0, bc.GetHeaderByNumber(config.BloomTrieSize-1).Hash())
		tr, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.BloomTrieTablePrefix)))
		tr.Prove(key, 0, &proofs.Proofs)

		// Send the proof request and verify the response
		sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, requests)
		if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofs); err != nil {
			t.Errorf("bit %d: proofs mismatch: %v", bit, err)
		}
	}
}

func TestTransactionStatusLes2(t *testing.T) { testTransactionStatus(t, 2) }
func TestTransactionStatusLes3(t *testing.T) { testTransactionStatus(t, 3) }

func testTransactionStatus(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, 0, protocol, nil, false, true, 0)
	defer tearDown()
	server.handler.addTxsSync = true

	chain := server.handler.blockchain

	var reqID uint64

	test := func(tx *types.Transaction, send bool, expStatus light.TxStatus) {
		reqID++
		if send {
			sendRequest(server.peer.app, SendTxV2Msg, reqID, types.Transactions{tx})
		} else {
			sendRequest(server.peer.app, GetTxStatusMsg, reqID, []common.Hash{tx.Hash()})
		}
		if err := expectResponse(server.peer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil {
			t.Errorf("transaction status mismatch: %v", err)
		}
	}
	signer := types.NewNucleusSigner(server.backend.Blockchain().Config().NetworkID)

	// test error status by sending an underpriced transaction
	tx0, _ := types.SignTx(types.NewTransaction(0, userKey1.Address(), big.NewInt(10000), params.TxEnergy, nil, nil), signer, bankKey)
	test(tx0, true, light.TxStatus{Status: core.TxStatusUnknown, Error: core.ErrUnderpriced.Error()})

	tx1, _ := types.SignTx(types.NewTransaction(0, userKey1.Address(), big.NewInt(10000), params.TxEnergy, big.NewInt(100000000000), nil), signer, bankKey)
	test(tx1, false, light.TxStatus{Status: core.TxStatusUnknown}) // query before sending, should be unknown
	test(tx1, true, light.TxStatus{Status: core.TxStatusPending})  // send valid processable tx, should return pending
	test(tx1, true, light.TxStatus{Status: core.TxStatusPending})  // adding it again should not return an error

	tx2, _ := types.SignTx(types.NewTransaction(1, userKey1.Address(), big.NewInt(10000), params.TxEnergy, big.NewInt(100000000000), nil), signer, bankKey)
	tx3, _ := types.SignTx(types.NewTransaction(2, userKey1.Address(), big.NewInt(10000), params.TxEnergy, big.NewInt(100000000000), nil), signer, bankKey)
	// send transactions in the wrong order, tx3 should be queued
	test(tx3, true, light.TxStatus{Status: core.TxStatusQueued})
	test(tx2, true, light.TxStatus{Status: core.TxStatusPending})
	// query again, now tx3 should be pending too
	test(tx3, false, light.TxStatus{Status: core.TxStatusPending})

	// generate and add a block with tx1 and tx2 included
	gchain, _ := core.GenerateChain(params.MainnetChainConfig, chain.GetBlockByNumber(0), cryptore.NewFaker(), server.db, 1, func(i int, block *core.BlockGen) {
		block.AddTx(tx1)
		block.AddTx(tx2)
	})
	if _, err := chain.InsertChain(gchain); err != nil {
		panic(err)
	}
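	// tx1 and tx2 are now mined, so only tx3 should remain in the pending pool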
	// wait until TxPool processes the inserted block
	for i := 0; i < 10; i++ {
		if pending, _ := server.handler.txpool.Stats(); pending == 1 {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	if pending, _ := server.handler.txpool.Stats(); pending != 1 {
		t.Fatalf("pending count mismatch: have %d, want 1", pending)
	}
	// Discard the new block announcement
	msg, _ := server.peer.app.ReadMsg()
	msg.Discard()

	// check that their status shows as included now
	block1hash := rawdb.ReadCanonicalHash(server.db, 1)
	test(tx1, false, light.TxStatus{Status: core.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 0}})

	test(tx2, false, light.TxStatus{Status: core.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 1}})

	// create a reorg that rolls them back
	gchain, _ = core.GenerateChain(params.MainnetChainConfig, chain.GetBlockByNumber(0), cryptore.NewFaker(), server.db, 2, func(i int, block *core.BlockGen) {})
	if _, err := chain.InsertChain(gchain); err != nil {
		panic(err)
	}
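	// the reorged-out tx1 and tx2 rejoin tx3 in the pool, giving three pending transactions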
	// wait until TxPool processes the reorg
	for i := 0; i < 10; i++ {
		if pending, _ := server.handler.txpool.Stats(); pending == 3 {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	if pending, _ := server.handler.txpool.Stats(); pending != 3 {
		t.Fatalf("pending count mismatch: have %d, want 3", pending)
	}
	// Discard the new block announcement
	msg, _ = server.peer.app.ReadMsg()
	msg.Discard()

	// check that their status is pending again
	test(tx1, false, light.TxStatus{Status: core.TxStatusPending})
	test(tx2, false, light.TxStatus{Status: core.TxStatusPending})
}

func TestStopResumeLes3(t *testing.T) {
	server, tearDown := newServerEnv(t, 0, 3, nil, true, true, testBufLimit/10)
	defer tearDown()

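	// In testing mode the cost tracker charges a fixed, deterministic cost per
	// request, so the buffer bookkeeping below stays exact.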
	server.handler.server.costTracker.testing = true

	var (
		reqID    uint64
		expBuf   = testBufLimit
		testCost = testBufLimit / 10
	)
	header := server.handler.blockchain.CurrentHeader()
	req := func() {
		reqID++
		sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, &getBlockHeadersData{Origin: hashOrNumber{Hash: header.Hash()}, Amount: 1})
	}
	for i := 1; i <= 5; i++ {
		// send requests while we still have enough buffer and expect a response
		for expBuf >= testCost {
			req()
			expBuf -= testCost
			if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, expBuf, []*types.Header{header}); err != nil {
				t.Errorf("expected response and failed: %v", err)
			}
		}
		// send some more requests in excess and expect a single StopMsg
		c := i
		for c > 0 {
			req()
			c--
		}
		if err := p2p.ExpectMsg(server.peer.app, StopMsg, nil); err != nil {
			t.Errorf("expected StopMsg and failed: %v", err)
		}
		// wait until the buffer is recharged by half of the limit
		wait := testBufLimit / testBufRecharge / 2
		server.clock.(*mclock.Simulated).Run(time.Millisecond * time.Duration(wait))

		// expect a ResumeMsg with the partially recharged buffer value
		expBuf += testBufRecharge * wait
		if err := p2p.ExpectMsg(server.peer.app, ResumeMsg, expBuf); err != nil {
			t.Errorf("expected ResumeMsg and failed: %v", err)
		}
	}
}