github.com/wzbox/go-ethereum@v1.9.2/les/handler_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"encoding/binary"
	"math/big"
	"math/rand"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)

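// expectResponse reads the next message from r and verifies that it is a reply
// carrying the given message code, request ID, buffer value and payload.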
func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}) error {
	type resp struct {
		ReqID, BV uint64
		Data      interface{}
	}
	return p2p.ExpectMsg(r, msgcode, resp{reqID, bv, data})
}

// Tests that block headers can be retrieved from a remote chain based on user queries.
func TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) }

func testGetBlockHeaders(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, downloader.MaxHashFetch+15, protocol, nil)
	defer tearDown()
	bc := server.pm.blockchain.(*core.BlockChain)

	// Create a "random" unknown hash for testing
	var unknown common.Hash
	for i := range unknown {
		unknown[i] = byte(i)
	}
	// Create a batch of tests for various scenarios
	limit := uint64(MaxHeaderFetch)
	tests := []struct {
		query  *getBlockHeadersData // The query to execute for header retrieval
		expect []common.Hash        // The hashes of the blocks whose headers are expected
	}{
		// A single random block should be retrievable by hash and by number
		{
			&getBlockHeadersData{Origin: hashOrNumber{Hash: bc.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},
		},
		// Multiple headers should be retrievable in both directions
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 + 1).Hash(),
				bc.GetBlockByNumber(limit/2 + 2).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 - 1).Hash(),
				bc.GetBlockByNumber(limit/2 - 2).Hash(),
			},
		},
		// Multiple headers with skip lists should be retrievable
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 + 4).Hash(),
				bc.GetBlockByNumber(limit/2 + 8).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 - 4).Hash(),
				bc.GetBlockByNumber(limit/2 - 8).Hash(),
			},
		},
		// The chain endpoints should be retrievable
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(0).Hash()},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64()}, Amount: 1},
			[]common.Hash{bc.CurrentBlock().Hash()},
		},
		// Ensure protocol limits are honored
		/*{
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true},
			bc.GetBlockHashesFromHash(bc.CurrentBlock().Hash(), limit),
		},*/
		// Check that requesting more than available is handled gracefully
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(),
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64()).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(4).Hash(),
				bc.GetBlockByNumber(0).Hash(),
			},
		},
		// Check that requesting more than available is handled gracefully, even if mid-skip
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(),
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(4).Hash(),
				bc.GetBlockByNumber(1).Hash(),
			},
		},
		// Check that non-existent headers aren't returned
		{
			&getBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1},
			[]common.Hash{},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() + 1}, Amount: 1},
			[]common.Hash{},
		},
	}
	// Run each of the tests and verify the results against the chain
	var reqID uint64
	for i, tt := range tests {
		// Collect the headers to expect in the response
		var headers []*types.Header
		for _, hash := range tt.expect {
			headers = append(headers, bc.GetHeaderByHash(hash))
		}
		// Send the hash request and verify the response
		reqID++
		cost := server.tPeer.GetRequestCost(GetBlockHeadersMsg, int(tt.query.Amount))
		sendRequest(server.tPeer.app, GetBlockHeadersMsg, reqID, cost, tt.query)
		if err := expectResponse(server.tPeer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil {
			t.Errorf("test %d: headers mismatch: %v", i, err)
		}
	}
}

// Tests that block contents can be retrieved from a remote chain based on their hashes.
func TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) }

func testGetBlockBodies(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, downloader.MaxBlockFetch+15, protocol, nil)
	defer tearDown()
	bc := server.pm.blockchain.(*core.BlockChain)

	// Create a batch of tests for various scenarios
	limit := MaxBodyFetch
	tests := []struct {
		random    int           // Number of blocks to fetch randomly from the chain
		explicit  []common.Hash // Explicitly requested blocks
		available []bool        // Availability of explicitly requested blocks
		expected  int           // Total number of existing blocks to expect
	}{
		{1, nil, nil, 1},         // A single random block should be retrievable
		{10, nil, nil, 10},       // Multiple random blocks should be retrievable
		{limit, nil, nil, limit}, // The maximum possible blocks should be retrievable
		//{limit + 1, nil, nil, limit},                                  // No more than the possible block count should be returned
		{0, []common.Hash{bc.Genesis().Hash()}, []bool{true}, 1},      // The genesis block should be retrievable
		{0, []common.Hash{bc.CurrentBlock().Hash()}, []bool{true}, 1}, // The chain's head block should be retrievable
		{0, []common.Hash{{}}, []bool{false}, 0},                      // A non-existent block should not be returned

		// Existing and non-existing blocks interleaved should not cause problems
		{0, []common.Hash{
			{},
			bc.GetBlockByNumber(1).Hash(),
			{},
			bc.GetBlockByNumber(10).Hash(),
			{},
			bc.GetBlockByNumber(100).Hash(),
			{},
		}, []bool{false, true, false, true, false, true, false}, 3},
	}
	// Run each of the tests and verify the results against the chain
	var reqID uint64
	for i, tt := range tests {
		// Collect the hashes to request, and the response to expect
		var hashes []common.Hash
		seen := make(map[int64]bool)
		var bodies []*types.Body

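		// Pick tt.random distinct random blocks from the chain and remember their
		// bodies (up to the expected count) as the reply we anticipate.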
		for j := 0; j < tt.random; j++ {
			for {
				num := rand.Int63n(int64(bc.CurrentBlock().NumberU64()))
				if !seen[num] {
					seen[num] = true

					block := bc.GetBlockByNumber(uint64(num))
					hashes = append(hashes, block.Hash())
					if len(bodies) < tt.expected {
						bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})
					}
					break
				}
			}
		}
		for j, hash := range tt.explicit {
			hashes = append(hashes, hash)
			if tt.available[j] && len(bodies) < tt.expected {
				block := bc.GetBlockByHash(hash)
				bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})
			}
		}
		reqID++
		// Send the hash request and verify the response
		cost := server.tPeer.GetRequestCost(GetBlockBodiesMsg, len(hashes))
		sendRequest(server.tPeer.app, GetBlockBodiesMsg, reqID, cost, hashes)
		if err := expectResponse(server.tPeer.app, BlockBodiesMsg, reqID, testBufLimit, bodies); err != nil {
			t.Errorf("test %d: bodies mismatch: %v", i, err)
		}
	}
}

// Tests that contract code can be retrieved based on account addresses.
func TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) }

func testGetCode(t *testing.T, protocol int) {
	// Assemble the test environment
	server, tearDown := newServerEnv(t, 4, protocol, nil)
	defer tearDown()
	bc := server.pm.blockchain.(*core.BlockChain)

	var codereqs []*CodeReq
	var codes [][]byte
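	// Request the code of the test contract at every block height; only heights
	// at or after the deployment block are expected to return the deployed code.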
	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
		header := bc.GetHeaderByNumber(i)
		req := &CodeReq{
			BHash:  header.Hash(),
			AccKey: crypto.Keccak256(testContractAddr[:]),
		}
		codereqs = append(codereqs, req)
		if i >= testContractDeployed {
			codes = append(codes, testContractCodeDeployed)
		}
	}

	cost := server.tPeer.GetRequestCost(GetCodeMsg, len(codereqs))
	sendRequest(server.tPeer.app, GetCodeMsg, 42, cost, codereqs)
	if err := expectResponse(server.tPeer.app, CodeMsg, 42, testBufLimit, codes); err != nil {
		t.Errorf("codes mismatch: %v", err)
	}
}

// Tests that stale contract code can't be retrieved based on account addresses.
func TestGetStaleCodeLes2(t *testing.T) { testGetStaleCode(t, 2) }
func TestGetStaleCodeLes3(t *testing.T) { testGetStaleCode(t, 3) }

func testGetStaleCode(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, core.TriesInMemory+4, protocol, nil)
	defer tearDown()
	bc := server.pm.blockchain.(*core.BlockChain)

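	// check requests the contract code at the given block and verifies the reply;
	// code for blocks whose state has already been pruned (older than
	// core.TriesInMemory) is expected to come back empty.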
	check := func(number uint64, expected [][]byte) {
		req := &CodeReq{
			BHash:  bc.GetHeaderByNumber(number).Hash(),
			AccKey: crypto.Keccak256(testContractAddr[:]),
		}
		cost := server.tPeer.GetRequestCost(GetCodeMsg, 1)
		sendRequest(server.tPeer.app, GetCodeMsg, 42, cost, []*CodeReq{req})
		if err := expectResponse(server.tPeer.app, CodeMsg, 42, testBufLimit, expected); err != nil {
			t.Errorf("codes mismatch: %v", err)
		}
	}
	check(0, [][]byte{})                                                          // Non-existent contract
	check(testContractDeployed, [][]byte{})                                       // Stale contract
	check(bc.CurrentHeader().Number.Uint64(), [][]byte{testContractCodeDeployed}) // Fresh contract
}

// Tests that the transaction receipts can be retrieved based on hashes.
func TestGetReceiptLes2(t *testing.T) { testGetReceipt(t, 2) }

func testGetReceipt(t *testing.T, protocol int) {
	// Assemble the test environment
	server, tearDown := newServerEnv(t, 4, protocol, nil)
	defer tearDown()
	bc := server.pm.blockchain.(*core.BlockChain)

	// Collect the hashes to request, and the response to expect
	var receipts []types.Receipts
	var hashes []common.Hash
	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
		block := bc.GetBlockByNumber(i)

		hashes = append(hashes, block.Hash())
		receipts = append(receipts, rawdb.ReadRawReceipts(server.db, block.Hash(), block.NumberU64()))
	}
	// Send the hash request and verify the response
	cost := server.tPeer.GetRequestCost(GetReceiptsMsg, len(hashes))
	sendRequest(server.tPeer.app, GetReceiptsMsg, 42, cost, hashes)
	if err := expectResponse(server.tPeer.app, ReceiptsMsg, 42, testBufLimit, receipts); err != nil {
		t.Errorf("receipts mismatch: %v", err)
	}
}

// Tests that trie Merkle proofs can be retrieved.
func TestGetProofsLes2(t *testing.T) { testGetProofs(t, 2) }

func testGetProofs(t *testing.T, protocol int) {
	// Assemble the test environment
	server, tearDown := newServerEnv(t, 4, protocol, nil)
	defer tearDown()
	bc := server.pm.blockchain.(*core.BlockChain)

	var proofreqs []ProofReq
	proofsV2 := light.NewNodeSet()

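	// Prove the bank, user and empty accounts against the state root of every
	// block; the aggregated node set is the response the server should return.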
	accounts := []common.Address{bankAddr, userAddr1, userAddr2, {}}
	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
		header := bc.GetHeaderByNumber(i)
		trie, _ := trie.New(header.Root, trie.NewDatabase(server.db))

		for _, acc := range accounts {
			req := ProofReq{
				BHash: header.Hash(),
				Key:   crypto.Keccak256(acc[:]),
			}
			proofreqs = append(proofreqs, req)
			trie.Prove(crypto.Keccak256(acc[:]), 0, proofsV2)
		}
	}
	// Send the proof request and verify the response
	cost := server.tPeer.GetRequestCost(GetProofsV2Msg, len(proofreqs))
	sendRequest(server.tPeer.app, GetProofsV2Msg, 42, cost, proofreqs)
	if err := expectResponse(server.tPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
		t.Errorf("proofs mismatch: %v", err)
	}
}

// Tests that stale Merkle proofs can't be retrieved based on account addresses.
func TestGetStaleProofLes2(t *testing.T) { testGetStaleProof(t, 2) }
func TestGetStaleProofLes3(t *testing.T) { testGetStaleProof(t, 3) }

func testGetStaleProof(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, core.TriesInMemory+4, protocol, nil)
	defer tearDown()
	bc := server.pm.blockchain.(*core.BlockChain)

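	// check requests an account proof at the given block; wantOK indicates whether
	// the server is still expected to have the corresponding state available.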
	check := func(number uint64, wantOK bool) {
		var (
			header  = bc.GetHeaderByNumber(number)
			account = crypto.Keccak256(userAddr1.Bytes())
		)
		req := &ProofReq{
			BHash: header.Hash(),
			Key:   account,
		}
		cost := server.tPeer.GetRequestCost(GetProofsV2Msg, 1)
		sendRequest(server.tPeer.app, GetProofsV2Msg, 42, cost, []*ProofReq{req})

		var expected []rlp.RawValue
		if wantOK {
			proofsV2 := light.NewNodeSet()
			t, _ := trie.New(header.Root, trie.NewDatabase(server.db))
			t.Prove(account, 0, proofsV2)
			expected = proofsV2.NodeList()
		}
		if err := expectResponse(server.tPeer.app, ProofsV2Msg, 42, testBufLimit, expected); err != nil {
			t.Errorf("proofs mismatch: %v", err)
		}
	}
	check(0, false)                                 // Non-existent proof
	check(2, false)                                 // Stale proof
	check(bc.CurrentHeader().Number.Uint64(), true) // Fresh proof
}

// Tests that CHT proofs can be correctly retrieved.
func TestGetCHTProofsLes2(t *testing.T) { testGetCHTProofs(t, 2) }

func testGetCHTProofs(t *testing.T, protocol int) {
	config := light.TestServerIndexerConfig

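	// waitIndexers blocks until the CHT indexer has generated at least one
	// section, so the server has a CHT to serve proofs from.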
	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
		for {
			cs, _, _ := cIndexer.Sections()
			if cs >= 1 {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	}
	server, tearDown := newServerEnv(t, int(config.ChtSize+config.ChtConfirms), protocol, waitIndexers)
	defer tearDown()
	bc := server.pm.blockchain.(*core.BlockChain)

	// Assemble the proofs from the different protocols
	header := bc.GetHeaderByNumber(config.ChtSize - 1)
	rlp, _ := rlp.EncodeToBytes(header)

	key := make([]byte, 8)
	binary.BigEndian.PutUint64(key, config.ChtSize-1)

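	// The CHT is keyed by the big-endian block number; request the proof for the
	// last header of the first section and expect the RLP-encoded header itself
	// to be returned as auxiliary data.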
	proofsV2 := HelperTrieResps{
		AuxData: [][]byte{rlp},
	}
	root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash())
	trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
	trie.Prove(key, 0, &proofsV2.Proofs)
	// Assemble the requests for the different protocols
	requestsV2 := []HelperTrieReq{{
		Type:    htCanonical,
		TrieIdx: 0,
		Key:     key,
		AuxReq:  auxHeader,
	}}
	// Send the proof request and verify the response
	cost := server.tPeer.GetRequestCost(GetHelperTrieProofsMsg, len(requestsV2))
	sendRequest(server.tPeer.app, GetHelperTrieProofsMsg, 42, cost, requestsV2)
	if err := expectResponse(server.tPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
		t.Errorf("proofs mismatch: %v", err)
	}
}

// Tests that bloombits proofs can be correctly retrieved.
func TestGetBloombitsProofs(t *testing.T) {
	config := light.TestServerIndexerConfig

	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
		for {
			bts, _, _ := btIndexer.Sections()
			if bts >= 1 {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	}
	server, tearDown := newServerEnv(t, int(config.BloomTrieSize+config.BloomTrieConfirms), 2, waitIndexers)
	defer tearDown()
	bc := server.pm.blockchain.(*core.BlockChain)

	// Request and verify each bit of the bloom bits proofs
	for bit := 0; bit < 2048; bit++ {
		// Assemble the request and proofs for the bloombits
		key := make([]byte, 10)

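		// The bloom trie key is the 2-byte big-endian bit index followed by the
		// 8-byte section index.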
		binary.BigEndian.PutUint16(key[:2], uint16(bit))
		// Only the first bloom section has data.
		binary.BigEndian.PutUint64(key[2:], 0)

		requests := []HelperTrieReq{{
			Type:    htBloomBits,
			TrieIdx: 0,
			Key:     key,
		}}
		var proofs HelperTrieResps

		root := light.GetBloomTrieRoot(server.db, 0, bc.GetHeaderByNumber(config.BloomTrieSize-1).Hash())
		trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.BloomTrieTablePrefix)))
		trie.Prove(key, 0, &proofs.Proofs)

		// Send the proof request and verify the response
		cost := server.tPeer.GetRequestCost(GetHelperTrieProofsMsg, len(requests))
		sendRequest(server.tPeer.app, GetHelperTrieProofsMsg, 42, cost, requests)
		if err := expectResponse(server.tPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofs); err != nil {
			t.Errorf("bit %d: proofs mismatch: %v", bit, err)
		}
	}
}

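// Tests that transactions can be submitted and their status queried over les,
// covering the unknown, queued, pending and included states as well as a reorg.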
func TestTransactionStatusLes2(t *testing.T) {
	server, tearDown := newServerEnv(t, 0, 2, nil)
	defer tearDown()
	server.pm.addTxsSync = true

	chain := server.pm.blockchain.(*core.BlockChain)
	config := core.DefaultTxPoolConfig
	config.Journal = ""
	txpool := core.NewTxPool(config, params.TestChainConfig, chain)
	server.pm.txpool = txpool
	peer, _ := newTestPeer(t, "peer", 2, server.pm, true, 0)
	defer peer.close()

	var reqID uint64

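	// test either submits the transaction via SendTxV2Msg or queries its hash via
	// GetTxStatusMsg, then verifies the status carried in the TxStatusMsg reply.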
	test := func(tx *types.Transaction, send bool, expStatus light.TxStatus) {
		reqID++
		if send {
			cost := server.tPeer.GetRequestCost(SendTxV2Msg, 1)
			sendRequest(server.tPeer.app, SendTxV2Msg, reqID, cost, types.Transactions{tx})
		} else {
			cost := server.tPeer.GetRequestCost(GetTxStatusMsg, 1)
			sendRequest(server.tPeer.app, GetTxStatusMsg, reqID, cost, []common.Hash{tx.Hash()})
		}
		if err := expectResponse(server.tPeer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil {
			t.Errorf("transaction status mismatch: %v", err)
		}
	}

	signer := types.HomesteadSigner{}

	// test error status by sending an underpriced transaction
	tx0, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, nil, nil), signer, bankKey)
	test(tx0, true, light.TxStatus{Status: core.TxStatusUnknown, Error: core.ErrUnderpriced.Error()})

	tx1, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
	test(tx1, false, light.TxStatus{Status: core.TxStatusUnknown}) // query before sending, should be unknown
	test(tx1, true, light.TxStatus{Status: core.TxStatusPending})  // send valid processable tx, should return pending
	test(tx1, true, light.TxStatus{Status: core.TxStatusPending})  // adding it again should not return an error

	tx2, _ := types.SignTx(types.NewTransaction(1, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
	tx3, _ := types.SignTx(types.NewTransaction(2, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
	// send transactions in the wrong order, tx3 should be queued
	test(tx3, true, light.TxStatus{Status: core.TxStatusQueued})
	test(tx2, true, light.TxStatus{Status: core.TxStatusPending})
	// query again, now tx3 should be pending too
	test(tx3, false, light.TxStatus{Status: core.TxStatusPending})

	// generate and add a block with tx1 and tx2 included
	gchain, _ := core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), server.db, 1, func(i int, block *core.BlockGen) {
		block.AddTx(tx1)
		block.AddTx(tx2)
	})
	if _, err := chain.InsertChain(gchain); err != nil {
		panic(err)
	}
	// wait until TxPool processes the inserted block
	for i := 0; i < 10; i++ {
		if pending, _ := txpool.Stats(); pending == 1 {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	if pending, _ := txpool.Stats(); pending != 1 {
		t.Fatalf("pending count mismatch: have %d, want 1", pending)
	}

	// check if their status is included now
	block1hash := rawdb.ReadCanonicalHash(server.db, 1)
	test(tx1, false, light.TxStatus{Status: core.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 0}})
	test(tx2, false, light.TxStatus{Status: core.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 1}})

	// create a reorg that rolls them back
	gchain, _ = core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), server.db, 2, func(i int, block *core.BlockGen) {})
	if _, err := chain.InsertChain(gchain); err != nil {
		panic(err)
	}
	// wait until TxPool processes the reorg
	for i := 0; i < 10; i++ {
		if pending, _ := txpool.Stats(); pending == 3 {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	if pending, _ := txpool.Stats(); pending != 3 {
		t.Fatalf("pending count mismatch: have %d, want 3", pending)
	}
	// check if their status is pending again
	test(tx1, false, light.TxStatus{Status: core.TxStatusPending})
	test(tx2, false, light.TxStatus{Status: core.TxStatusPending})
}

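// Tests that a les/3 server sends StopMsg once a peer's flow control buffer is
// exhausted and a ResumeMsg after the buffer has partially recharged.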
func TestStopResumeLes3(t *testing.T) {
	db := rawdb.NewMemoryDatabase()
	clock := &mclock.Simulated{}
	testCost := testBufLimit / 10
	pm, _, err := newTestProtocolManager(false, 0, nil, nil, nil, db, nil, 0, testCost, clock)
	if err != nil {
		t.Fatalf("Failed to create protocol manager: %v", err)
	}
	peer, _ := newTestPeer(t, "peer", 3, pm, true, testCost)
	defer peer.close()

	expBuf := testBufLimit
	var reqID uint64

	header := pm.blockchain.CurrentHeader()
	req := func() {
		reqID++
		sendRequest(peer.app, GetBlockHeadersMsg, reqID, testCost, &getBlockHeadersData{Origin: hashOrNumber{Hash: header.Hash()}, Amount: 1})
	}

	for i := 1; i <= 5; i++ {
		// send requests while we still have enough buffer and expect a response
		for expBuf >= testCost {
			req()
			expBuf -= testCost
			if err := expectResponse(peer.app, BlockHeadersMsg, reqID, expBuf, []*types.Header{header}); err != nil {
				t.Fatalf("expected response and failed: %v", err)
			}
		}
		// send some more requests in excess and expect a single StopMsg
		c := i
		for c > 0 {
			req()
			c--
		}
		if err := p2p.ExpectMsg(peer.app, StopMsg, nil); err != nil {
			t.Errorf("expected StopMsg and failed: %v", err)
		}
		// wait until the buffer is recharged by half of the limit
		wait := testBufLimit / testBufRecharge / 2
		clock.Run(time.Millisecond * time.Duration(wait))
		// expect a ResumeMsg with the partially recharged buffer value
		expBuf += testBufRecharge * wait
		if err := p2p.ExpectMsg(peer.app, ResumeMsg, expBuf); err != nil {
			t.Errorf("expected ResumeMsg and failed: %v", err)
		}
	}
}