github.com/guiltylotus/go-ethereum@v1.9.7/les/handler_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"encoding/binary"
	"math/big"
	"math/rand"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)

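// expectResponse reads the next message from the given reader and checks that
// it is a reply carrying the expected message code, request ID, buffer value
// and payload.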
func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}) error {
	type resp struct {
		ReqID, BV uint64
		Data      interface{}
	}
	return p2p.ExpectMsg(r, msgcode, resp{reqID, bv, data})
}

// Tests that block headers can be retrieved from a remote chain based on user queries.
func TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) }
func TestGetBlockHeadersLes3(t *testing.T) { testGetBlockHeaders(t, 3) }

func testGetBlockHeaders(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, downloader.MaxHashFetch+15, protocol, nil, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Create a "random" unknown hash for testing
	var unknown common.Hash
	for i := range unknown {
		unknown[i] = byte(i)
	}
	// Create a batch of tests for various scenarios
	limit := uint64(MaxHeaderFetch)
	tests := []struct {
		query  *getBlockHeadersData // The query to execute for header retrieval
		expect []common.Hash        // The hashes of the blocks whose headers are expected
	}{
		// A single random block should be retrievable by hash and number too
		{
			&getBlockHeadersData{Origin: hashOrNumber{Hash: bc.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},
		},
		// Multiple headers should be retrievable in both directions
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 + 1).Hash(),
				bc.GetBlockByNumber(limit/2 + 2).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 - 1).Hash(),
				bc.GetBlockByNumber(limit/2 - 2).Hash(),
			},
		},
		// Multiple headers with skip lists should be retrievable
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 + 4).Hash(),
				bc.GetBlockByNumber(limit/2 + 8).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 - 4).Hash(),
				bc.GetBlockByNumber(limit/2 - 8).Hash(),
			},
		},
		// The chain endpoints should be retrievable
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(0).Hash()},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64()}, Amount: 1},
			[]common.Hash{bc.CurrentBlock().Hash()},
		},
		// Ensure protocol limits are honored
		//{
		//	&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true},
		//	[]common.Hash{},
		//},
		// Check that requesting more than available is handled gracefully
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(),
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64()).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(4).Hash(),
				bc.GetBlockByNumber(0).Hash(),
			},
		},
		// Check that requesting more than available is handled gracefully, even if mid skip
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(),
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(4).Hash(),
				bc.GetBlockByNumber(1).Hash(),
			},
		},
		// Check that non-existing headers aren't returned
		{
			&getBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1},
			[]common.Hash{},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() + 1}, Amount: 1},
			[]common.Hash{},
		},
	}
	// Run each of the tests and verify the results against the chain
	var reqID uint64
	for i, tt := range tests {
		// Collect the headers to expect in the response
		var headers []*types.Header
		for _, hash := range tt.expect {
			headers = append(headers, bc.GetHeaderByHash(hash))
		}
		// Send the hash request and verify the response
		reqID++

		cost := server.peer.peer.GetRequestCost(GetBlockHeadersMsg, int(tt.query.Amount))
		sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, cost, tt.query)
		if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil {
			t.Errorf("test %d: headers mismatch: %v", i, err)
		}
	}
}

// Tests that block contents can be retrieved from a remote chain based on their hashes.
func TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) }
func TestGetBlockBodiesLes3(t *testing.T) { testGetBlockBodies(t, 3) }

func testGetBlockBodies(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, downloader.MaxBlockFetch+15, protocol, nil, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Create a batch of tests for various scenarios
	limit := MaxBodyFetch
	tests := []struct {
		random    int           // Number of blocks to fetch randomly from the chain
		explicit  []common.Hash // Explicitly requested blocks
		available []bool        // Availability of explicitly requested blocks
		expected  int           // Total number of existing blocks to expect
	}{
		{1, nil, nil, 1},         // A single random block should be retrievable
		{10, nil, nil, 10},       // Multiple random blocks should be retrievable
		{limit, nil, nil, limit}, // The maximum possible blocks should be retrievable
		//{limit + 1, nil, nil, limit},                                  // No more than the possible block count should be returned
		{0, []common.Hash{bc.Genesis().Hash()}, []bool{true}, 1},      // The genesis block should be retrievable
		{0, []common.Hash{bc.CurrentBlock().Hash()}, []bool{true}, 1}, // The chain's head block should be retrievable
		{0, []common.Hash{{}}, []bool{false}, 0},                      // A non-existent block should not be returned

		// Existing and non-existing blocks interleaved should not cause problems
		{0, []common.Hash{
			{},
			bc.GetBlockByNumber(1).Hash(),
			{},
			bc.GetBlockByNumber(10).Hash(),
			{},
			bc.GetBlockByNumber(100).Hash(),
			{},
		}, []bool{false, true, false, true, false, true, false}, 3},
	}
	// Run each of the tests and verify the results against the chain
	var reqID uint64
	for i, tt := range tests {
		// Collect the hashes to request, and the response to expect
		var hashes []common.Hash
		seen := make(map[int64]bool)
		var bodies []*types.Body

		for j := 0; j < tt.random; j++ {
			for {
				num := rand.Int63n(int64(bc.CurrentBlock().NumberU64()))
				if !seen[num] {
					seen[num] = true

					block := bc.GetBlockByNumber(uint64(num))
					hashes = append(hashes, block.Hash())
					if len(bodies) < tt.expected {
						bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})
					}
					break
				}
			}
		}
		for j, hash := range tt.explicit {
			hashes = append(hashes, hash)
			if tt.available[j] && len(bodies) < tt.expected {
				block := bc.GetBlockByHash(hash)
				bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})
			}
		}
		reqID++

		// Send the hash request and verify the response
		cost := server.peer.peer.GetRequestCost(GetBlockBodiesMsg, len(hashes))
		sendRequest(server.peer.app, GetBlockBodiesMsg, reqID, cost, hashes)
		if err := expectResponse(server.peer.app, BlockBodiesMsg, reqID, testBufLimit, bodies); err != nil {
			t.Errorf("test %d: bodies mismatch: %v", i, err)
		}
	}
}

// Tests that the contract codes can be retrieved based on account addresses.
func TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) }
func TestGetCodeLes3(t *testing.T) { testGetCode(t, 3) }

func testGetCode(t *testing.T, protocol int) {
	// Assemble the test environment
	server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
	defer tearDown()
	bc := server.handler.blockchain

	var codereqs []*CodeReq
	var codes [][]byte
	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
		header := bc.GetHeaderByNumber(i)
		req := &CodeReq{
			BHash:  header.Hash(),
			AccKey: crypto.Keccak256(testContractAddr[:]),
		}
		codereqs = append(codereqs, req)
		if i >= testContractDeployed {
			codes = append(codes, testContractCodeDeployed)
		}
	}

	cost := server.peer.peer.GetRequestCost(GetCodeMsg, len(codereqs))
	sendRequest(server.peer.app, GetCodeMsg, 42, cost, codereqs)
	if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, codes); err != nil {
		t.Errorf("codes mismatch: %v", err)
	}
}

// Tests that the stale contract codes can't be retrieved based on account addresses.
func TestGetStaleCodeLes2(t *testing.T) { testGetStaleCode(t, 2) }
func TestGetStaleCodeLes3(t *testing.T) { testGetStaleCode(t, 3) }

func testGetStaleCode(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, core.TriesInMemory+4, protocol, nil, false, true, 0)
	defer tearDown()
	bc := server.handler.blockchain

	check := func(number uint64, expected [][]byte) {
		req := &CodeReq{
			BHash:  bc.GetHeaderByNumber(number).Hash(),
			AccKey: crypto.Keccak256(testContractAddr[:]),
		}
		cost := server.peer.peer.GetRequestCost(GetCodeMsg, 1)
		sendRequest(server.peer.app, GetCodeMsg, 42, cost, []*CodeReq{req})
		if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, expected); err != nil {
			t.Errorf("codes mismatch: %v", err)
		}
	}
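	// Code is only served while the referenced block's state is still available
	// on the server; requests against pruned (stale) states yield an empty reply.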
	check(0, [][]byte{})                                                          // Non-existent contract
	check(testContractDeployed, [][]byte{})                                       // Stale contract
	check(bc.CurrentHeader().Number.Uint64(), [][]byte{testContractCodeDeployed}) // Fresh contract
}

// Tests that the transaction receipts can be retrieved based on hashes.
func TestGetReceiptLes2(t *testing.T) { testGetReceipt(t, 2) }
func TestGetReceiptLes3(t *testing.T) { testGetReceipt(t, 3) }

func testGetReceipt(t *testing.T, protocol int) {
	// Assemble the test environment
	server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Collect the hashes to request, and the response to expect
	var receipts []types.Receipts
	var hashes []common.Hash
	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
		block := bc.GetBlockByNumber(i)

		hashes = append(hashes, block.Hash())
		receipts = append(receipts, rawdb.ReadRawReceipts(server.db, block.Hash(), block.NumberU64()))
	}
	// Send the hash request and verify the response
	cost := server.peer.peer.GetRequestCost(GetReceiptsMsg, len(hashes))
	sendRequest(server.peer.app, GetReceiptsMsg, 42, cost, hashes)
	if err := expectResponse(server.peer.app, ReceiptsMsg, 42, testBufLimit, receipts); err != nil {
		t.Errorf("receipts mismatch: %v", err)
	}
}

// Tests that trie merkle proofs can be retrieved
func TestGetProofsLes2(t *testing.T) { testGetProofs(t, 2) }
func TestGetProofsLes3(t *testing.T) { testGetProofs(t, 3) }

func testGetProofs(t *testing.T, protocol int) {
	// Assemble the test environment
	server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	var proofreqs []ProofReq
	proofsV2 := light.NewNodeSet()

	accounts := []common.Address{bankAddr, userAddr1, userAddr2, signerAddr, {}}
	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
		header := bc.GetHeaderByNumber(i)
		trie, _ := trie.New(header.Root, trie.NewDatabase(server.db))

		for _, acc := range accounts {
			req := ProofReq{
				BHash: header.Hash(),
				Key:   crypto.Keccak256(acc[:]),
			}
			proofreqs = append(proofreqs, req)
			trie.Prove(crypto.Keccak256(acc[:]), 0, proofsV2)
		}
	}
	// Send the proof request and verify the response
	cost := server.peer.peer.GetRequestCost(GetProofsV2Msg, len(proofreqs))
	sendRequest(server.peer.app, GetProofsV2Msg, 42, cost, proofreqs)
	if err := expectResponse(server.peer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
		t.Errorf("proofs mismatch: %v", err)
	}
}

// Tests that stale trie merkle proofs can't be retrieved for blocks whose state has been pruned.
func TestGetStaleProofLes2(t *testing.T) { testGetStaleProof(t, 2) }
func TestGetStaleProofLes3(t *testing.T) { testGetStaleProof(t, 3) }

func testGetStaleProof(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, core.TriesInMemory+4, protocol, nil, false, true, 0)
	defer tearDown()
	bc := server.handler.blockchain

	check := func(number uint64, wantOK bool) {
		var (
			header  = bc.GetHeaderByNumber(number)
			account = crypto.Keccak256(userAddr1.Bytes())
		)
		req := &ProofReq{
			BHash: header.Hash(),
			Key:   account,
		}
		cost := server.peer.peer.GetRequestCost(GetProofsV2Msg, 1)
		sendRequest(server.peer.app, GetProofsV2Msg, 42, cost, []*ProofReq{req})

		var expected []rlp.RawValue
		if wantOK {
			proofsV2 := light.NewNodeSet()
			tr, _ := trie.New(header.Root, trie.NewDatabase(server.db))
			tr.Prove(account, 0, proofsV2)
			expected = proofsV2.NodeList()
		}
		if err := expectResponse(server.peer.app, ProofsV2Msg, 42, testBufLimit, expected); err != nil {
			t.Errorf("proof mismatch: %v", err)
		}
	}
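	// Proofs are only served while the block's state is still available; block 2
	// is old enough that its state has already been pruned, so no proof is expected.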
	check(0, false)                                 // Non-existent proof
	check(2, false)                                 // Stale proof
	check(bc.CurrentHeader().Number.Uint64(), true) // Fresh proof
}

// Tests that CHT proofs can be correctly retrieved.
func TestGetCHTProofsLes2(t *testing.T) { testGetCHTProofs(t, 2) }
func TestGetCHTProofsLes3(t *testing.T) { testGetCHTProofs(t, 3) }

func testGetCHTProofs(t *testing.T, protocol int) {
	config := light.TestServerIndexerConfig

	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
		for {
			cs, _, _ := cIndexer.Sections()
			if cs >= 1 {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	}
	server, tearDown := newServerEnv(t, int(config.ChtSize+config.ChtConfirms), protocol, waitIndexers, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Assemble the proofs from the different protocols
	header := bc.GetHeaderByNumber(config.ChtSize - 1)
	rlp, _ := rlp.EncodeToBytes(header)

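	// The CHT is keyed by the big-endian encoded block number.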
	key := make([]byte, 8)
	binary.BigEndian.PutUint64(key, config.ChtSize-1)

	proofsV2 := HelperTrieResps{
		AuxData: [][]byte{rlp},
	}
	root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash())
	trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
	trie.Prove(key, 0, &proofsV2.Proofs)
	// Assemble the requests for the different protocols
	requestsV2 := []HelperTrieReq{{
		Type:    htCanonical,
		TrieIdx: 0,
		Key:     key,
		AuxReq:  auxHeader,
	}}
	// Send the proof request and verify the response
	cost := server.peer.peer.GetRequestCost(GetHelperTrieProofsMsg, len(requestsV2))
	sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, cost, requestsV2)
	if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
		t.Errorf("proofs mismatch: %v", err)
	}
}

// Tests that bloombits proofs can be correctly retrieved.
func TestGetBloombitsProofsLes2(t *testing.T) { testGetBloombitsProofs(t, 2) }
func TestGetBloombitsProofsLes3(t *testing.T) { testGetBloombitsProofs(t, 3) }

func testGetBloombitsProofs(t *testing.T, protocol int) {
	config := light.TestServerIndexerConfig

	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
		for {
			bts, _, _ := btIndexer.Sections()
			if bts >= 1 {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	}
	server, tearDown := newServerEnv(t, int(config.BloomTrieSize+config.BloomTrieConfirms), protocol, waitIndexers, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Request and verify each bit of the bloom bits proofs
	for bit := 0; bit < 2048; bit++ {
		// Assemble the request and proofs for the bloombits
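		// Bloom trie keys are the 2-byte bit index followed by the 8-byte
		// section index, both big-endian encoded.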
		key := make([]byte, 10)

		binary.BigEndian.PutUint16(key[:2], uint16(bit))
		// Only the first bloom section has data.
		binary.BigEndian.PutUint64(key[2:], 0)

		requests := []HelperTrieReq{{
			Type:    htBloomBits,
			TrieIdx: 0,
			Key:     key,
		}}
		var proofs HelperTrieResps

		root := light.GetBloomTrieRoot(server.db, 0, bc.GetHeaderByNumber(config.BloomTrieSize-1).Hash())
		trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.BloomTrieTablePrefix)))
		trie.Prove(key, 0, &proofs.Proofs)

		// Send the proof request and verify the response
		cost := server.peer.peer.GetRequestCost(GetHelperTrieProofsMsg, len(requests))
		sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, cost, requests)
		if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofs); err != nil {
			t.Errorf("bit %d: proofs mismatch: %v", bit, err)
		}
	}
}

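// Tests that transactions can be submitted to the server and that their status
// (unknown, pending, queued or included) is reported correctly.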
func TestTransactionStatusLes2(t *testing.T) { testTransactionStatus(t, 2) }
func TestTransactionStatusLes3(t *testing.T) { testTransactionStatus(t, 3) }

func testTransactionStatus(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, 0, protocol, nil, false, true, 0)
	defer tearDown()
	server.handler.addTxsSync = true

	chain := server.handler.blockchain

	var reqID uint64

	test := func(tx *types.Transaction, send bool, expStatus light.TxStatus) {
		reqID++
		if send {
			cost := server.peer.peer.GetRequestCost(SendTxV2Msg, 1)
			sendRequest(server.peer.app, SendTxV2Msg, reqID, cost, types.Transactions{tx})
		} else {
			cost := server.peer.peer.GetRequestCost(GetTxStatusMsg, 1)
			sendRequest(server.peer.app, GetTxStatusMsg, reqID, cost, []common.Hash{tx.Hash()})
		}
		if err := expectResponse(server.peer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil {
			t.Errorf("transaction status mismatch: %v", err)
		}
	}
	signer := types.HomesteadSigner{}

	// test error status by sending an underpriced transaction
	tx0, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, nil, nil), signer, bankKey)
	test(tx0, true, light.TxStatus{Status: core.TxStatusUnknown, Error: core.ErrUnderpriced.Error()})

	tx1, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
	test(tx1, false, light.TxStatus{Status: core.TxStatusUnknown}) // query before sending, should be unknown
	test(tx1, true, light.TxStatus{Status: core.TxStatusPending})  // send valid processable tx, should return pending
	test(tx1, true, light.TxStatus{Status: core.TxStatusPending})  // adding it again should not return an error

	tx2, _ := types.SignTx(types.NewTransaction(1, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
	tx3, _ := types.SignTx(types.NewTransaction(2, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
	// send transactions in the wrong order, tx3 should be queued
	test(tx3, true, light.TxStatus{Status: core.TxStatusQueued})
	test(tx2, true, light.TxStatus{Status: core.TxStatusPending})
	// query again, now tx3 should be pending too
	test(tx3, false, light.TxStatus{Status: core.TxStatusPending})

	// generate and add a block with tx1 and tx2 included
	gchain, _ := core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), server.db, 1, func(i int, block *core.BlockGen) {
		block.AddTx(tx1)
		block.AddTx(tx2)
	})
	if _, err := chain.InsertChain(gchain); err != nil {
		panic(err)
	}
	// wait until TxPool processes the inserted block
	for i := 0; i < 10; i++ {
		if pending, _ := server.handler.txpool.Stats(); pending == 1 {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	if pending, _ := server.handler.txpool.Stats(); pending != 1 {
		t.Fatalf("pending count mismatch: have %d, want 1", pending)
	}
	// Discard new block announcement
	msg, _ := server.peer.app.ReadMsg()
	msg.Discard()

	// check if their status is included now
	block1hash := rawdb.ReadCanonicalHash(server.db, 1)
	test(tx1, false, light.TxStatus{Status: core.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 0}})

	test(tx2, false, light.TxStatus{Status: core.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 1}})

	// create a reorg that rolls them back
	gchain, _ = core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), server.db, 2, func(i int, block *core.BlockGen) {})
	if _, err := chain.InsertChain(gchain); err != nil {
		panic(err)
	}
	// wait until TxPool processes the reorg
	for i := 0; i < 10; i++ {
		if pending, _ := server.handler.txpool.Stats(); pending == 3 {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	if pending, _ := server.handler.txpool.Stats(); pending != 3 {
		t.Fatalf("pending count mismatch: have %d, want 3", pending)
	}
	// Discard new block announcement
	msg, _ = server.peer.app.ReadMsg()
	msg.Discard()

	// check if their status is pending again
	test(tx1, false, light.TxStatus{Status: core.TxStatusPending})
	test(tx2, false, light.TxStatus{Status: core.TxStatusPending})
}

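// Tests that the server pauses serving with a StopMsg when the client's flow
// control buffer is exhausted and sends a ResumeMsg once it has recharged.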
func TestStopResumeLes3(t *testing.T) {
	server, tearDown := newServerEnv(t, 0, 3, nil, true, true, testBufLimit/10)
	defer tearDown()

	server.handler.server.costTracker.testing = true

	var (
		reqID    uint64
		expBuf   = testBufLimit
		testCost = testBufLimit / 10
	)
	header := server.handler.blockchain.CurrentHeader()
	req := func() {
		reqID++
		sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, testCost, &getBlockHeadersData{Origin: hashOrNumber{Hash: header.Hash()}, Amount: 1})
	}
	for i := 1; i <= 5; i++ {
		// send requests while we still have enough buffer and expect a response
		for expBuf >= testCost {
			req()
			expBuf -= testCost
			if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, expBuf, []*types.Header{header}); err != nil {
				t.Errorf("expected response and failed: %v", err)
			}
		}
		// send some more requests in excess and expect a single StopMsg
		c := i
		for c > 0 {
			req()
			c--
		}
		if err := p2p.ExpectMsg(server.peer.app, StopMsg, nil); err != nil {
			t.Errorf("expected StopMsg and failed: %v", err)
		}
		// wait until the buffer is recharged by half of the limit
		wait := testBufLimit / testBufRecharge / 2
		server.clock.(*mclock.Simulated).Run(time.Millisecond * time.Duration(wait))

		// expect a ResumeMsg with the partially recharged buffer value
		expBuf += testBufRecharge * wait
		if err := p2p.ExpectMsg(server.peer.app, ResumeMsg, expBuf); err != nil {
			t.Errorf("expected ResumeMsg and failed: %v", err)
		}
	}
}