github.com/bigzoro/my_simplechain@v0.0.0-20240315012955-8ad0a2a29bb9/les/handler_test.go

// Copyright 2016 The go-simplechain Authors
// This file is part of the go-simplechain library.
//
// The go-simplechain library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-simplechain library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-simplechain library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"encoding/binary"
	"math/rand"
	"testing"
	"time"

	"github.com/bigzoro/my_simplechain/common"
	"github.com/bigzoro/my_simplechain/common/mclock"
	"github.com/bigzoro/my_simplechain/core"
	"github.com/bigzoro/my_simplechain/core/rawdb"
	"github.com/bigzoro/my_simplechain/core/types"
	"github.com/bigzoro/my_simplechain/crypto"
	"github.com/bigzoro/my_simplechain/eth/downloader"
	"github.com/bigzoro/my_simplechain/light"
	"github.com/bigzoro/my_simplechain/p2p"
	"github.com/bigzoro/my_simplechain/rlp"
	"github.com/bigzoro/my_simplechain/trie"
)

// expectResponse reads the next message from r and verifies that it is a reply
// carrying the given message code, request ID, buffer value and payload.
func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}) error {
	type resp struct {
		ReqID, BV uint64
		Data      interface{}
	}
	return p2p.ExpectMsg(r, msgcode, resp{reqID, bv, data})
}

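// Every retrieval test below follows the same round trip: a request is encoded
// with sendRequest and the reply envelope [reqID, bufferValue, payload] is then
// matched with expectResponse. A minimal, purely illustrative sketch of one such
// exchange (not executed by any test; it only reuses helpers already defined in
// this package's test setup, with query and headers standing in for the request
// and the expected payload):
//
//	cost := server.peer.peer.GetRequestCost(GetBlockHeadersMsg, 1)
//	sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, cost, query)
//	if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil {
//		t.Errorf("headers mismatch: %v", err)
//	}
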
// Tests that block headers can be retrieved from a remote chain based on user queries.
func TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) }
func TestGetBlockHeadersLes3(t *testing.T) { testGetBlockHeaders(t, 3) }

func testGetBlockHeaders(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, downloader.MaxHashFetch+15, protocol, nil, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Create a "random" unknown hash for testing
	var unknown common.Hash
	for i := range unknown {
		unknown[i] = byte(i)
	}
	// Create a batch of tests for various scenarios
	limit := uint64(MaxHeaderFetch)
	tests := []struct {
		query  *getBlockHeadersData // The query to execute for header retrieval
		expect []common.Hash        // The hashes of the blocks whose headers are expected
	}{
		// A single random block should be retrievable by hash and number too
		{
			&getBlockHeadersData{Origin: hashOrNumber{Hash: bc.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},
		},
		// Multiple headers should be retrievable in both directions
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 + 1).Hash(),
				bc.GetBlockByNumber(limit/2 + 2).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 - 1).Hash(),
				bc.GetBlockByNumber(limit/2 - 2).Hash(),
			},
		},
		// Multiple headers with skip lists should be retrievable (see the note after this table)
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 + 4).Hash(),
				bc.GetBlockByNumber(limit/2 + 8).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 - 4).Hash(),
				bc.GetBlockByNumber(limit/2 - 8).Hash(),
			},
		},
		// The chain endpoints should be retrievable
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(0).Hash()},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64()}, Amount: 1},
			[]common.Hash{bc.CurrentBlock().Hash()},
		},
		// Ensure protocol limits are honored
		//{
		//	&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true},
		//	[]common.Hash{},
		//},
		// Check that requesting more than available is handled gracefully
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(),
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64()).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(4).Hash(),
				bc.GetBlockByNumber(0).Hash(),
			},
		},
		// Check that requesting more than available is handled gracefully, even if mid-skip
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(),
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(4).Hash(),
				bc.GetBlockByNumber(1).Hash(),
			},
		},
		// Check that non-existent headers aren't returned
		{
			&getBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1},
			[]common.Hash{},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() + 1}, Amount: 1},
			[]common.Hash{},
		},
	}
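	// Note on the skip-list cases above: a skip query selects every (Skip+1)-th
	// block, so the n-th returned header sits at Origin +/- n*(Skip+1). With
	// Skip: 3 and Amount: 3 that is Origin, Origin+4 and Origin+8 going forward,
	// or Origin, Origin-4 and Origin-8 with Reverse set, which matches the
	// expected hash lists in those cases.
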
	// Run each of the tests and verify the results against the chain
	var reqID uint64
	for i, tt := range tests {
		// Collect the headers to expect in the response
		var headers []*types.Header
		for _, hash := range tt.expect {
			headers = append(headers, bc.GetHeaderByHash(hash))
		}
		// Send the hash request and verify the response
		reqID++

		cost := server.peer.peer.GetRequestCost(GetBlockHeadersMsg, int(tt.query.Amount))
		sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, cost, tt.query)
		if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil {
			t.Errorf("test %d: headers mismatch: %v", i, err)
		}
	}
}

// Tests that block contents can be retrieved from a remote chain based on their hashes.
func TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) }
func TestGetBlockBodiesLes3(t *testing.T) { testGetBlockBodies(t, 3) }

func testGetBlockBodies(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, downloader.MaxBlockFetch+15, protocol, nil, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Create a batch of tests for various scenarios
	limit := MaxBodyFetch
	tests := []struct {
		random    int           // Number of blocks to fetch randomly from the chain
		explicit  []common.Hash // Explicitly requested blocks
		available []bool        // Availability of explicitly requested blocks
		expected  int           // Total number of existing blocks to expect
	}{
		{1, nil, nil, 1},         // A single random block should be retrievable
		{10, nil, nil, 10},       // Multiple random blocks should be retrievable
		{limit, nil, nil, limit}, // The maximum possible blocks should be retrievable
		//{limit + 1, nil, nil, limit},                                  // No more than the possible block count should be returned
		{0, []common.Hash{bc.Genesis().Hash()}, []bool{true}, 1},      // The genesis block should be retrievable
		{0, []common.Hash{bc.CurrentBlock().Hash()}, []bool{true}, 1}, // The chain's head block should be retrievable
		{0, []common.Hash{{}}, []bool{false}, 0},                      // A non-existent block should not be returned

		// Existing and non-existing blocks interleaved should not cause problems
		{0, []common.Hash{
			{},
			bc.GetBlockByNumber(1).Hash(),
			{},
			bc.GetBlockByNumber(10).Hash(),
			{},
			bc.GetBlockByNumber(100).Hash(),
			{},
		}, []bool{false, true, false, true, false, true, false}, 3},
	}
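	// The expected field caps how many bodies the reply may contain: the
	// collection loops below stop appending to bodies once that many existing
	// blocks have been gathered, even if more hashes end up being requested.
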
	// Run each of the tests and verify the results against the chain
	var reqID uint64
	for i, tt := range tests {
		// Collect the hashes to request, and the response to expect
		var hashes []common.Hash
		seen := make(map[int64]bool)
		var bodies []*types.Body

		for j := 0; j < tt.random; j++ {
			for {
				num := rand.Int63n(int64(bc.CurrentBlock().NumberU64()))
				if !seen[num] {
					seen[num] = true

					block := bc.GetBlockByNumber(uint64(num))
					hashes = append(hashes, block.Hash())
					if len(bodies) < tt.expected {
						bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})
					}
					break
				}
			}
		}
		for j, hash := range tt.explicit {
			hashes = append(hashes, hash)
			if tt.available[j] && len(bodies) < tt.expected {
				block := bc.GetBlockByHash(hash)
				bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})
			}
		}
		reqID++

		// Send the hash request and verify the response
		cost := server.peer.peer.GetRequestCost(GetBlockBodiesMsg, len(hashes))
		sendRequest(server.peer.app, GetBlockBodiesMsg, reqID, cost, hashes)
		if err := expectResponse(server.peer.app, BlockBodiesMsg, reqID, testBufLimit, bodies); err != nil {
			t.Errorf("test %d: bodies mismatch: %v", i, err)
		}
	}
}

// Tests that contract codes can be retrieved based on account addresses.
func TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) }
func TestGetCodeLes3(t *testing.T) { testGetCode(t, 3) }

func testGetCode(t *testing.T, protocol int) {
	// Assemble the test environment
	server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
	defer tearDown()
	bc := server.handler.blockchain

	var codereqs []*CodeReq
	var codes [][]byte
	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
		header := bc.GetHeaderByNumber(i)
		req := &CodeReq{
			BHash:  header.Hash(),
			AccKey: crypto.Keccak256(testContractAddr[:]),
		}
		codereqs = append(codereqs, req)
		if i >= testContractDeployed {
			codes = append(codes, testContractCodeDeployed)
		}
	}

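	// Only blocks from the contract's deployment onwards contribute an entry to
	// the expected reply; earlier blocks still get a request above but are not
	// expected to return any code for the account.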
	cost := server.peer.peer.GetRequestCost(GetCodeMsg, len(codereqs))
	sendRequest(server.peer.app, GetCodeMsg, 42, cost, codereqs)
	if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, codes); err != nil {
		t.Errorf("codes mismatch: %v", err)
	}
}

// Tests that the stale contract codes can't be retrieved based on account addresses.
func TestGetStaleCodeLes2(t *testing.T) { testGetStaleCode(t, 2) }
func TestGetStaleCodeLes3(t *testing.T) { testGetStaleCode(t, 3) }

func testGetStaleCode(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, core.TriesInMemory+4, protocol, nil, false, true, 0)
	defer tearDown()
	bc := server.handler.blockchain

	check := func(number uint64, expected [][]byte) {
		req := &CodeReq{
			BHash:  bc.GetHeaderByNumber(number).Hash(),
			AccKey: crypto.Keccak256(testContractAddr[:]),
		}
		cost := server.peer.peer.GetRequestCost(GetCodeMsg, 1)
		sendRequest(server.peer.app, GetCodeMsg, 42, cost, []*CodeReq{req})
		if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, expected); err != nil {
			t.Errorf("codes mismatch: %v", err)
		}
	}
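	// The chain is core.TriesInMemory+4 blocks long, so the state at the
	// deployment block should already have fallen out of the recent-trie window
	// the server serves from; only the request at the current head is expected
	// to return the contract code.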
	check(0, [][]byte{})                                                          // Non-existent contract
	check(testContractDeployed, [][]byte{})                                       // Stale contract
	check(bc.CurrentHeader().Number.Uint64(), [][]byte{testContractCodeDeployed}) // Fresh contract
}

// Tests that the transaction receipts can be retrieved based on hashes.
func TestGetReceiptLes2(t *testing.T) { testGetReceipt(t, 2) }
func TestGetReceiptLes3(t *testing.T) { testGetReceipt(t, 3) }

func testGetReceipt(t *testing.T, protocol int) {
	// Assemble the test environment
	server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Collect the hashes to request, and the response to expect
	var receipts []types.Receipts
	var hashes []common.Hash
	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
		block := bc.GetBlockByNumber(i)

		hashes = append(hashes, block.Hash())
		receipts = append(receipts, rawdb.ReadRawReceipts(server.db, block.Hash(), block.NumberU64()))
	}
	// Send the hash request and verify the response
	cost := server.peer.peer.GetRequestCost(GetReceiptsMsg, len(hashes))
	sendRequest(server.peer.app, GetReceiptsMsg, 42, cost, hashes)
	if err := expectResponse(server.peer.app, ReceiptsMsg, 42, testBufLimit, receipts); err != nil {
		t.Errorf("receipts mismatch: %v", err)
	}
}

// Tests that trie merkle proofs can be retrieved.
func TestGetProofsLes2(t *testing.T) { testGetProofs(t, 2) }
func TestGetProofsLes3(t *testing.T) { testGetProofs(t, 3) }

func testGetProofs(t *testing.T, protocol int) {
	// Assemble the test environment
	server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	var proofreqs []ProofReq
	proofsV2 := light.NewNodeSet()

	accounts := []common.Address{bankAddr, userAddr1, userAddr2, signerAddr, {}}
	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
		header := bc.GetHeaderByNumber(i)
		trie, _ := trie.New(header.Root, trie.NewDatabase(server.db))

		for _, acc := range accounts {
			req := ProofReq{
				BHash: header.Hash(),
				Key:   crypto.Keccak256(acc[:]),
			}
			proofreqs = append(proofreqs, req)
			trie.Prove(crypto.Keccak256(acc[:]), 0, proofsV2)
		}
	}
	// Send the proof request and verify the response
	cost := server.peer.peer.GetRequestCost(GetProofsV2Msg, len(proofreqs))
	sendRequest(server.peer.app, GetProofsV2Msg, 42, cost, proofreqs)
	if err := expectResponse(server.peer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
		t.Errorf("proofs mismatch: %v", err)
	}
}

// Tests that stale merkle proofs can't be retrieved based on account addresses.
func TestGetStaleProofLes2(t *testing.T) { testGetStaleProof(t, 2) }
func TestGetStaleProofLes3(t *testing.T) { testGetStaleProof(t, 3) }

func testGetStaleProof(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, core.TriesInMemory+4, protocol, nil, false, true, 0)
	defer tearDown()
	bc := server.handler.blockchain

	check := func(number uint64, wantOK bool) {
		var (
			header  = bc.GetHeaderByNumber(number)
			account = crypto.Keccak256(userAddr1.Bytes())
		)
		req := &ProofReq{
			BHash: header.Hash(),
			Key:   account,
		}
		cost := server.peer.peer.GetRequestCost(GetProofsV2Msg, 1)
		sendRequest(server.peer.app, GetProofsV2Msg, 42, cost, []*ProofReq{req})

		var expected []rlp.RawValue
		if wantOK {
			proofsV2 := light.NewNodeSet()
			tr, _ := trie.New(header.Root, trie.NewDatabase(server.db))
			tr.Prove(account, 0, proofsV2)
			expected = proofsV2.NodeList()
		}
		if err := expectResponse(server.peer.app, ProofsV2Msg, 42, testBufLimit, expected); err != nil {
			t.Errorf("proofs mismatch: %v", err)
		}
	}
	check(0, false)                                 // Non-existent proof
	check(2, false)                                 // Stale proof
	check(bc.CurrentHeader().Number.Uint64(), true) // Fresh proof
}

// Tests that CHT proofs can be correctly retrieved.
func TestGetCHTProofsLes2(t *testing.T) { testGetCHTProofs(t, 2) }
func TestGetCHTProofsLes3(t *testing.T) { testGetCHTProofs(t, 3) }

func testGetCHTProofs(t *testing.T, protocol int) {
	config := light.TestServerIndexerConfig

	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
		for {
			cs, _, _ := cIndexer.Sections()
			if cs >= 1 {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	}
	server, tearDown := newServerEnv(t, int(config.ChtSize+config.ChtConfirms), protocol, waitIndexers, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Assemble the proofs from the different protocols
	header := bc.GetHeaderByNumber(config.ChtSize - 1)
	rlp, _ := rlp.EncodeToBytes(header)

	key := make([]byte, 8)
	binary.BigEndian.PutUint64(key, config.ChtSize-1)

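	// The canonical hash trie is keyed by the big-endian encoded block number,
	// so proving this key yields the CHT nodes for header ChtSize-1; the
	// auxHeader request below additionally returns that header's RLP encoding.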
	proofsV2 := HelperTrieResps{
		AuxData: [][]byte{rlp},
	}
	root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash())
	trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
	trie.Prove(key, 0, &proofsV2.Proofs)
	// Assemble the requests for the different protocols
	requestsV2 := []HelperTrieReq{{
		Type:    htCanonical,
		TrieIdx: 0,
		Key:     key,
		AuxReq:  auxHeader,
	}}
	// Send the proof request and verify the response
	cost := server.peer.peer.GetRequestCost(GetHelperTrieProofsMsg, len(requestsV2))
	sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, cost, requestsV2)
	if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
		t.Errorf("proofs mismatch: %v", err)
	}
}

// Tests that bloombits proofs can be correctly retrieved.
func TestGetBloombitsProofsLes2(t *testing.T) { testGetBloombitsProofs(t, 2) }
func TestGetBloombitsProofsLes3(t *testing.T) { testGetBloombitsProofs(t, 3) }

func testGetBloombitsProofs(t *testing.T, protocol int) {
	config := light.TestServerIndexerConfig

	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
		for {
			bts, _, _ := btIndexer.Sections()
			if bts >= 1 {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	}
	server, tearDown := newServerEnv(t, int(config.BloomTrieSize+config.BloomTrieConfirms), protocol, waitIndexers, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Request and verify each bit of the bloom bits proofs
	for bit := 0; bit < 2048; bit++ {
		// Assemble the request and proofs for the bloombits
		key := make([]byte, 10)

		binary.BigEndian.PutUint16(key[:2], uint16(bit))
		// Only the first bloom section has data.
		binary.BigEndian.PutUint64(key[2:], 0)

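		// The resulting bloom-trie key is the 2-byte big-endian bit index
		// followed by the 8-byte big-endian section index.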
		requests := []HelperTrieReq{{
			Type:    htBloomBits,
			TrieIdx: 0,
			Key:     key,
		}}
		var proofs HelperTrieResps

		root := light.GetBloomTrieRoot(server.db, 0, bc.GetHeaderByNumber(config.BloomTrieSize-1).Hash())
		trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.BloomTrieTablePrefix)))
		trie.Prove(key, 0, &proofs.Proofs)

		// Send the proof request and verify the response
		cost := server.peer.peer.GetRequestCost(GetHelperTrieProofsMsg, len(requests))
		sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, cost, requests)
		if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofs); err != nil {
			t.Errorf("bit %d: proofs mismatch: %v", bit, err)
		}
	}
}

func TestTransactionStatusLes2(t *testing.T) { testTransactionStatus(t, 2) }
func TestTransactionStatusLes3(t *testing.T) { testTransactionStatus(t, 3) }

func testTransactionStatus(t *testing.T, protocol int) {
	//server, tearDown := newServerEnv(t, 0, protocol, nil, false, true, 0)
	//defer tearDown()
	//server.handler.addTxsSync = true
	//
	//chain := server.handler.blockchain
	//
	//var reqID uint64
	//
	//test := func(tx *types.Transaction, send bool, expStatus light.TxStatus) {
	//	reqID++
	//	if send {
	//		cost := server.peer.peer.GetRequestCost(SendTxV2Msg, 1)
	//		sendRequest(server.peer.app, SendTxV2Msg, reqID, cost, types.Transactions{tx})
	//	} else {
	//		cost := server.peer.peer.GetRequestCost(GetTxStatusMsg, 1)
	//		sendRequest(server.peer.app, GetTxStatusMsg, reqID, cost, []common.Hash{tx.Hash()})
	//	}
	//	if err := expectResponse(server.peer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil {
	//		t.Errorf("transaction status mismatch")
	//	}
	//}
	//signer := types.HomesteadSigner{}
	//
	//// test error status by sending an underpriced transaction
	//tx0, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, nil, nil), signer, bankKey)
	//test(tx0, true, light.TxStatus{Status: core.TxStatusUnknown, Error: core.ErrUnderpriced.Error()})
	//
	//tx1, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
	//test(tx1, false, light.TxStatus{Status: core.TxStatusUnknown}) // query before sending, should be unknown
	//test(tx1, true, light.TxStatus{Status: core.TxStatusPending})  // send valid processable tx, should return pending
	//test(tx1, true, light.TxStatus{Status: core.TxStatusPending})  // adding it again should not return an error
	//
	//tx2, _ := types.SignTx(types.NewTransaction(1, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
	//tx3, _ := types.SignTx(types.NewTransaction(2, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
	//// send transactions in the wrong order, tx3 should be queued
	//test(tx3, true, light.TxStatus{Status: core.TxStatusQueued})
	//test(tx2, true, light.TxStatus{Status: core.TxStatusPending})
	//// query again, now tx3 should be pending too
	//test(tx3, false, light.TxStatus{Status: core.TxStatusPending})
	//
	//// generate and add a block with tx1 and tx2 included
	//gchain, _ := core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), server.db, 1, func(i int, block *core.BlockGen) {
	//	block.AddTx(tx1)
	//	block.AddTx(tx2)
	//})
	//if _, err := chain.InsertChain(gchain); err != nil {
	//	panic(err)
	//}
	//// wait until TxPool processes the inserted block
	//for i := 0; i < 10; i++ {
	//	if pending, _ := server.handler.txpool.Stats(); pending == 1 {
	//		break
	//	}
	//	time.Sleep(100 * time.Millisecond)
	//}
	//if pending, _ := server.handler.txpool.Stats(); pending != 1 {
	//	t.Fatalf("pending count mismatch: have %d, want 1", pending)
	//}
	//// Discard new block announcement
	//msg, _ := server.peer.app.ReadMsg()
	//msg.Discard()
	//
	//// check if their status is included now
	//block1hash := rawdb.ReadCanonicalHash(server.db, 1)
	//test(tx1, false, light.TxStatus{Status: core.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 0}})
	//
	//test(tx2, false, light.TxStatus{Status: core.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 1}})
	//
	//// create a reorg that rolls them back
	//gchain, _ = core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), server.db, 2, func(i int, block *core.BlockGen) {})
	//if _, err := chain.InsertChain(gchain); err != nil {
	//	panic(err)
	//}
	//// wait until TxPool processes the reorg
	//for i := 0; i < 10; i++ {
	//	if pending, _ := server.handler.txpool.Stats(); pending == 3 {
	//		break
	//	}
	//	time.Sleep(100 * time.Millisecond)
	//}
	//if pending, _ := server.handler.txpool.Stats(); pending != 3 {
	//	t.Fatalf("pending count mismatch: have %d, want 3", pending)
	//}
	//// Discard new block announcement
	//msg, _ = server.peer.app.ReadMsg()
	//msg.Discard()
	//
	//// check if their status is pending again
	//test(tx1, false, light.TxStatus{Status: core.TxStatusPending})
	//test(tx2, false, light.TxStatus{Status: core.TxStatusPending})
}

func TestStopResumeLes3(t *testing.T) {
	server, tearDown := newServerEnv(t, 0, 3, nil, true, true, testBufLimit/10)
	defer tearDown()

	server.handler.server.costTracker.testing = true

	var (
		reqID    uint64
		expBuf   = testBufLimit
		testCost = testBufLimit / 10
	)
	header := server.handler.blockchain.CurrentHeader()
	req := func() {
		reqID++
		sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, testCost, &getBlockHeadersData{Origin: hashOrNumber{Hash: header.Hash()}, Amount: 1})
	}
	for i := 1; i <= 5; i++ {
		// send requests while we still have enough buffer and expect a response
		for expBuf >= testCost {
			req()
			expBuf -= testCost
			if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, expBuf, []*types.Header{header}); err != nil {
				t.Errorf("expected response and failed: %v", err)
			}
		}
		// send some more requests in excess and expect a single StopMsg
		c := i
		for c > 0 {
			req()
			c--
		}
		if err := p2p.ExpectMsg(server.peer.app, StopMsg, nil); err != nil {
			t.Errorf("expected StopMsg and failed: %v", err)
		}
		// wait until the buffer is recharged by half of the limit
		wait := testBufLimit / testBufRecharge / 2
		server.clock.(*mclock.Simulated).Run(time.Millisecond * time.Duration(wait))

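		// In the cost tracker's testing mode the buffer recharges at testBufRecharge
		// units per simulated millisecond, so advancing the clock by
		// testBufLimit/testBufRecharge/2 ms should restore half of the full limit.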
		// expect a ResumeMsg with the partially recharged buffer value
		expBuf += testBufRecharge * wait
		if err := p2p.ExpectMsg(server.peer.app, ResumeMsg, expBuf); err != nil {
			t.Errorf("expected ResumeMsg and failed: %v", err)
		}
	}
}