github.com/aswedchain/aswed@v1.0.1/les/handler_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"encoding/binary"
	"math/big"
	"math/rand"
	"testing"
	"time"

	"github.com/aswedchain/aswed/common"
	"github.com/aswedchain/aswed/common/mclock"
	"github.com/aswedchain/aswed/consensus/ethash"
	"github.com/aswedchain/aswed/core"
	"github.com/aswedchain/aswed/core/rawdb"
	"github.com/aswedchain/aswed/core/types"
	"github.com/aswedchain/aswed/crypto"
	"github.com/aswedchain/aswed/eth/downloader"
	"github.com/aswedchain/aswed/light"
	"github.com/aswedchain/aswed/p2p"
	"github.com/aswedchain/aswed/params"
	"github.com/aswedchain/aswed/rlp"
	"github.com/aswedchain/aswed/trie"
)

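// expectResponse reads the next message from the given reader and verifies
// that it is a response carrying the expected message code, request ID,
// flow-control buffer value (BV) and payload.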
func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}) error {
	type resp struct {
		ReqID, BV uint64
		Data      interface{}
	}
	return p2p.ExpectMsg(r, msgcode, resp{reqID, bv, data})
}

// Tests that block headers can be retrieved from a remote chain based on user queries.
func TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) }
func TestGetBlockHeadersLes3(t *testing.T) { testGetBlockHeaders(t, 3) }

func testGetBlockHeaders(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, downloader.MaxHashFetch+15, protocol, nil, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Create a "random" unknown hash for testing
	var unknown common.Hash
	for i := range unknown {
		unknown[i] = byte(i)
	}
	// Create a batch of tests for various scenarios
	limit := uint64(MaxHeaderFetch)
	tests := []struct {
		query  *getBlockHeadersData // The query to execute for header retrieval
		expect []common.Hash        // The hashes of the blocks whose headers are expected
	}{
		// A single random block should be retrievable by hash and number too
		{
			&getBlockHeadersData{Origin: hashOrNumber{Hash: bc.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},
		},
		// Multiple headers should be retrievable in both directions
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 + 1).Hash(),
				bc.GetBlockByNumber(limit/2 + 2).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 - 1).Hash(),
				bc.GetBlockByNumber(limit/2 - 2).Hash(),
			},
		},
		// Multiple headers with skip lists should be retrievable
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 + 4).Hash(),
				bc.GetBlockByNumber(limit/2 + 8).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 - 4).Hash(),
				bc.GetBlockByNumber(limit/2 - 8).Hash(),
			},
		},
		// The chain endpoints should be retrievable
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(0).Hash()},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64()}, Amount: 1},
			[]common.Hash{bc.CurrentBlock().Hash()},
		},
		// Ensure protocol limits are honored
		//{
		//	&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true},
		//	[]common.Hash{},
		//},
		// Check that requesting more than available is handled gracefully
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(),
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64()).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(4).Hash(),
				bc.GetBlockByNumber(0).Hash(),
			},
		},
		// Check that requesting more than available is handled gracefully, even if mid skip
		{
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(),
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1).Hash(),
			},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(4).Hash(),
				bc.GetBlockByNumber(1).Hash(),
			},
		},
		// Check that non-existing headers aren't returned
		{
			&getBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1},
			[]common.Hash{},
		}, {
			&getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() + 1}, Amount: 1},
			[]common.Hash{},
		},
	}
	// Run each of the tests and verify the results against the chain
	var reqID uint64
	for i, tt := range tests {
		// Collect the headers to expect in the response
		var headers []*types.Header
		for _, hash := range tt.expect {
			headers = append(headers, bc.GetHeaderByHash(hash))
		}
		// Send the hash request and verify the response
		reqID++

		sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, tt.query)
		if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil {
			t.Errorf("test %d: headers mismatch: %v", i, err)
		}
	}
}

// Tests that block contents can be retrieved from a remote chain based on their hashes.
func TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) }
func TestGetBlockBodiesLes3(t *testing.T) { testGetBlockBodies(t, 3) }

func testGetBlockBodies(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, downloader.MaxBlockFetch+15, protocol, nil, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Create a batch of tests for various scenarios
	limit := MaxBodyFetch
	tests := []struct {
		random    int           // Number of blocks to fetch randomly from the chain
		explicit  []common.Hash // Explicitly requested blocks
		available []bool        // Availability of explicitly requested blocks
		expected  int           // Total number of existing blocks to expect
	}{
		{1, nil, nil, 1},         // A single random block should be retrievable
		{10, nil, nil, 10},       // Multiple random blocks should be retrievable
		{limit, nil, nil, limit}, // The maximum possible blocks should be retrievable
		//{limit + 1, nil, nil, limit},                                  // No more than the possible block count should be returned
		{0, []common.Hash{bc.Genesis().Hash()}, []bool{true}, 1},      // The genesis block should be retrievable
		{0, []common.Hash{bc.CurrentBlock().Hash()}, []bool{true}, 1}, // The chain's head block should be retrievable
		{0, []common.Hash{{}}, []bool{false}, 0},                      // A non-existent block should not be returned

		// Existing and non-existing blocks interleaved should not cause problems
		{0, []common.Hash{
			{},
			bc.GetBlockByNumber(1).Hash(),
			{},
			bc.GetBlockByNumber(10).Hash(),
			{},
			bc.GetBlockByNumber(100).Hash(),
			{},
		}, []bool{false, true, false, true, false, true, false}, 3},
	}
	// Run each of the tests and verify the results against the chain
	var reqID uint64
	for i, tt := range tests {
		// Collect the hashes to request, and the response to expect
		var hashes []common.Hash
		seen := make(map[int64]bool)
		var bodies []*types.Body

		for j := 0; j < tt.random; j++ {
			for {
				num := rand.Int63n(int64(bc.CurrentBlock().NumberU64()))
				if !seen[num] {
					seen[num] = true

					block := bc.GetBlockByNumber(uint64(num))
					hashes = append(hashes, block.Hash())
					if len(bodies) < tt.expected {
						bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})
					}
					break
				}
			}
		}
		for j, hash := range tt.explicit {
			hashes = append(hashes, hash)
			if tt.available[j] && len(bodies) < tt.expected {
				block := bc.GetBlockByHash(hash)
				bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})
			}
		}
		reqID++

		// Send the hash request and verify the response
		sendRequest(server.peer.app, GetBlockBodiesMsg, reqID, hashes)
		if err := expectResponse(server.peer.app, BlockBodiesMsg, reqID, testBufLimit, bodies); err != nil {
			t.Errorf("test %d: bodies mismatch: %v", i, err)
		}
	}
}

// Tests that contract codes can be retrieved based on account addresses.
func TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) }
func TestGetCodeLes3(t *testing.T) { testGetCode(t, 3) }

func testGetCode(t *testing.T, protocol int) {
	// Assemble the test environment
	server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
	defer tearDown()
	bc := server.handler.blockchain

	var codereqs []*CodeReq
	var codes [][]byte
	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
		header := bc.GetHeaderByNumber(i)
		req := &CodeReq{
			BHash:  header.Hash(),
			AccKey: crypto.Keccak256(testContractAddr[:]),
		}
		codereqs = append(codereqs, req)
		if i >= testContractDeployed {
			codes = append(codes, testContractCodeDeployed)
		}
	}

	sendRequest(server.peer.app, GetCodeMsg, 42, codereqs)
	if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, codes); err != nil {
		t.Errorf("codes mismatch: %v", err)
	}
}

// Tests that stale contract codes can't be retrieved based on account addresses.
func TestGetStaleCodeLes2(t *testing.T) { testGetStaleCode(t, 2) }
func TestGetStaleCodeLes3(t *testing.T) { testGetStaleCode(t, 3) }

func testGetStaleCode(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, core.TriesInMemory+4, protocol, nil, false, true, 0)
	defer tearDown()
	bc := server.handler.blockchain

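	// check requests the contract code at the given block height and verifies
	// that the response carries exactly the expected code blobs.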
	check := func(number uint64, expected [][]byte) {
		req := &CodeReq{
			BHash:  bc.GetHeaderByNumber(number).Hash(),
			AccKey: crypto.Keccak256(testContractAddr[:]),
		}
		sendRequest(server.peer.app, GetCodeMsg, 42, []*CodeReq{req})
		if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, expected); err != nil {
			t.Errorf("codes mismatch: %v", err)
		}
	}
	check(0, [][]byte{})                                                          // Non-existent contract
	check(testContractDeployed, [][]byte{})                                       // Stale contract
	check(bc.CurrentHeader().Number.Uint64(), [][]byte{testContractCodeDeployed}) // Fresh contract
}

// Tests that the transaction receipts can be retrieved based on hashes.
func TestGetReceiptLes2(t *testing.T) { testGetReceipt(t, 2) }
func TestGetReceiptLes3(t *testing.T) { testGetReceipt(t, 3) }

func testGetReceipt(t *testing.T, protocol int) {
	// Assemble the test environment
	server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Collect the hashes to request, and the response to expect
	var receipts []types.Receipts
	var hashes []common.Hash
	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
		block := bc.GetBlockByNumber(i)

		hashes = append(hashes, block.Hash())
		receipts = append(receipts, rawdb.ReadRawReceipts(server.db, block.Hash(), block.NumberU64()))
	}
	// Send the hash request and verify the response
	sendRequest(server.peer.app, GetReceiptsMsg, 42, hashes)
	if err := expectResponse(server.peer.app, ReceiptsMsg, 42, testBufLimit, receipts); err != nil {
		t.Errorf("receipts mismatch: %v", err)
	}
}

// Tests that trie merkle proofs can be retrieved.
func TestGetProofsLes2(t *testing.T) { testGetProofs(t, 2) }
func TestGetProofsLes3(t *testing.T) { testGetProofs(t, 3) }

func testGetProofs(t *testing.T, protocol int) {
	// Assemble the test environment
	server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

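	// Gather proof requests for a set of known accounts (plus the empty address)
	// at every block height, accumulating the expected proof nodes from the
	// server's own state tries.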
	var proofreqs []ProofReq
	proofsV2 := light.NewNodeSet()

	accounts := []common.Address{bankAddr, userAddr1, userAddr2, signerAddr, {}}
	for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
		header := bc.GetHeaderByNumber(i)
		tr, _ := trie.New(header.Root, trie.NewDatabase(server.db))

		for _, acc := range accounts {
			req := ProofReq{
				BHash: header.Hash(),
				Key:   crypto.Keccak256(acc[:]),
			}
			proofreqs = append(proofreqs, req)
			tr.Prove(crypto.Keccak256(acc[:]), 0, proofsV2)
		}
	}
	// Send the proof request and verify the response
	sendRequest(server.peer.app, GetProofsV2Msg, 42, proofreqs)
	if err := expectResponse(server.peer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
		t.Errorf("proofs mismatch: %v", err)
	}
}

// Tests that stale merkle proofs can't be retrieved based on account addresses.
func TestGetStaleProofLes2(t *testing.T) { testGetStaleProof(t, 2) }
func TestGetStaleProofLes3(t *testing.T) { testGetStaleProof(t, 3) }

func testGetStaleProof(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, core.TriesInMemory+4, protocol, nil, false, true, 0)
	defer tearDown()
	bc := server.handler.blockchain

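	// check requests a merkle proof for userAddr1 at the given block height and
	// verifies that a proof is only served while the state is still available.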
	check := func(number uint64, wantOK bool) {
		var (
			header  = bc.GetHeaderByNumber(number)
			account = crypto.Keccak256(userAddr1.Bytes())
		)
		req := &ProofReq{
			BHash: header.Hash(),
			Key:   account,
		}
		sendRequest(server.peer.app, GetProofsV2Msg, 42, []*ProofReq{req})

		var expected []rlp.RawValue
		if wantOK {
			proofsV2 := light.NewNodeSet()
			tr, _ := trie.New(header.Root, trie.NewDatabase(server.db))
			tr.Prove(account, 0, proofsV2)
			expected = proofsV2.NodeList()
		}
		if err := expectResponse(server.peer.app, ProofsV2Msg, 42, testBufLimit, expected); err != nil {
			t.Errorf("proofs mismatch: %v", err)
		}
	}
	check(0, false)                                 // Non-existent proof
	check(2, false)                                 // Stale proof
	check(bc.CurrentHeader().Number.Uint64(), true) // Fresh proof
}

// Tests that CHT proofs can be correctly retrieved.
func TestGetCHTProofsLes2(t *testing.T) { testGetCHTProofs(t, 2) }
func TestGetCHTProofsLes3(t *testing.T) { testGetCHTProofs(t, 3) }

func testGetCHTProofs(t *testing.T, protocol int) {
	config := light.TestServerIndexerConfig

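	// Wait until the CHT indexer has processed at least one full section before
	// serving requests against it.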
	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
		for {
			cs, _, _ := cIndexer.Sections()
			if cs >= 1 {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	}
	server, tearDown := newServerEnv(t, int(config.ChtSize+config.ChtConfirms), protocol, waitIndexers, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Assemble the expected proof and the auxiliary header data
	header := bc.GetHeaderByNumber(config.ChtSize - 1)
	headerRlp, _ := rlp.EncodeToBytes(header)

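	// The CHT key is the number of the proven block, encoded as a big-endian
	// 64-bit integer.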
	key := make([]byte, 8)
	binary.BigEndian.PutUint64(key, config.ChtSize-1)

	proofsV2 := HelperTrieResps{
		AuxData: [][]byte{headerRlp},
	}
	root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash())
	tr, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
	tr.Prove(key, 0, &proofsV2.Proofs)
	// Assemble the request against the first CHT section
	requestsV2 := []HelperTrieReq{{
		Type:    htCanonical,
		TrieIdx: 0,
		Key:     key,
		AuxReq:  auxHeader,
	}}
	// Send the proof request and verify the response
	sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, requestsV2)
	if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
		t.Errorf("proofs mismatch: %v", err)
	}
}

// Tests that bloombits proofs can be correctly retrieved.
func TestGetBloombitsProofsLes2(t *testing.T) { testGetBloombitsProofs(t, 2) }
func TestGetBloombitsProofsLes3(t *testing.T) { testGetBloombitsProofs(t, 3) }

func testGetBloombitsProofs(t *testing.T, protocol int) {
	config := light.TestServerIndexerConfig

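	// Wait until the bloom trie indexer has processed at least one full section
	// before serving requests against it.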
	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
		for {
			bts, _, _ := btIndexer.Sections()
			if bts >= 1 {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	}
	server, tearDown := newServerEnv(t, int(config.BloomTrieSize+config.BloomTrieConfirms), protocol, waitIndexers, false, true, 0)
	defer tearDown()

	bc := server.handler.blockchain

	// Request and verify each bit of the bloom bits proofs
	for bit := 0; bit < 2048; bit++ {
		// Assemble the request and proofs for the bloombits
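		// The bloom trie key is the 2-byte bloom bit index followed by the
		// 8-byte section index.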
		key := make([]byte, 10)

		binary.BigEndian.PutUint16(key[:2], uint16(bit))
		// Only the first bloom section has data.
		binary.BigEndian.PutUint64(key[2:], 0)

		requests := []HelperTrieReq{{
			Type:    htBloomBits,
			TrieIdx: 0,
			Key:     key,
		}}
		var proofs HelperTrieResps

		root := light.GetBloomTrieRoot(server.db, 0, bc.GetHeaderByNumber(config.BloomTrieSize-1).Hash())
		tr, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.BloomTrieTablePrefix)))
		tr.Prove(key, 0, &proofs.Proofs)

		// Send the proof request and verify the response
		sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, requests)
		if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofs); err != nil {
			t.Errorf("bit %d: proofs mismatch: %v", bit, err)
		}
	}
}

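// Tests that transaction status retrieval works for pending, queued, included
// and rolled-back transactions.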
func TestTransactionStatusLes2(t *testing.T) { testTransactionStatus(t, 2) }
func TestTransactionStatusLes3(t *testing.T) { testTransactionStatus(t, 3) }

func testTransactionStatus(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, 0, protocol, nil, false, true, 0)
	defer tearDown()
	server.handler.addTxsSync = true

	chain := server.handler.blockchain

	var reqID uint64

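	// test either sends the given transaction to the server or queries its
	// status by hash, then verifies that the reported status matches expStatus.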
	test := func(tx *types.Transaction, send bool, expStatus light.TxStatus) {
		reqID++
		if send {
			sendRequest(server.peer.app, SendTxV2Msg, reqID, types.Transactions{tx})
		} else {
			sendRequest(server.peer.app, GetTxStatusMsg, reqID, []common.Hash{tx.Hash()})
		}
		if err := expectResponse(server.peer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil {
			t.Errorf("transaction status mismatch: %v", err)
		}
	}
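	// All test transactions below are signed with bankKey using the Homestead signer.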
	signer := types.HomesteadSigner{}

	// test error status by sending an underpriced transaction
	tx0, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, nil, nil), signer, bankKey)
	test(tx0, true, light.TxStatus{Status: core.TxStatusUnknown, Error: core.ErrUnderpriced.Error()})

	tx1, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
	test(tx1, false, light.TxStatus{Status: core.TxStatusUnknown}) // query before sending, should be unknown
	test(tx1, true, light.TxStatus{Status: core.TxStatusPending})  // send valid processable tx, should return pending
	test(tx1, true, light.TxStatus{Status: core.TxStatusPending})  // adding it again should not return an error

	tx2, _ := types.SignTx(types.NewTransaction(1, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
	tx3, _ := types.SignTx(types.NewTransaction(2, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
	// send transactions in the wrong order, tx3 should be queued
	test(tx3, true, light.TxStatus{Status: core.TxStatusQueued})
	test(tx2, true, light.TxStatus{Status: core.TxStatusPending})
	// query again, now tx3 should be pending too
	test(tx3, false, light.TxStatus{Status: core.TxStatusPending})

	// generate and add a block with tx1 and tx2 included
	gchain, _ := core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), server.db, 1, func(i int, block *core.BlockGen) {
		block.AddTx(tx1)
		block.AddTx(tx2)
	})
	if _, err := chain.InsertChain(gchain); err != nil {
		panic(err)
	}
	// wait until TxPool processes the inserted block
	for i := 0; i < 10; i++ {
		if pending, _ := server.handler.txpool.Stats(); pending == 1 {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	if pending, _ := server.handler.txpool.Stats(); pending != 1 {
		t.Fatalf("pending count mismatch: have %d, want 1", pending)
	}
	// Discard new block announcement
	msg, _ := server.peer.app.ReadMsg()
	msg.Discard()

	// check if their status is included now
	block1hash := rawdb.ReadCanonicalHash(server.db, 1)
	test(tx1, false, light.TxStatus{Status: core.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 0}})

	test(tx2, false, light.TxStatus{Status: core.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 1}})

	// create a reorg that rolls them back
	gchain, _ = core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), server.db, 2, func(i int, block *core.BlockGen) {})
	if _, err := chain.InsertChain(gchain); err != nil {
		panic(err)
	}
	// wait until TxPool processes the reorg
	for i := 0; i < 10; i++ {
		if pending, _ := server.handler.txpool.Stats(); pending == 3 {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	if pending, _ := server.handler.txpool.Stats(); pending != 3 {
		t.Fatalf("pending count mismatch: have %d, want 3", pending)
	}
	// Discard new block announcement
	msg, _ = server.peer.app.ReadMsg()
	msg.Discard()

	// check if their status is pending again
	test(tx1, false, light.TxStatus{Status: core.TxStatusPending})
	test(tx2, false, light.TxStatus{Status: core.TxStatusPending})
}

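// Tests that the server pauses serving with a StopMsg when a client exhausts
// its flow-control buffer allowance, and announces the partially recharged
// allowance with a ResumeMsg afterwards.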
func TestStopResumeLes3(t *testing.T) {
	server, tearDown := newServerEnv(t, 0, 3, nil, true, true, testBufLimit/10)
	defer tearDown()

	server.handler.server.costTracker.testing = true

	var (
		reqID    uint64
		expBuf   = testBufLimit
		testCost = testBufLimit / 10
	)
	header := server.handler.blockchain.CurrentHeader()
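	// req fires a single header request for the chain head; each request is
	// charged testCost against the peer's flow-control buffer.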
	req := func() {
		reqID++
		sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, &getBlockHeadersData{Origin: hashOrNumber{Hash: header.Hash()}, Amount: 1})
	}
	for i := 1; i <= 5; i++ {
		// send requests while we still have enough buffer and expect a response
		for expBuf >= testCost {
			req()
			expBuf -= testCost
			if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, expBuf, []*types.Header{header}); err != nil {
				t.Errorf("expected response and failed: %v", err)
			}
		}
		// send some more requests in excess and expect a single StopMsg
		c := i
		for c > 0 {
			req()
			c--
		}
		if err := p2p.ExpectMsg(server.peer.app, StopMsg, nil); err != nil {
			t.Errorf("expected StopMsg and failed: %v", err)
		}
		// wait until the buffer is recharged by half of the limit
		wait := testBufLimit / testBufRecharge / 2
		server.clock.(*mclock.Simulated).Run(time.Millisecond * time.Duration(wait))

		// expect a ResumeMsg with the partially recharged buffer value
		expBuf += testBufRecharge * wait
		if err := p2p.ExpectMsg(server.peer.app, ResumeMsg, expBuf); err != nil {
			t.Errorf("expected ResumeMsg and failed: %v", err)
		}
	}
}