github.com/calmw/ethereum@v0.1.1/eth/filters/filter_system_test.go

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package filters
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"fmt"
    23  	"math/big"
    24  	"math/rand"
    25  	"reflect"
    26  	"runtime"
    27  	"testing"
    28  	"time"
    29  
    30  	"github.com/calmw/ethereum"
    31  	"github.com/calmw/ethereum/common"
    32  	"github.com/calmw/ethereum/consensus/ethash"
    33  	"github.com/calmw/ethereum/core"
    34  	"github.com/calmw/ethereum/core/bloombits"
    35  	"github.com/calmw/ethereum/core/rawdb"
    36  	"github.com/calmw/ethereum/core/types"
    37  	"github.com/calmw/ethereum/crypto"
    38  	"github.com/calmw/ethereum/ethdb"
    39  	"github.com/calmw/ethereum/event"
    40  	"github.com/calmw/ethereum/internal/ethapi"
    41  	"github.com/calmw/ethereum/params"
    42  	"github.com/calmw/ethereum/rpc"
    43  )
    44  
    45  type testBackend struct {
    46  	db              ethdb.Database
    47  	sections        uint64
    48  	txFeed          event.Feed
    49  	logsFeed        event.Feed
    50  	rmLogsFeed      event.Feed
    51  	pendingLogsFeed event.Feed
    52  	chainFeed       event.Feed
    53  }
    54  
    55  func (b *testBackend) ChainConfig() *params.ChainConfig {
    56  	return params.TestChainConfig
    57  }
    58  
    59  func (b *testBackend) CurrentHeader() *types.Header {
    60  	hdr, _ := b.HeaderByNumber(context.TODO(), rpc.LatestBlockNumber)
    61  	return hdr
    62  }
    63  
    64  func (b *testBackend) ChainDb() ethdb.Database {
    65  	return b.db
    66  }
    67  
    68  func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
    69  	var (
    70  		hash common.Hash
    71  		num  uint64
    72  	)
    73  	switch blockNr {
    74  	case rpc.LatestBlockNumber:
    75  		hash = rawdb.ReadHeadBlockHash(b.db)
    76  		number := rawdb.ReadHeaderNumber(b.db, hash)
    77  		if number == nil {
    78  			return nil, nil
    79  		}
    80  		num = *number
    81  	case rpc.FinalizedBlockNumber:
    82  		hash = rawdb.ReadFinalizedBlockHash(b.db)
    83  		number := rawdb.ReadHeaderNumber(b.db, hash)
    84  		if number == nil {
    85  			return nil, nil
    86  		}
    87  		num = *number
    88  	case rpc.SafeBlockNumber:
    89  		return nil, errors.New("safe block not found")
    90  	default:
    91  		num = uint64(blockNr)
    92  		hash = rawdb.ReadCanonicalHash(b.db, num)
    93  	}
    94  	return rawdb.ReadHeader(b.db, hash, num), nil
    95  }
    96  
    97  func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
    98  	number := rawdb.ReadHeaderNumber(b.db, hash)
    99  	if number == nil {
   100  		return nil, nil
   101  	}
   102  	return rawdb.ReadHeader(b.db, hash, *number), nil
   103  }
   104  
   105  func (b *testBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
   106  	if body := rawdb.ReadBody(b.db, hash, uint64(number)); body != nil {
   107  		return body, nil
   108  	}
   109  	return nil, errors.New("block body not found")
   110  }
   111  
   112  func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
   113  	if number := rawdb.ReadHeaderNumber(b.db, hash); number != nil {
   114  		if header := rawdb.ReadHeader(b.db, hash, *number); header != nil {
   115  			return rawdb.ReadReceipts(b.db, hash, *number, header.Time, params.TestChainConfig), nil
   116  		}
   117  	}
   118  	return nil, nil
   119  }
   120  
   121  func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
   122  	logs := rawdb.ReadLogs(b.db, hash, number, params.TestChainConfig)
   123  	return logs, nil
   124  }
   125  
   126  func (b *testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
   127  	return nil, nil
   128  }
   129  
   130  func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
   131  	return b.txFeed.Subscribe(ch)
   132  }
   133  
   134  func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
   135  	return b.rmLogsFeed.Subscribe(ch)
   136  }
   137  
   138  func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
   139  	return b.logsFeed.Subscribe(ch)
   140  }
   141  
   142  func (b *testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
   143  	return b.pendingLogsFeed.Subscribe(ch)
   144  }
   145  
   146  func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
   147  	return b.chainFeed.Subscribe(ch)
   148  }
   149  
   150  func (b *testBackend) BloomStatus() (uint64, uint64) {
   151  	return params.BloomBitsBlocks, b.sections
   152  }
   153  
   154  func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
   155  	requests := make(chan chan *bloombits.Retrieval)
   156  
   157  	go session.Multiplex(16, 0, requests)
   158  	go func() {
   159  		for {
   160  			// Wait for a service request or a shutdown
   161  			select {
   162  			case <-ctx.Done():
   163  				return
   164  
   165  			case request := <-requests:
   166  				task := <-request
   167  
   168  				task.Bitsets = make([][]byte, len(task.Sections))
   169  				for i, section := range task.Sections {
   170  					if rand.Int()%4 != 0 { // Handle occasional missing deliveries
   171  						head := rawdb.ReadCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
   172  						task.Bitsets[i], _ = rawdb.ReadBloomBits(b.db, task.Bit, section, head)
   173  					}
   174  				}
   175  				request <- task
   176  			}
   177  		}
   178  	}()
   179  }
   180  
   181  func newTestFilterSystem(t testing.TB, db ethdb.Database, cfg Config) (*testBackend, *FilterSystem) {
   182  	backend := &testBackend{db: db}
   183  	sys := NewFilterSystem(backend, cfg)
   184  	return backend, sys
   185  }
   186  
   187  // TestBlockSubscription tests whether a block subscription delivers the headers of posted chain events.
   188  // It creates two new-head subscriptions up front:
   189  // - sub0 on chan0, which should receive every posted chain event
   190  // - sub1 on chan1, which should also receive every posted chain event
   191  // Both subscriptions are unsubscribed once all events have been received.
   192  func TestBlockSubscription(t *testing.T) {
   193  	t.Parallel()
   194  
   195  	var (
   196  		db           = rawdb.NewMemoryDatabase()
   197  		backend, sys = newTestFilterSystem(t, db, Config{})
   198  		api          = NewFilterAPI(sys, false)
   199  		genesis      = &core.Genesis{
   200  			Config:  params.TestChainConfig,
   201  			BaseFee: big.NewInt(params.InitialBaseFee),
   202  		}
   203  		_, chain, _ = core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 10, func(i int, gen *core.BlockGen) {})
   204  		chainEvents = []core.ChainEvent{}
   205  	)
   206  
   207  	for _, blk := range chain {
   208  		chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk})
   209  	}
   210  
   211  	chan0 := make(chan *types.Header)
   212  	sub0 := api.events.SubscribeNewHeads(chan0)
   213  	chan1 := make(chan *types.Header)
   214  	sub1 := api.events.SubscribeNewHeads(chan1)
   215  
   216  	go func() { // simulate client
   217  		i1, i2 := 0, 0
   218  		for i1 != len(chainEvents) || i2 != len(chainEvents) {
   219  			select {
   220  			case header := <-chan0:
   221  				if chainEvents[i1].Hash != header.Hash() {
   222  					t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash())
   223  				}
   224  				i1++
   225  			case header := <-chan1:
   226  				if chainEvents[i2].Hash != header.Hash() {
   227  					t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash())
   228  				}
   229  				i2++
   230  			}
   231  		}
   232  
   233  		sub0.Unsubscribe()
   234  		sub1.Unsubscribe()
   235  	}()
   236  
   237  	time.Sleep(1 * time.Second)
   238  	for _, e := range chainEvents {
   239  		backend.chainFeed.Send(e)
   240  	}
   241  
   242  	<-sub0.Err()
   243  	<-sub1.Err()
   244  }
   245  
   246  // TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the event feed.
   247  func TestPendingTxFilter(t *testing.T) {
   248  	t.Parallel()
   249  
   250  	var (
   251  		db           = rawdb.NewMemoryDatabase()
   252  		backend, sys = newTestFilterSystem(t, db, Config{})
   253  		api          = NewFilterAPI(sys, false)
   254  
   255  		transactions = []*types.Transaction{
   256  			types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   257  			types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   258  			types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   259  			types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   260  			types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   261  		}
   262  
   263  		hashes []common.Hash
   264  	)
   265  
   266  	fid0 := api.NewPendingTransactionFilter(nil)
   267  
   268  	time.Sleep(1 * time.Second)
   269  	backend.txFeed.Send(core.NewTxsEvent{Txs: transactions})
   270  
   271  	timeout := time.Now().Add(1 * time.Second)
   272  	for {
   273  		results, err := api.GetFilterChanges(fid0)
   274  		if err != nil {
   275  			t.Fatalf("Unable to retrieve logs: %v", err)
   276  		}
   277  
   278  		h := results.([]common.Hash)
   279  		hashes = append(hashes, h...)
   280  		if len(hashes) >= len(transactions) {
   281  			break
   282  		}
   283  		// check timeout
   284  		if time.Now().After(timeout) {
   285  			break
   286  		}
   287  
   288  		time.Sleep(100 * time.Millisecond)
   289  	}
   290  
   291  	if len(hashes) != len(transactions) {
   292  		t.Errorf("invalid number of transactions, want %d transaction(s), got %d", len(transactions), len(hashes))
   293  		return
   294  	}
   295  	for i := range hashes {
   296  		if hashes[i] != transactions[i].Hash() {
   297  			t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i])
   298  		}
   299  	}
   300  }
   301  
   302  // TestPendingTxFilterFullTx tests whether pending tx filters retrieve all pending transactions that are posted to the event feed.
   303  func TestPendingTxFilterFullTx(t *testing.T) {
   304  	t.Parallel()
   305  
   306  	var (
   307  		db           = rawdb.NewMemoryDatabase()
   308  		backend, sys = newTestFilterSystem(t, db, Config{})
   309  		api          = NewFilterAPI(sys, false)
   310  
   311  		transactions = []*types.Transaction{
   312  			types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   313  			types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   314  			types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   315  			types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   316  			types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   317  		}
   318  
   319  		txs []*ethapi.RPCTransaction
   320  	)
   321  
   322  	fullTx := true
   323  	fid0 := api.NewPendingTransactionFilter(&fullTx)
   324  
   325  	time.Sleep(1 * time.Second)
   326  	backend.txFeed.Send(core.NewTxsEvent{Txs: transactions})
   327  
   328  	timeout := time.Now().Add(1 * time.Second)
   329  	for {
   330  		results, err := api.GetFilterChanges(fid0)
   331  		if err != nil {
   332  			t.Fatalf("Unable to retrieve logs: %v", err)
   333  		}
   334  
   335  		tx := results.([]*ethapi.RPCTransaction)
   336  		txs = append(txs, tx...)
   337  		if len(txs) >= len(transactions) {
   338  			break
   339  		}
   340  		// check timeout
   341  		if time.Now().After(timeout) {
   342  			break
   343  		}
   344  
   345  		time.Sleep(100 * time.Millisecond)
   346  	}
   347  
   348  	if len(txs) != len(transactions) {
   349  		t.Errorf("invalid number of transactions, want %d transaction(s), got %d", len(transactions), len(txs))
   350  		return
   351  	}
   352  	for i := range txs {
   353  		if txs[i].Hash != transactions[i].Hash() {
   354  			t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), txs[i].Hash)
   355  		}
   356  	}
   357  }
   358  
   359  // TestLogFilterCreation tests whether given filter criteria make sense.
   360  // If not, it must return an error.
   361  func TestLogFilterCreation(t *testing.T) {
   362  	var (
   363  		db     = rawdb.NewMemoryDatabase()
   364  		_, sys = newTestFilterSystem(t, db, Config{})
   365  		api    = NewFilterAPI(sys, false)
   366  
   367  		testCases = []struct {
   368  			crit    FilterCriteria
   369  			success bool
   370  		}{
   371  			// defaults
   372  			{FilterCriteria{}, true},
   373  			// valid block number range
   374  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true},
   375  			// "mined" block range up to the latest block
   376  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true},
   377  			// new mined and pending blocks
   378  			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true},
   379  			// from block "higher" than to block
   380  			{FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false},
   381  			// from block "higher" than to block
   382  			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
   383  			// from block "higher" than to block
   384  			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
   385  			// from block "higher" than to block
   386  			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false},
   387  		}
   388  	)
   389  
   390  	for i, test := range testCases {
   391  		id, err := api.NewFilter(test.crit)
   392  		if err != nil && test.success {
   393  			t.Errorf("expected filter creation for case %d to succeed, got %v", i, err)
   394  		}
   395  		if err == nil {
   396  			api.UninstallFilter(id)
   397  			if !test.success {
   398  				t.Errorf("expected testcase %d to fail with an error", i)
   399  			}
   400  		}
   401  	}
   402  }
   403  
   404  // TestInvalidLogFilterCreation tests whether invalid log filter criteria result in an error
   405  // when the filter is created.
   406  func TestInvalidLogFilterCreation(t *testing.T) {
   407  	t.Parallel()
   408  
   409  	var (
   410  		db     = rawdb.NewMemoryDatabase()
   411  		_, sys = newTestFilterSystem(t, db, Config{})
   412  		api    = NewFilterAPI(sys, false)
   413  	)
   414  
   415  	// different situations where log filter creation should fail.
   416  	// Reason: fromBlock > toBlock
   417  	testCases := []FilterCriteria{
   418  		0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
   419  		1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)},
   420  		2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)},
   421  	}
   422  
   423  	for i, test := range testCases {
   424  		if _, err := api.NewFilter(test); err == nil {
   425  			t.Errorf("Expected NewFilter for case #%d to fail", i)
   426  		}
   427  	}
   428  }
   429  
   430  func TestInvalidGetLogsRequest(t *testing.T) {
   431  	var (
   432  		db        = rawdb.NewMemoryDatabase()
   433  		_, sys    = newTestFilterSystem(t, db, Config{})
   434  		api       = NewFilterAPI(sys, false)
   435  		blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   436  	)
   437  
   438  	// Reason: Cannot specify both BlockHash and FromBlock/ToBlock
   439  	testCases := []FilterCriteria{
   440  		0: {BlockHash: &blockHash, FromBlock: big.NewInt(100)},
   441  		1: {BlockHash: &blockHash, ToBlock: big.NewInt(500)},
   442  		2: {BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
   443  	}
   444  
   445  	for i, test := range testCases {
   446  		if _, err := api.GetLogs(context.Background(), test); err == nil {
   447  			t.Errorf("Expected Logs for case #%d to fail", i)
   448  		}
   449  	}
   450  }
   451  
   452  // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed.
   453  func TestLogFilter(t *testing.T) {
   454  	t.Parallel()
   455  
   456  	var (
   457  		db           = rawdb.NewMemoryDatabase()
   458  		backend, sys = newTestFilterSystem(t, db, Config{})
   459  		api          = NewFilterAPI(sys, false)
   460  
   461  		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
   462  		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
   463  		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
   464  		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
   465  		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   466  		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
   467  		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
   468  
   469  		// posted twice, once as regular logs and once as pending logs.
   470  		allLogs = []*types.Log{
   471  			{Address: firstAddr},
   472  			{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
   473  			{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
   474  			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2},
   475  			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3},
   476  		}
   477  
   478  		expectedCase7  = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]}
   479  		expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]}
   480  
   481  		testCases = []struct {
   482  			crit     FilterCriteria
   483  			expected []*types.Log
   484  			id       rpc.ID
   485  		}{
   486  			// match all
   487  			0: {FilterCriteria{}, allLogs, ""},
   488  			// match none due to no matching addresses
   489  			1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""},
   490  			// match logs based on addresses, ignore topics
   491  			2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
   492  			// match none due to no matching topics (match with address)
   493  			3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""},
   494  			// match logs based on addresses and topics
   495  			4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""},
   496  			// match logs based on multiple addresses and "or" topics
   497  			5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""},
   498  			// logs in the pending block
   499  			6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""},
   500  			// mined logs with block num >= 2 or pending logs
   501  			7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""},
   502  			// all "mined" logs with block num >= 2
   503  			8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""},
   504  			// all "mined" logs
   505  			9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""},
   506  			// all "mined" logs with 1 <= block num <= 2 and topic secondTopic
   507  			10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""},
   508  			// all "mined" and pending logs with topic firstTopic
   509  			11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""},
   510  			// match all logs that carry at least one topic, due to the wildcard topic
   511  			12: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], ""},
   512  		}
   513  	)
   514  
   515  	// create all filters
   516  	for i := range testCases {
   517  		testCases[i].id, _ = api.NewFilter(testCases[i].crit)
   518  	}
   519  
   520  	// raise events
   521  	time.Sleep(1 * time.Second)
   522  	if nsend := backend.logsFeed.Send(allLogs); nsend == 0 {
   523  		t.Fatal("Logs event not delivered")
   524  	}
   525  	if nsend := backend.pendingLogsFeed.Send(allLogs); nsend == 0 {
   526  		t.Fatal("Pending logs event not delivered")
   527  	}
   528  
   529  	for i, tt := range testCases {
   530  		var fetched []*types.Log
   531  		timeout := time.Now().Add(1 * time.Second)
   532  		for { // fetch all expected logs
   533  			results, err := api.GetFilterChanges(tt.id)
   534  			if err != nil {
   535  				t.Fatalf("Unable to fetch logs: %v", err)
   536  			}
   537  
   538  			fetched = append(fetched, results.([]*types.Log)...)
   539  			if len(fetched) >= len(tt.expected) {
   540  				break
   541  			}
   542  			// check timeout
   543  			if time.Now().After(timeout) {
   544  				break
   545  			}
   546  
   547  			time.Sleep(100 * time.Millisecond)
   548  		}
   549  
   550  		if len(fetched) != len(tt.expected) {
   551  			t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
   552  			return
   553  		}
   554  
   555  		for l := range fetched {
   556  			if fetched[l].Removed {
   557  				t.Errorf("expected log not to be removed for log %d in case %d", l, i)
   558  			}
   559  			if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
   560  				t.Errorf("invalid log on index %d for case %d", l, i)
   561  			}
   562  		}
   563  	}
   564  }
   565  
   566  // TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event feed.
   567  func TestPendingLogsSubscription(t *testing.T) {
   568  	t.Parallel()
   569  
   570  	var (
   571  		db           = rawdb.NewMemoryDatabase()
   572  		backend, sys = newTestFilterSystem(t, db, Config{})
   573  		api          = NewFilterAPI(sys, false)
   574  
   575  		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
   576  		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
   577  		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
   578  		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
   579  		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   580  		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
   581  		thirdTopic     = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333")
   582  		fourthTopic    = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444")
   583  		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
   584  
   585  		allLogs = [][]*types.Log{
   586  			{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}},
   587  			{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}},
   588  			{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}},
   589  			{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}},
   590  			{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}},
   591  			{
   592  				{Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
   593  				{Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
   594  				{Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5},
   595  				{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
   596  			},
   597  		}
   598  
   599  		pendingBlockNumber = big.NewInt(rpc.PendingBlockNumber.Int64())
   600  
   601  		testCases = []struct {
   602  			crit     ethereum.FilterQuery
   603  			expected []*types.Log
   604  			c        chan []*types.Log
   605  			sub      *Subscription
   606  			err      chan error
   607  		}{
   608  			// match all
   609  			{
   610  				ethereum.FilterQuery{FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
   611  				flattenLogs(allLogs),
   612  				nil, nil, nil,
   613  			},
   614  			// match none due to no matching addresses
   615  			{
   616  				ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
   617  				nil,
   618  				nil, nil, nil,
   619  			},
   620  			// match logs based on addresses, ignore topics
   621  			{
   622  				ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
   623  				append(flattenLogs(allLogs[:2]), allLogs[5][3]),
   624  				nil, nil, nil,
   625  			},
   626  			// match none due to no matching topics (match with address)
   627  			{
   628  				ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
   629  				nil,
   630  				nil, nil, nil,
   631  			},
   632  			// match logs based on addresses and topics
   633  			{
   634  				ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
   635  				append(flattenLogs(allLogs[3:5]), allLogs[5][0]),
   636  				nil, nil, nil,
   637  			},
   638  			// match logs based on multiple addresses and "or" topics
   639  			{
   640  				ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
   641  				append(flattenLogs(allLogs[2:5]), allLogs[5][0]),
   642  				nil, nil, nil,
   643  			},
   644  			// multiple pending logs; should match only the two logs in block 5 with the requested topics
   645  			{
   646  				ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
   647  				[]*types.Log{allLogs[5][0], allLogs[5][2]},
   648  				nil, nil, nil,
   649  			},
   650  			// match none due to only matching new mined logs
   651  			{
   652  				ethereum.FilterQuery{},
   653  				nil,
   654  				nil, nil, nil,
   655  			},
   656  			// match none due to only matching mined logs within a specific block range
   657  			{
   658  				ethereum.FilterQuery{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)},
   659  				nil,
   660  				nil, nil, nil,
   661  			},
   662  			// match all due to matching mined and pending logs
   663  			{
   664  				ethereum.FilterQuery{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())},
   665  				flattenLogs(allLogs),
   666  				nil, nil, nil,
   667  			},
   668  			// match none due to matching logs from a specific block number to new mined blocks
   669  			{
   670  				ethereum.FilterQuery{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
   671  				nil,
   672  				nil, nil, nil,
   673  			},
   674  		}
   675  	)
   676  
   677  	// Create all subscriptions first; this ensures they all exist before the events are posted.
   678  	// On slow machines, events could otherwise be missed when a subscription is created after
   679  	// (some) events have already been posted.
   680  	for i := range testCases {
   681  		testCases[i].c = make(chan []*types.Log)
   682  		testCases[i].err = make(chan error, 1)
   683  
   684  		var err error
   685  		testCases[i].sub, err = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c)
   686  		if err != nil {
   687  			t.Fatalf("SubscribeLogs %d failed: %v\n", i, err)
   688  		}
   689  	}
   690  
   691  	for n, test := range testCases {
   692  		i := n
   693  		tt := test
   694  		go func() {
   695  			defer tt.sub.Unsubscribe()
   696  
   697  			var fetched []*types.Log
   698  
   699  			timeout := time.After(1 * time.Second)
   700  		fetchLoop:
   701  			for {
   702  				select {
   703  				case logs := <-tt.c:
   704  				// Do not break early once we have fetched at least the
   705  				// expected number of logs. This ensures we do not deadlock
   706  				// the filter system, which does a blocking send on this
   707  				// channel if another log arrives.
   708  					fetched = append(fetched, logs...)
   709  				case <-timeout:
   710  					break fetchLoop
   711  				}
   712  			}
   713  
   714  			if len(fetched) != len(tt.expected) {
   715  				tt.err <- fmt.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
   716  				return
   717  			}
   718  
   719  			for l := range fetched {
   720  				if fetched[l].Removed {
   721  					tt.err <- fmt.Errorf("expected log not to be removed for log %d in case %d", l, i)
   722  					return
   723  				}
   724  				if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
   725  					tt.err <- fmt.Errorf("invalid log on index %d for case %d", l, i)
   726  					return
   727  				}
   728  			}
   729  			tt.err <- nil
   730  		}()
   731  	}
   732  
   733  	// raise events
   734  	for _, ev := range allLogs {
   735  		backend.pendingLogsFeed.Send(ev)
   736  	}
   737  
   738  	for i := range testCases {
   739  		err := <-testCases[i].err
   740  		if err != nil {
   741  			t.Fatalf("test %d failed: %v", i, err)
   742  		}
   743  		<-testCases[i].sub.Err()
   744  	}
   745  }
   746  
   747  func TestLightFilterLogs(t *testing.T) {
   748  	t.Parallel()
   749  
   750  	var (
   751  		db           = rawdb.NewMemoryDatabase()
   752  		backend, sys = newTestFilterSystem(t, db, Config{})
   753  		api          = NewFilterAPI(sys, true)
   754  		signer       = types.HomesteadSigner{}
   755  
   756  		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
   757  		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
   758  		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
   759  		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
   760  		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   761  		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
   762  
   763  		// stored in the block receipts below and delivered to the filters via chain events.
   764  		allLogs = []*types.Log{
   765  			// Block 2
   766  			{Address: firstAddr, Topics: []common.Hash{}, Data: []byte{}, BlockNumber: 2, Index: 0},
   767  			// Block 3
   768  			{Address: firstAddr, Topics: []common.Hash{firstTopic}, Data: []byte{}, BlockNumber: 3, Index: 0},
   769  			{Address: secondAddr, Topics: []common.Hash{firstTopic}, Data: []byte{}, BlockNumber: 3, Index: 1},
   770  			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, Data: []byte{}, BlockNumber: 3, Index: 2},
   771  			// Block 4
   772  			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, Data: []byte{}, BlockNumber: 4, Index: 0},
   773  		}
   774  
   775  		testCases = []struct {
   776  			crit     FilterCriteria
   777  			expected []*types.Log
   778  			id       rpc.ID
   779  		}{
   780  			// match all
   781  			0: {FilterCriteria{}, allLogs, ""},
   782  			// match none due to no matching addresses
   783  			1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""},
   784  			// match logs based on addresses, ignore topics
   785  			2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
   786  			// match logs based on addresses and topics
   787  			3: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""},
   788  			// all logs with block num >= 3
   789  			4: {FilterCriteria{FromBlock: big.NewInt(3), ToBlock: big.NewInt(5)}, allLogs[1:], ""},
   790  			// all logs
   791  			5: {FilterCriteria{FromBlock: big.NewInt(0), ToBlock: big.NewInt(5)}, allLogs, ""},
   792  			// all logs with 2 <= block num <= 3 and topic secondTopic
   793  			6: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(3), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""},
   794  		}
   795  
   796  		key, _  = crypto.GenerateKey()
   797  		addr    = crypto.PubkeyToAddress(key.PublicKey)
   798  		genesis = &core.Genesis{Config: params.TestChainConfig,
   799  			Alloc: core.GenesisAlloc{
   800  				addr: {Balance: big.NewInt(params.Ether)},
   801  			},
   802  		}
   803  		receipts = []*types.Receipt{{
   804  			Logs: []*types.Log{allLogs[0]},
   805  		}, {
   806  			Logs: []*types.Log{allLogs[1], allLogs[2], allLogs[3]},
   807  		}, {
   808  			Logs: []*types.Log{allLogs[4]},
   809  		}}
   810  	)
   811  
   812  	_, blocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 4, func(i int, b *core.BlockGen) {
   813  		if i == 0 {
   814  			return
   815  		}
   816  		receipts[i-1].Bloom = types.CreateBloom(types.Receipts{receipts[i-1]})
   817  		b.AddUncheckedReceipt(receipts[i-1])
   818  		tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i - 1), To: &common.Address{}, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), signer, key)
   819  		b.AddTx(tx)
   820  	})
   821  	for i, block := range blocks {
   822  		rawdb.WriteBlock(db, block)
   823  		rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
   824  		rawdb.WriteHeadBlockHash(db, block.Hash())
   825  		if i > 0 {
   826  			rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), []*types.Receipt{receipts[i-1]})
   827  		}
   828  	}
   829  	// create all filters
   830  	for i := range testCases {
   831  		id, err := api.NewFilter(testCases[i].crit)
   832  		if err != nil {
   833  			t.Fatal(err)
   834  		}
   835  		testCases[i].id = id
   836  	}
   837  
   838  	// raise events
   839  	time.Sleep(1 * time.Second)
   840  	for _, block := range blocks {
   841  		backend.chainFeed.Send(core.ChainEvent{Block: block, Hash: common.Hash{}, Logs: allLogs})
   842  	}
   843  
   844  	for i, tt := range testCases {
   845  		var fetched []*types.Log
   846  		timeout := time.Now().Add(1 * time.Second)
   847  		for { // fetch all expected logs
   848  			results, err := api.GetFilterChanges(tt.id)
   849  			if err != nil {
   850  				t.Fatalf("Unable to fetch logs: %v", err)
   851  			}
   852  			fetched = append(fetched, results.([]*types.Log)...)
   853  			if len(fetched) >= len(tt.expected) {
   854  				break
   855  			}
   856  			// check timeout
   857  			if time.Now().After(timeout) {
   858  				break
   859  			}
   860  
   861  			time.Sleep(100 * time.Millisecond)
   862  		}
   863  
   864  		if len(fetched) != len(tt.expected) {
   865  			t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
   866  			return
   867  		}
   868  
   869  		for l := range fetched {
   870  			if fetched[l].Removed {
   871  				t.Errorf("expected log not to be removed for log %d in case %d", l, i)
   872  			}
   873  			expected := *tt.expected[l]
   874  			blockNum := expected.BlockNumber - 1
   875  			expected.BlockHash = blocks[blockNum].Hash()
   876  			expected.TxHash = blocks[blockNum].Transactions()[0].Hash()
   877  			if !reflect.DeepEqual(fetched[l], &expected) {
   878  				t.Errorf("invalid log on index %d for case %d", l, i)
   879  			}
   880  		}
   881  	}
   882  }
   883  
   884  // TestPendingTxFilterDeadlock tests if the event loop hangs when pending
   885  // txes arrive at the same time that one of multiple filters is timing out.
   886  // Please refer to #22131 for more details.
   887  func TestPendingTxFilterDeadlock(t *testing.T) {
   888  	t.Parallel()
   889  	timeout := 100 * time.Millisecond
   890  
   891  	var (
   892  		db           = rawdb.NewMemoryDatabase()
   893  		backend, sys = newTestFilterSystem(t, db, Config{Timeout: timeout})
   894  		api          = NewFilterAPI(sys, false)
   895  		done         = make(chan struct{})
   896  	)
   897  
   898  	go func() {
   899  		// Bombard the feed with txes until a signal to stop is received
   900  		i := uint64(0)
   901  		for {
   902  			select {
   903  			case <-done:
   904  				return
   905  			default:
   906  			}
   907  
   908  			tx := types.NewTransaction(i, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil)
   909  			backend.txFeed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx}})
   910  			i++
   911  		}
   912  	}()
   913  
   914  	// Create a bunch of filters that will
   915  	// time out either in 100ms or 200ms
   916  	fids := make([]rpc.ID, 20)
   917  	for i := 0; i < len(fids); i++ {
   918  		fid := api.NewPendingTransactionFilter(nil)
   919  		fids[i] = fid
   920  		// Wait for at least one tx to arrive in filter
   921  		for {
   922  			hashes, err := api.GetFilterChanges(fid)
   923  			if err != nil {
   924  				t.Fatalf("Filter should exist: %v\n", err)
   925  			}
   926  			if len(hashes.([]common.Hash)) > 0 {
   927  				break
   928  			}
   929  			runtime.Gosched()
   930  		}
   931  	}
   932  
   933  	// Wait until filters have timed out
   934  	time.Sleep(3 * timeout)
   935  
   936  	// If tx loop doesn't consume `done` after a second
   937  	// it's hanging.
   938  	select {
   939  	case done <- struct{}{}:
   940  		// Check that all filters have been uninstalled
   941  		for _, fid := range fids {
   942  			if _, err := api.GetFilterChanges(fid); err == nil {
   943  				t.Errorf("Filter %s should have been uninstalled\n", fid)
   944  			}
   945  		}
   946  	case <-time.After(1 * time.Second):
   947  		t.Error("Tx sending loop hangs")
   948  	}
   949  }
   950  
   951  func flattenLogs(pl [][]*types.Log) []*types.Log {
   952  	var logs []*types.Log
   953  	for _, l := range pl {
   954  		logs = append(logs, l...)
   955  	}
   956  	return logs
   957  }