github.com/daefrom/go-dae@v1.0.1/eth/filters/filter_system_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filters

import (
	"context"
	"fmt"
	"math/big"
	"math/rand"
	"reflect"
	"runtime"
	"testing"
	"time"

	"github.com/daefrom/go-dae"
	"github.com/daefrom/go-dae/common"
	"github.com/daefrom/go-dae/consensus/ethash"
	"github.com/daefrom/go-dae/core"
	"github.com/daefrom/go-dae/core/bloombits"
	"github.com/daefrom/go-dae/core/rawdb"
	"github.com/daefrom/go-dae/core/types"
	"github.com/daefrom/go-dae/ethdb"
	"github.com/daefrom/go-dae/event"
	"github.com/daefrom/go-dae/params"
	"github.com/daefrom/go-dae/rpc"
)

// deadline is the filter inactivity timeout passed to NewFilterAPI; it is
// deliberately long so that filters never expire in the middle of a test.
var (
	deadline = 5 * time.Minute
)

// testBackend is a minimal backend for the filter API under test: it reads
// chain data from an in-memory database and lets tests inject events directly
// through the exposed feeds.
type testBackend struct {
	db              ethdb.Database
	sections        uint64
	txFeed          event.Feed
	logsFeed        event.Feed
	rmLogsFeed      event.Feed
	pendingLogsFeed event.Feed
	chainFeed       event.Feed
}

func (b *testBackend) ChainDb() ethdb.Database {
	return b.db
}

func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
	var (
		hash common.Hash
		num  uint64
	)
	if blockNr == rpc.LatestBlockNumber {
		hash = rawdb.ReadHeadBlockHash(b.db)
		number := rawdb.ReadHeaderNumber(b.db, hash)
		if number == nil {
			return nil, nil
		}
		num = *number
	} else {
		num = uint64(blockNr)
		hash = rawdb.ReadCanonicalHash(b.db, num)
	}
	return rawdb.ReadHeader(b.db, hash, num), nil
}

func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
	number := rawdb.ReadHeaderNumber(b.db, hash)
	if number == nil {
		return nil, nil
	}
	return rawdb.ReadHeader(b.db, hash, *number), nil
}

func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
	if number := rawdb.ReadHeaderNumber(b.db, hash); number != nil {
		return rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig), nil
	}
	return nil, nil
}

func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
	number := rawdb.ReadHeaderNumber(b.db, hash)
	if number == nil {
		return nil, nil
	}
	receipts := rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig)

	logs := make([][]*types.Log, len(receipts))
	for i, receipt := range receipts {
		logs[i] = receipt.Logs
	}
	return logs, nil
}

// PendingBlockAndReceipts reports no pending block; the pending-log tests
// inject pending logs directly through pendingLogsFeed instead.
func (b *testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
	return nil, nil
}

func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
	return b.txFeed.Subscribe(ch)
}

func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
	return b.rmLogsFeed.Subscribe(ch)
}

func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return b.logsFeed.Subscribe(ch)
}

func (b *testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return b.pendingLogsFeed.Subscribe(ch)
}

func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
	return b.chainFeed.Subscribe(ch)
}

// BloomStatus returns the bloom-filter section size in blocks and the number
// of sections the backend pretends to have indexed.
func (b *testBackend) BloomStatus() (uint64, uint64) {
	return params.BloomBitsBlocks, b.sections
}

func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
	requests := make(chan chan *bloombits.Retrieval)

	go session.Multiplex(16, 0, requests)
	go func() {
		for {
			// Wait for a service request or a shutdown
			select {
			case <-ctx.Done():
				return

			case request := <-requests:
				task := <-request

				task.Bitsets = make([][]byte, len(task.Sections))
				for i, section := range task.Sections {
					if rand.Int()%4 != 0 { // Simulate occasional missing deliveries (~1 in 4 dropped)
						head := rawdb.ReadCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
						task.Bitsets[i], _ = rawdb.ReadBloomBits(b.db, task.Bit, section, head)
					}
				}
				request <- task
			}
		}
	}()
}
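
// exampleServeMatcher is a hedged sketch, not used by the tests, of how a
// caller is expected to drive ServiceFilter. It assumes go-ethereum's
// core/bloombits API: Matcher.Start schedules bloom-bit retrievals for a block
// range, ServiceFilter answers them from the database, and candidate block
// numbers arrive on the results channel until it is closed.
func exampleServeMatcher(ctx context.Context, b *testBackend, filters [][][]byte, lastBlock uint64) []uint64 {
	matcher := bloombits.NewMatcher(params.BloomBitsBlocks, filters)

	results := make(chan uint64, 64)
	session, err := matcher.Start(ctx, 0, lastBlock, results)
	if err != nil {
		return nil // the matcher rejected the filter set
	}
	defer session.Close()

	b.ServiceFilter(ctx, session) // serves retrieval requests in the background

	var matches []uint64
	for number := range results { // channel closes once the range is exhausted
		matches = append(matches, number)
	}
	return matches
}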

// TestBlockSubscription tests whether a block subscription returns block
// hashes for posted chain events. It creates two subscriptions up front; each
// should receive every posted chain event, in order.
func TestBlockSubscription(t *testing.T) {
	t.Parallel()

	var (
		db          = rawdb.NewMemoryDatabase()
		backend     = &testBackend{db: db}
		api         = NewFilterAPI(backend, false, deadline)
		genesis     = (&core.Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
		chain, _    = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {})
		chainEvents = []core.ChainEvent{}
	)

	for _, blk := range chain {
		chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk})
	}

	chan0 := make(chan *types.Header)
	sub0 := api.events.SubscribeNewHeads(chan0)
	chan1 := make(chan *types.Header)
	sub1 := api.events.SubscribeNewHeads(chan1)

	go func() { // simulate client
		i1, i2 := 0, 0
		for i1 != len(chainEvents) || i2 != len(chainEvents) {
			select {
			case header := <-chan0:
				if chainEvents[i1].Hash != header.Hash() {
					t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash())
				}
				i1++
			case header := <-chan1:
				if chainEvents[i2].Hash != header.Hash() {
					t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash())
				}
				i2++
			}
		}

		sub0.Unsubscribe()
		sub1.Unsubscribe()
	}()

	time.Sleep(1 * time.Second)
	for _, e := range chainEvents {
		backend.chainFeed.Send(e)
	}

	<-sub0.Err()
	<-sub1.Err()
}
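
// Note: event.Feed.Send blocks until every subscriber has consumed the value
// and returns the number of subscribers it delivered to, which is why the
// consuming goroutine above must already be running when chain events are
// posted. A minimal sketch of that contract, with throwaway names:
//
//	var feed event.Feed
//	ch := make(chan core.ChainEvent)
//	sub := feed.Subscribe(ch)
//	go func() { <-ch }()                  // consumer must be ready...
//	nsent := feed.Send(core.ChainEvent{}) // ...or Send would block; nsent == 1
//	sub.Unsubscribe()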

// TestPendingTxFilter tests whether pending tx filters retrieve all pending
// transactions that are posted to the event feed.
func TestPendingTxFilter(t *testing.T) {
	t.Parallel()

	var (
		db      = rawdb.NewMemoryDatabase()
		backend = &testBackend{db: db}
		api     = NewFilterAPI(backend, false, deadline)

		transactions = []*types.Transaction{
			types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
			types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
			types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
			types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
			types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
		}

		hashes []common.Hash
	)

	fid0 := api.NewPendingTransactionFilter()

	time.Sleep(1 * time.Second)
	backend.txFeed.Send(core.NewTxsEvent{Txs: transactions})

	timeout := time.Now().Add(1 * time.Second)
	for {
		results, err := api.GetFilterChanges(fid0)
		if err != nil {
			t.Fatalf("Unable to retrieve filter changes: %v", err)
		}

		h := results.([]common.Hash)
		hashes = append(hashes, h...)
		if len(hashes) >= len(transactions) {
			break
		}
		// check timeout
		if time.Now().After(timeout) {
			break
		}

		time.Sleep(100 * time.Millisecond)
	}

	if len(hashes) != len(transactions) {
		t.Errorf("invalid number of transactions, want %d transaction(s), got %d", len(transactions), len(hashes))
		return
	}
	for i := range hashes {
		if hashes[i] != transactions[i].Hash() {
			t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i])
		}
	}
}
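
// Note: NewPendingTransactionFilter and GetFilterChanges back the
// eth_newPendingTransactionFilter and eth_getFilterChanges JSON-RPC methods,
// so the polling loop above mirrors what an RPC client would do.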

// TestLogFilterCreation tests whether given filter criteria make sense.
// If not, filter creation must return an error.
func TestLogFilterCreation(t *testing.T) {
	var (
		db      = rawdb.NewMemoryDatabase()
		backend = &testBackend{db: db}
		api     = NewFilterAPI(backend, false, deadline)

		testCases = []struct {
			crit    FilterCriteria
			success bool
		}{
			// defaults
			{FilterCriteria{}, true},
			// valid block number range
			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true},
   291  			// "mined" block range to pending
   292  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true},
			// new mined and pending blocks
			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true},
			// from block "higher" than to block
			{FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false},
			// from block "higher" than to block
			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
			// from block "higher" than to block
			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
			// from block "higher" than to block
			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false},
		}
	)

	for i, test := range testCases {
		id, err := api.NewFilter(test.crit)
		if err != nil && test.success {
			t.Errorf("expected filter creation for case %d to succeed, got %v", i, err)
		}
		if err == nil {
			api.UninstallFilter(id)
			if !test.success {
				t.Errorf("expected testcase %d to fail with an error", i)
			}
		}
	}
}

// TestInvalidLogFilterCreation tests whether invalid log filter criteria
// result in an error when the filter is created.
func TestInvalidLogFilterCreation(t *testing.T) {
	t.Parallel()

	var (
		db      = rawdb.NewMemoryDatabase()
		backend = &testBackend{db: db}
		api     = NewFilterAPI(backend, false, deadline)
	)

	// different situations where log filter creation should fail.
	// Reason: fromBlock > toBlock
	testCases := []FilterCriteria{
		0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
		1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)},
		2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)},
	}

	for i, test := range testCases {
		if _, err := api.NewFilter(test); err == nil {
			t.Errorf("Expected NewFilter for case #%d to fail", i)
		}
	}
}

func TestInvalidGetLogsRequest(t *testing.T) {
	var (
		db        = rawdb.NewMemoryDatabase()
		backend   = &testBackend{db: db}
		api       = NewFilterAPI(backend, false, deadline)
		blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
	)

	// Reason: cannot specify both BlockHash and FromBlock/ToBlock
	testCases := []FilterCriteria{
		0: {BlockHash: &blockHash, FromBlock: big.NewInt(100)},
		1: {BlockHash: &blockHash, ToBlock: big.NewInt(500)},
		2: {BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
	}

	for i, test := range testCases {
		if _, err := api.GetLogs(context.Background(), test); err == nil {
			t.Errorf("Expected Logs for case #%d to fail", i)
		}
	}
}
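
// Note: per EIP-234, a log query may specify either a block hash or a
// fromBlock/toBlock range, never both; that mutual exclusion is the rule
// exercised above.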

// TestLogFilter tests whether log filters match the correct logs that are posted to the event feed.
func TestLogFilter(t *testing.T) {
	t.Parallel()

	var (
		db      = rawdb.NewMemoryDatabase()
		backend = &testBackend{db: db}
		api     = NewFilterAPI(backend, false, deadline)

		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")

		// posted twice, once as regular logs and once as pending logs.
		allLogs = []*types.Log{
			{Address: firstAddr},
			{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
			{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2},
			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3},
		}

		expectedCase7  = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]}
		expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]}

		testCases = []struct {
			crit     FilterCriteria
			expected []*types.Log
			id       rpc.ID
		}{
			// match all
			0: {FilterCriteria{}, allLogs, ""},
			// match none due to no matching addresses
			1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""},
			// match logs based on addresses, ignore topics
			2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
			// match none due to no matching topics (match with address)
			3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""},
			// match logs based on addresses and topics
			4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""},
			// match logs based on multiple addresses and "or" topics
			5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""},
			// logs in the pending block
			6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""},
			// mined logs with block num >= 2 or pending logs
			7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""},
			// all "mined" logs with block num >= 2
			8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""},
			// all "mined" logs
			9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""},
   422  			// all "mined" logs with 1>= block num <=2 and topic secondTopic
   423  			10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""},
   424  			// all "mined" and pending logs with topic firstTopic
   425  			11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""},
			// match all logs carrying at least one topic (wildcard in the first topic position)
			12: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], ""},
		}
	)

	// create all filters
	for i := range testCases {
		testCases[i].id, _ = api.NewFilter(testCases[i].crit)
	}

	// raise events
	time.Sleep(1 * time.Second)
	if nsend := backend.logsFeed.Send(allLogs); nsend == 0 {
		t.Fatal("Logs event not delivered")
	}
	if nsend := backend.pendingLogsFeed.Send(allLogs); nsend == 0 {
		t.Fatal("Pending logs event not delivered")
	}

	for i, tt := range testCases {
		var fetched []*types.Log
		timeout := time.Now().Add(1 * time.Second)
		for { // fetch all expected logs
			results, err := api.GetFilterChanges(tt.id)
			if err != nil {
				t.Fatalf("Unable to fetch logs: %v", err)
			}

			fetched = append(fetched, results.([]*types.Log)...)
			if len(fetched) >= len(tt.expected) {
				break
			}
			// check timeout
			if time.Now().After(timeout) {
				break
			}

			time.Sleep(100 * time.Millisecond)
		}

		if len(fetched) != len(tt.expected) {
			t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
			return
		}

		for l := range fetched {
			if fetched[l].Removed {
				t.Errorf("expected log not to be removed for log %d in case %d", l, i)
			}
			if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
				t.Errorf("invalid log on index %d for case %d", l, i)
			}
		}
	}
}
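
// Note: installed filters polled through GetFilterChanges (above) and one-shot
// queries through GetLogs accept the same criteria; a hedged sketch of the
// one-shot form, reusing the addresses and topics defined in TestLogFilter:
//
//	logs, err := api.GetLogs(context.Background(), FilterCriteria{
//		Addresses: []common.Address{firstAddr},
//		Topics:    [][]common.Hash{{firstTopic}},
//	})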

// TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event feed.
func TestPendingLogsSubscription(t *testing.T) {
	t.Parallel()

	var (
		db      = rawdb.NewMemoryDatabase()
		backend = &testBackend{db: db}
		api     = NewFilterAPI(backend, false, deadline)

		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
		thirdTopic     = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333")
		fourthTopic    = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444")
		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")

		allLogs = [][]*types.Log{
			{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}},
			{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}},
			{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}},
			{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}},
			{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}},
			{
				{Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
				{Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
				{Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5},
				{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
			},
		}

		pendingBlockNumber = big.NewInt(rpc.PendingBlockNumber.Int64())

		testCases = []struct {
			crit     ethereum.FilterQuery
			expected []*types.Log
			c        chan []*types.Log
			sub      *Subscription
			err      chan error
		}{
			// match all
			{
				ethereum.FilterQuery{FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				flattenLogs(allLogs),
				nil, nil, nil,
			},
			// match none due to no matching addresses
			{
				ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				nil,
				nil, nil, nil,
			},
			// match logs based on addresses, ignore topics
			{
				ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				append(flattenLogs(allLogs[:2]), allLogs[5][3]),
				nil, nil, nil,
			},
			// match none due to no matching topics (match with address)
			{
				ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				nil,
				nil, nil, nil,
			},
			// match logs based on addresses and topics
			{
				ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				append(flattenLogs(allLogs[3:5]), allLogs[5][0]),
				nil, nil, nil,
			},
			// match logs based on multiple addresses and "or" topics
			{
				ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				append(flattenLogs(allLogs[2:5]), allLogs[5][0]),
				nil, nil, nil,
			},
			// multiple pending logs: should match only the two logs in block 5 that carry firstTopic or fourthTopic
			{
				ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				[]*types.Log{allLogs[5][0], allLogs[5][2]},
				nil, nil, nil,
			},
			// match none due to only matching new mined logs
			{
				ethereum.FilterQuery{},
				nil,
				nil, nil, nil,
			},
			// match none due to only matching mined logs within a specific block range
			{
				ethereum.FilterQuery{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)},
				nil,
				nil, nil, nil,
			},
			// match all due to matching mined and pending logs
			{
				ethereum.FilterQuery{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())},
				flattenLogs(allLogs),
				nil, nil, nil,
			},
			// match none due to matching logs from a specific block number to new mined blocks
			{
				ethereum.FilterQuery{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
				nil,
				nil, nil, nil,
			},
		}
	)

	// Create all subscriptions first; this ensures every subscription exists
	// before the events are posted. On slow machines, subscribing later could
	// otherwise miss (some) events.
	for i := range testCases {
		testCases[i].c = make(chan []*types.Log)
		testCases[i].err = make(chan error, 1)

		var err error
		testCases[i].sub, err = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c)
		if err != nil {
			t.Fatalf("SubscribeLogs %d failed: %v", i, err)
		}
	}

	for n, test := range testCases {
		i := n
		tt := test
		go func() {
			defer tt.sub.Unsubscribe()

			var fetched []*types.Log

			timeout := time.After(1 * time.Second)
		fetchLoop:
			for {
				select {
				case logs := <-tt.c:
					// Do not break early if we've fetched greater, or equal,
					// to the number of logs expected. This ensures we do not
					// deadlock the filter system because it will do a blocking
					// send on this channel if another log arrives.
					fetched = append(fetched, logs...)
				case <-timeout:
					break fetchLoop
				}
			}

			if len(fetched) != len(tt.expected) {
				tt.err <- fmt.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
				return
			}

			for l := range fetched {
				if fetched[l].Removed {
					tt.err <- fmt.Errorf("expected log not to be removed for log %d in case %d", l, i)
					return
				}
				if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
   641  					tt.err <- fmt.Errorf("invalid log on index %d for case %d\n", l, i)
					return
				}
			}
			tt.err <- nil
		}()
	}

	// raise events
	for _, ev := range allLogs {
		backend.pendingLogsFeed.Send(ev)
	}

	for i := range testCases {
		err := <-testCases[i].err
		if err != nil {
			t.Fatalf("test %d failed: %v", i, err)
		}
		<-testCases[i].sub.Err()
	}
}

// TestPendingTxFilterDeadlock tests that the event loop does not hang when
// pending txs arrive at the same time that one of multiple filters is timing
// out. Please refer to #22131 for more details.
func TestPendingTxFilterDeadlock(t *testing.T) {
	t.Parallel()
	timeout := 100 * time.Millisecond

	var (
		db      = rawdb.NewMemoryDatabase()
		backend = &testBackend{db: db}
		api     = NewFilterAPI(backend, false, timeout)
		done    = make(chan struct{})
	)

	go func() {
		// Bombard the feed with txs until a stop signal is received
		i := uint64(0)
		for {
			select {
			case <-done:
				return
			default:
			}

			tx := types.NewTransaction(i, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil)
			backend.txFeed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx}})
			i++
		}
	}()

	// Create a bunch of filters that will
	// timeout either in 100ms or 200ms
	fids := make([]rpc.ID, 20)
	for i := 0; i < len(fids); i++ {
		fid := api.NewPendingTransactionFilter()
		fids[i] = fid
		// Wait for at least one tx to arrive in filter
		for {
			hashes, err := api.GetFilterChanges(fid)
			if err != nil {
   703  				t.Fatalf("Filter should exist: %v\n", err)
			}
			if len(hashes.([]common.Hash)) > 0 {
				break
			}
			runtime.Gosched()
		}
	}

	// Wait until filters have timed out
	time.Sleep(3 * timeout)

	// If tx loop doesn't consume `done` after a second
	// it's hanging.
	select {
	case done <- struct{}{}:
		// Check that all filters have been uninstalled
		for _, fid := range fids {
			if _, err := api.GetFilterChanges(fid); err == nil {
   722  				t.Errorf("Filter %s should have been uninstalled\n", fid)
			}
		}
	case <-time.After(1 * time.Second):
		t.Error("Tx sending loop hangs")
	}
}
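
// Note: the filter API uninstalls any filter that has not been polled within
// its timeout argument (deadline elsewhere in this file, 100ms here), which is
// what the check above relies on when it expects GetFilterChanges to return
// errors for expired filter ids.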

// flattenLogs concatenates a slice of per-block log slices into a single flat slice.
func flattenLogs(pl [][]*types.Log) []*types.Log {
	var logs []*types.Log
	for _, l := range pl {
		logs = append(logs, l...)
	}
	return logs
}