github.com/theQRL/go-zond@v0.2.1/zond/filters/filter_system_test.go

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package filters
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"math/big"
    23  	"math/rand"
    24  	"reflect"
    25  	"runtime"
    26  	"testing"
    27  	"time"
    28  
    29  	"github.com/theQRL/go-zond/common"
    30  	"github.com/theQRL/go-zond/consensus/beacon"
    31  	"github.com/theQRL/go-zond/core"
    32  	"github.com/theQRL/go-zond/core/bloombits"
    33  	"github.com/theQRL/go-zond/core/rawdb"
    34  	"github.com/theQRL/go-zond/core/types"
    35  	"github.com/theQRL/go-zond/event"
    36  	"github.com/theQRL/go-zond/internal/zondapi"
    37  	"github.com/theQRL/go-zond/params"
    38  	"github.com/theQRL/go-zond/rpc"
    39  	"github.com/theQRL/go-zond/zonddb"
    40  )
    41  
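         // testBackend is a lightweight test implementation of the backend required by the
         // filter system, backed by a raw database and a set of event feeds that the tests
         // post events to.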
    42  type testBackend struct {
    43  	db              zonddb.Database
    44  	sections        uint64
    45  	txFeed          event.Feed
    46  	logsFeed        event.Feed
    47  	rmLogsFeed      event.Feed
    48  	chainFeed       event.Feed
    49  	pendingBlock    *types.Block
    50  	pendingReceipts types.Receipts
    51  }
    52  
    53  func (b *testBackend) ChainConfig() *params.ChainConfig {
    54  	return params.TestChainConfig
    55  }
    56  
    57  func (b *testBackend) CurrentHeader() *types.Header {
    58  	hdr, _ := b.HeaderByNumber(context.TODO(), rpc.LatestBlockNumber)
    59  	return hdr
    60  }
    61  
    62  func (b *testBackend) ChainDb() zonddb.Database {
    63  	return b.db
    64  }
    65  
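         // HeaderByNumber resolves the latest and finalized markers via the head and finalized
         // block hashes stored in the database, and explicit numbers via the canonical chain;
         // safe blocks are not supported by this test backend.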
    66  func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
    67  	var (
    68  		hash common.Hash
    69  		num  uint64
    70  	)
    71  	switch blockNr {
    72  	case rpc.LatestBlockNumber:
    73  		hash = rawdb.ReadHeadBlockHash(b.db)
    74  		number := rawdb.ReadHeaderNumber(b.db, hash)
    75  		if number == nil {
    76  			return nil, nil
    77  		}
    78  		num = *number
    79  	case rpc.FinalizedBlockNumber:
    80  		hash = rawdb.ReadFinalizedBlockHash(b.db)
    81  		number := rawdb.ReadHeaderNumber(b.db, hash)
    82  		if number == nil {
    83  			return nil, nil
    84  		}
    85  		num = *number
    86  	case rpc.SafeBlockNumber:
    87  		return nil, errors.New("safe block not found")
    88  	default:
    89  		num = uint64(blockNr)
    90  		hash = rawdb.ReadCanonicalHash(b.db, num)
    91  	}
    92  	return rawdb.ReadHeader(b.db, hash, num), nil
    93  }
    94  
    95  func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
    96  	number := rawdb.ReadHeaderNumber(b.db, hash)
    97  	if number == nil {
    98  		return nil, nil
    99  	}
   100  	return rawdb.ReadHeader(b.db, hash, *number), nil
   101  }
   102  
   103  func (b *testBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
   104  	if body := rawdb.ReadBody(b.db, hash, uint64(number)); body != nil {
   105  		return body, nil
   106  	}
   107  	return nil, errors.New("block body not found")
   108  }
   109  
   110  func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
   111  	if number := rawdb.ReadHeaderNumber(b.db, hash); number != nil {
   112  		if header := rawdb.ReadHeader(b.db, hash, *number); header != nil {
   113  			return rawdb.ReadReceipts(b.db, hash, *number, header.Time, params.TestChainConfig), nil
   114  		}
   115  	}
   116  	return nil, nil
   117  }
   118  
   119  func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
   120  	logs := rawdb.ReadLogs(b.db, hash, number)
   121  	return logs, nil
   122  }
   123  
   124  func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
   125  	return b.txFeed.Subscribe(ch)
   126  }
   127  
   128  func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
   129  	return b.rmLogsFeed.Subscribe(ch)
   130  }
   131  
   132  func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
   133  	return b.logsFeed.Subscribe(ch)
   134  }
   135  
   136  func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
   137  	return b.chainFeed.Subscribe(ch)
   138  }
   139  
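         // BloomStatus returns the fixed bloom-bits section size and the number of processed sections.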
   140  func (b *testBackend) BloomStatus() (uint64, uint64) {
   141  	return params.BloomBitsBlocks, b.sections
   142  }
   143  
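         // ServiceFilter simulates the bloom-bits retrieval service: it answers matcher requests
         // from the database, randomly skipping roughly a quarter of the sections to exercise the
         // handling of missing deliveries.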
   144  func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
   145  	requests := make(chan chan *bloombits.Retrieval)
   146  
   147  	go session.Multiplex(16, 0, requests)
   148  	go func() {
   149  		for {
   150  			// Wait for a service request or a shutdown
   151  			select {
   152  			case <-ctx.Done():
   153  				return
   154  
   155  			case request := <-requests:
   156  				task := <-request
   157  
   158  				task.Bitsets = make([][]byte, len(task.Sections))
   159  				for i, section := range task.Sections {
   160  					if rand.Int()%4 != 0 { // Handle occasional missing deliveries
   161  						head := rawdb.ReadCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
   162  						task.Bitsets[i], _ = rawdb.ReadBloomBits(b.db, task.Bit, section, head)
   163  					}
   164  				}
   165  				request <- task
   166  			}
   167  		}
   168  	}()
   169  }
   170  
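         // setPending records the given block and receipts as the backend's pending block data.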
   171  func (b *testBackend) setPending(block *types.Block, receipts types.Receipts) {
   172  	b.pendingBlock = block
   173  	b.pendingReceipts = receipts
   174  }
   175  
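         // newTestFilterSystem creates a testBackend on top of the given database and returns it
         // together with a FilterSystem constructed from that backend and the supplied config.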
   176  func newTestFilterSystem(t testing.TB, db zonddb.Database, cfg Config) (*testBackend, *FilterSystem) {
   177  	backend := &testBackend{db: db}
   178  	sys := NewFilterSystem(backend, cfg)
   179  	return backend, sys
   180  }
   181  
    182  // TestBlockSubscription tests whether a block subscription returns the correct block headers for
    183  // posted chain events. It creates two subscriptions at the start:
    184  // - sub0, which should receive a header for every posted chain event
    185  // - sub1, which should independently receive the same headers
    186  // Both are unsubscribed once all events have been delivered.
   187  func TestBlockSubscription(t *testing.T) {
   188  	t.Parallel()
   189  
   190  	var (
   191  		db           = rawdb.NewMemoryDatabase()
   192  		backend, sys = newTestFilterSystem(t, db, Config{})
   193  		api          = NewFilterAPI(sys)
   194  		genesis      = &core.Genesis{
   195  			Config:  params.TestChainConfig,
   196  			BaseFee: big.NewInt(params.InitialBaseFee),
   197  		}
   198  		_, chain, _ = core.GenerateChainWithGenesis(genesis, beacon.NewFaker(), 10, func(i int, gen *core.BlockGen) {})
   199  		chainEvents []core.ChainEvent
   200  	)
   201  
   202  	for _, blk := range chain {
   203  		chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk})
   204  	}
   205  
   206  	chan0 := make(chan *types.Header)
   207  	sub0 := api.events.SubscribeNewHeads(chan0)
   208  	chan1 := make(chan *types.Header)
   209  	sub1 := api.events.SubscribeNewHeads(chan1)
   210  
   211  	go func() { // simulate client
   212  		i1, i2 := 0, 0
   213  		for i1 != len(chainEvents) || i2 != len(chainEvents) {
   214  			select {
   215  			case header := <-chan0:
   216  				if chainEvents[i1].Hash != header.Hash() {
   217  					t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash())
   218  				}
   219  				i1++
   220  			case header := <-chan1:
   221  				if chainEvents[i2].Hash != header.Hash() {
   222  					t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash())
   223  				}
   224  				i2++
   225  			}
   226  		}
   227  
   228  		sub0.Unsubscribe()
   229  		sub1.Unsubscribe()
   230  	}()
   231  
   232  	time.Sleep(1 * time.Second)
   233  	for _, e := range chainEvents {
   234  		backend.chainFeed.Send(e)
   235  	}
   236  
   237  	<-sub0.Err()
   238  	<-sub1.Err()
   239  }
   240  
    241  // TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the transaction feed.
   242  func TestPendingTxFilter(t *testing.T) {
   243  	t.Parallel()
   244  
   245  	var (
   246  		db           = rawdb.NewMemoryDatabase()
   247  		backend, sys = newTestFilterSystem(t, db, Config{})
   248  		api          = NewFilterAPI(sys)
   249  
   250  		to, _        = common.NewAddressFromString("Zb794f5ea0ba39494ce83a213fffba74279579268")
   251  		transactions = []*types.Transaction{
   252  			types.NewTx(&types.DynamicFeeTx{Nonce: 0, To: &to, Value: new(big.Int), Gas: 0, GasFeeCap: new(big.Int), Data: nil}),
   253  			types.NewTx(&types.DynamicFeeTx{Nonce: 1, To: &to, Value: new(big.Int), Gas: 0, GasFeeCap: new(big.Int), Data: nil}),
   254  			types.NewTx(&types.DynamicFeeTx{Nonce: 2, To: &to, Value: new(big.Int), Gas: 0, GasFeeCap: new(big.Int), Data: nil}),
   255  			types.NewTx(&types.DynamicFeeTx{Nonce: 3, To: &to, Value: new(big.Int), Gas: 0, GasFeeCap: new(big.Int), Data: nil}),
   256  			types.NewTx(&types.DynamicFeeTx{Nonce: 4, To: &to, Value: new(big.Int), Gas: 0, GasFeeCap: new(big.Int), Data: nil}),
   257  		}
   258  
   259  		hashes []common.Hash
   260  	)
   261  
   262  	fid0 := api.NewPendingTransactionFilter(nil)
   263  
   264  	time.Sleep(1 * time.Second)
   265  	backend.txFeed.Send(core.NewTxsEvent{Txs: transactions})
   266  
   267  	timeout := time.Now().Add(1 * time.Second)
   268  	for {
   269  		results, err := api.GetFilterChanges(fid0)
   270  		if err != nil {
   271  			t.Fatalf("Unable to retrieve logs: %v", err)
   272  		}
   273  
   274  		h := results.([]common.Hash)
   275  		hashes = append(hashes, h...)
   276  		if len(hashes) >= len(transactions) {
   277  			break
   278  		}
   279  		// check timeout
   280  		if time.Now().After(timeout) {
   281  			break
   282  		}
   283  
   284  		time.Sleep(100 * time.Millisecond)
   285  	}
   286  
   287  	if len(hashes) != len(transactions) {
    288  		t.Errorf("invalid number of transactions, want %d transaction(s), got %d", len(transactions), len(hashes))
   289  		return
   290  	}
   291  	for i := range hashes {
   292  		if hashes[i] != transactions[i].Hash() {
   293  			t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i])
   294  		}
   295  	}
   296  }
   297  
    298  // TestPendingTxFilterFullTx tests whether pending tx filters retrieve all pending transactions that are posted to the transaction feed.
   299  func TestPendingTxFilterFullTx(t *testing.T) {
   300  	t.Parallel()
   301  
   302  	var (
   303  		db           = rawdb.NewMemoryDatabase()
   304  		backend, sys = newTestFilterSystem(t, db, Config{})
   305  		api          = NewFilterAPI(sys)
   306  
   307  		to, _        = common.NewAddressFromString("Zb794f5ea0ba39494ce83a213fffba74279579268")
   308  		transactions = []*types.Transaction{
   309  			types.NewTx(&types.DynamicFeeTx{Nonce: 0, To: &to, Value: new(big.Int), Gas: 0, GasFeeCap: new(big.Int), Data: nil}),
   310  			types.NewTx(&types.DynamicFeeTx{Nonce: 1, To: &to, Value: new(big.Int), Gas: 0, GasFeeCap: new(big.Int), Data: nil}),
   311  			types.NewTx(&types.DynamicFeeTx{Nonce: 2, To: &to, Value: new(big.Int), Gas: 0, GasFeeCap: new(big.Int), Data: nil}),
   312  			types.NewTx(&types.DynamicFeeTx{Nonce: 3, To: &to, Value: new(big.Int), Gas: 0, GasFeeCap: new(big.Int), Data: nil}),
   313  			types.NewTx(&types.DynamicFeeTx{Nonce: 4, To: &to, Value: new(big.Int), Gas: 0, GasFeeCap: new(big.Int), Data: nil}),
   314  		}
   315  
   316  		txs []*zondapi.RPCTransaction
   317  	)
   318  
   319  	fullTx := true
   320  	fid0 := api.NewPendingTransactionFilter(&fullTx)
   321  
   322  	time.Sleep(1 * time.Second)
   323  	backend.txFeed.Send(core.NewTxsEvent{Txs: transactions})
   324  
   325  	timeout := time.Now().Add(1 * time.Second)
   326  	for {
   327  		results, err := api.GetFilterChanges(fid0)
   328  		if err != nil {
   329  			t.Fatalf("Unable to retrieve logs: %v", err)
   330  		}
   331  
   332  		tx := results.([]*zondapi.RPCTransaction)
   333  		txs = append(txs, tx...)
   334  		if len(txs) >= len(transactions) {
   335  			break
   336  		}
   337  		// check timeout
   338  		if time.Now().After(timeout) {
   339  			break
   340  		}
   341  
   342  		time.Sleep(100 * time.Millisecond)
   343  	}
   344  
   345  	if len(txs) != len(transactions) {
    346  		t.Errorf("invalid number of transactions, want %d transaction(s), got %d", len(transactions), len(txs))
   347  		return
   348  	}
   349  	for i := range txs {
   350  		if txs[i].Hash != transactions[i].Hash() {
   351  			t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), txs[i].Hash)
   352  		}
   353  	}
   354  }
   355  
    356  // TestLogFilterCreation tests whether a given set of filter criteria makes sense.
    357  // If not, it must return an error.
   358  func TestLogFilterCreation(t *testing.T) {
   359  	var (
   360  		db     = rawdb.NewMemoryDatabase()
   361  		_, sys = newTestFilterSystem(t, db, Config{})
   362  		api    = NewFilterAPI(sys)
   363  
   364  		testCases = []struct {
   365  			crit    FilterCriteria
   366  			success bool
   367  		}{
   368  			// defaults
   369  			{FilterCriteria{}, true},
   370  			// valid block number range
   371  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true},
    372  			// "mined" block range to latest
   373  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true},
   374  			// from block "higher" than to block
   375  			{FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false},
   376  			// from block "higher" than to block
   377  			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
   378  			// from block "higher" than to block
   379  			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
   380  			// from block "higher" than to block
   381  			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false},
    382  			// more than 4 topics
   383  			{FilterCriteria{Topics: [][]common.Hash{{}, {}, {}, {}, {}}}, false},
   384  		}
   385  	)
   386  
   387  	for i, test := range testCases {
   388  		id, err := api.NewFilter(test.crit)
   389  		if err != nil && test.success {
    390  			t.Errorf("expected filter creation for case %d to succeed, got %v", i, err)
   391  		}
   392  		if err == nil {
   393  			api.UninstallFilter(id)
   394  			if !test.success {
   395  				t.Errorf("expected testcase %d to fail with an error", i)
   396  			}
   397  		}
   398  	}
   399  }
   400  
    401  // TestInvalidLogFilterCreation tests whether invalid log filter criteria result in an error
   402  // when the filter is created.
   403  func TestInvalidLogFilterCreation(t *testing.T) {
   404  	t.Parallel()
   405  
   406  	var (
   407  		db     = rawdb.NewMemoryDatabase()
   408  		_, sys = newTestFilterSystem(t, db, Config{})
   409  		api    = NewFilterAPI(sys)
   410  	)
   411  
    412  	// Different situations where log filter creation should fail.
    413  	// Cases 0-2: fromBlock > toBlock. Case 3: more than 4 topics.
   414  	testCases := []FilterCriteria{
   415  		0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
   416  		1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)},
   417  		2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)},
   418  		3: {Topics: [][]common.Hash{{}, {}, {}, {}, {}}},
   419  	}
   420  
   421  	for i, test := range testCases {
   422  		if _, err := api.NewFilter(test); err == nil {
   423  			t.Errorf("Expected NewFilter for case #%d to fail", i)
   424  		}
   425  	}
   426  }
   427  
   428  // TestInvalidGetLogsRequest tests invalid getLogs requests
   429  func TestInvalidGetLogsRequest(t *testing.T) {
   430  	t.Parallel()
   431  
   432  	var (
   433  		db        = rawdb.NewMemoryDatabase()
   434  		_, sys    = newTestFilterSystem(t, db, Config{})
   435  		api       = NewFilterAPI(sys)
   436  		blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   437  	)
   438  
    439  	// Reason: cannot specify both BlockHash and FromBlock/ToBlock (case 3: too many topics)
   440  	testCases := []FilterCriteria{
   441  		0: {BlockHash: &blockHash, FromBlock: big.NewInt(100)},
   442  		1: {BlockHash: &blockHash, ToBlock: big.NewInt(500)},
   443  		2: {BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
   444  		3: {BlockHash: &blockHash, Topics: [][]common.Hash{{}, {}, {}, {}, {}}},
   445  	}
   446  
   447  	for i, test := range testCases {
   448  		if _, err := api.GetLogs(context.Background(), test); err == nil {
   449  			t.Errorf("Expected Logs for case #%d to fail", i)
   450  		}
   451  	}
   452  }
   453  
    454  // TestInvalidGetRangeLogsRequest tests getLogs with an invalid block range
   455  func TestInvalidGetRangeLogsRequest(t *testing.T) {
   456  	t.Parallel()
   457  
   458  	var (
   459  		db     = rawdb.NewMemoryDatabase()
   460  		_, sys = newTestFilterSystem(t, db, Config{})
   461  		api    = NewFilterAPI(sys)
   462  	)
   463  
   464  	if _, err := api.GetLogs(context.Background(), FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}); err != errInvalidBlockRange {
   465  		t.Errorf("Expected Logs for invalid range return error, but got: %v", err)
   466  	}
   467  }
   468  
   469  // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed.
   470  func TestLogFilter(t *testing.T) {
   471  	t.Parallel()
   472  
   473  	var (
   474  		db           = rawdb.NewMemoryDatabase()
   475  		backend, sys = newTestFilterSystem(t, db, Config{})
   476  		api          = NewFilterAPI(sys)
   477  
   478  		firstAddr, _   = common.NewAddressFromString("Z1111111111111111111111111111111111111111")
   479  		secondAddr, _  = common.NewAddressFromString("Z2222222222222222222222222222222222222222")
   480  		thirdAddr, _   = common.NewAddressFromString("Z3333333333333333333333333333333333333333")
   481  		notUsedAddr, _ = common.NewAddressFromString("Z9999999999999999999999999999999999999999")
   482  		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   483  		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
   484  		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
   485  
    486  		// posted once to the logs feed after all filters have been installed
   487  		allLogs = []*types.Log{
   488  			{Address: firstAddr},
   489  			{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
   490  			{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
   491  			{Address: thirdAddr, Topics: []common.Hash{secondTopic}, BlockNumber: 2},
   492  			{Address: thirdAddr, Topics: []common.Hash{secondTopic}, BlockNumber: 3},
   493  		}
   494  
   495  		testCases = []struct {
   496  			crit     FilterCriteria
   497  			expected []*types.Log
   498  			id       rpc.ID
   499  		}{
   500  			// match all
   501  			0: {FilterCriteria{}, allLogs, ""},
   502  			// match none due to no matching addresses
   503  			1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddr}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""},
   504  			// match logs based on addresses, ignore topics
   505  			2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
   506  			// match none due to no matching topics (match with address)
   507  			3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""},
   508  			// match logs based on addresses and topics
   509  			4: {FilterCriteria{Addresses: []common.Address{thirdAddr}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""},
   510  			// match logs based on multiple addresses and "or" topics
   511  			5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddr}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""},
   512  			// all "mined" logs with block num >= 2
   513  			6: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""},
   514  			// all "mined" logs
   515  			7: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""},
    516  			// all "mined" logs with 1 <= block num <= 2 and topic secondTopic
   517  			8: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""},
   518  			// match all logs due to wildcard topic
   519  			9: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], ""},
   520  		}
   521  	)
   522  
   523  	// create all filters
   524  	for i := range testCases {
   525  		testCases[i].id, _ = api.NewFilter(testCases[i].crit)
   526  	}
   527  
   528  	// raise events
   529  	time.Sleep(1 * time.Second)
   530  	if nsend := backend.logsFeed.Send(allLogs); nsend == 0 {
   531  		t.Fatal("Logs event not delivered")
   532  	}
   533  
   534  	for i, tt := range testCases {
   535  		var fetched []*types.Log
   536  		timeout := time.Now().Add(1 * time.Second)
   537  		for { // fetch all expected logs
   538  			results, err := api.GetFilterChanges(tt.id)
   539  			if err != nil {
   540  				t.Fatalf("test %d: unable to fetch logs: %v", i, err)
   541  			}
   542  
   543  			fetched = append(fetched, results.([]*types.Log)...)
   544  			if len(fetched) >= len(tt.expected) {
   545  				break
   546  			}
   547  			// check timeout
   548  			if time.Now().After(timeout) {
   549  				break
   550  			}
   551  
   552  			time.Sleep(100 * time.Millisecond)
   553  		}
   554  
   555  		if len(fetched) != len(tt.expected) {
   556  			t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
   557  			return
   558  		}
   559  
   560  		for l := range fetched {
   561  			if fetched[l].Removed {
   562  				t.Errorf("expected log not to be removed for log %d in case %d", l, i)
   563  			}
   564  			if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
   565  				t.Errorf("invalid log on index %d for case %d", l, i)
   566  			}
   567  		}
   568  	}
   569  }
   570  
    571  // TestPendingTxFilterDeadlock tests that the event loop does not hang when pending
    572  // txs arrive at the same time that one of multiple filters is timing out.
   573  // Please refer to #22131 for more details.
   574  func TestPendingTxFilterDeadlock(t *testing.T) {
   575  	t.Parallel()
   576  	timeout := 100 * time.Millisecond
   577  
   578  	var (
   579  		db           = rawdb.NewMemoryDatabase()
   580  		backend, sys = newTestFilterSystem(t, db, Config{Timeout: timeout})
   581  		api          = NewFilterAPI(sys)
   582  		done         = make(chan struct{})
   583  	)
   584  
   585  	go func() {
    586  		// Bombard the feed with txs until the stop signal is received
   587  		i := uint64(0)
   588  		for {
   589  			select {
   590  			case <-done:
   591  				return
   592  			default:
   593  			}
   594  
   595  			to, _ := common.NewAddressFromString("Zb794f5ea0ba39494ce83a213fffba74279579268")
   596  			tx := types.NewTx(&types.DynamicFeeTx{Nonce: i, To: &to, Value: new(big.Int), Gas: 0, GasFeeCap: new(big.Int), Data: nil})
   597  			backend.txFeed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx}})
   598  			i++
   599  		}
   600  	}()
   601  
   602  	// Create a bunch of filters that will
    603  	// time out either in 100ms or 200ms
   604  	subs := make([]*Subscription, 20)
   605  	for i := 0; i < len(subs); i++ {
   606  		fid := api.NewPendingTransactionFilter(nil)
   607  		f, ok := api.filters[fid]
   608  		if !ok {
   609  			t.Fatalf("Filter %s should exist", fid)
   610  		}
   611  		subs[i] = f.s
   612  		// Wait for at least one tx to arrive in filter
   613  		for {
   614  			hashes, err := api.GetFilterChanges(fid)
   615  			if err != nil {
   616  				t.Fatalf("Filter should exist: %v\n", err)
   617  			}
   618  			if len(hashes.([]common.Hash)) > 0 {
   619  				break
   620  			}
   621  			runtime.Gosched()
   622  		}
   623  	}
   624  
   625  	// Wait until filters have timed out and have been uninstalled.
   626  	for _, sub := range subs {
   627  		select {
   628  		case <-sub.Err():
   629  		case <-time.After(1 * time.Second):
   630  			t.Fatalf("Filter timeout is hanging")
   631  		}
   632  	}
   633  }