github.com/MetalBlockchain/subnet-evm@v0.4.9/eth/filters/filter_system_test.go

// (c) 2019-2022, Ava Labs, Inc.
//
// This file is a derived work, based on the go-ethereum library whose original
// notices appear below.
//
// It is distributed under a license compatible with the licensing terms of the
// original code from which it is derived.
//
// Much love to the original authors for their work.
// **********
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filters

import (
	"context"
	"fmt"
	"math/big"
	"math/rand"
	"reflect"
	"runtime"
	"testing"
	"time"

	"github.com/MetalBlockchain/subnet-evm/consensus/dummy"
	"github.com/MetalBlockchain/subnet-evm/core"
	"github.com/MetalBlockchain/subnet-evm/core/bloombits"
	"github.com/MetalBlockchain/subnet-evm/core/rawdb"
	"github.com/MetalBlockchain/subnet-evm/core/types"
	"github.com/MetalBlockchain/subnet-evm/core/vm"
	"github.com/MetalBlockchain/subnet-evm/ethdb"
	"github.com/MetalBlockchain/subnet-evm/interfaces"
	"github.com/MetalBlockchain/subnet-evm/params"
	"github.com/MetalBlockchain/subnet-evm/rpc"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/event"
	"github.com/stretchr/testify/require"
)

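// testBackend is a lightweight in-memory stand-in for the backend expected by
// the filter system. Each event type is driven by its own event.Feed so tests
// can post headers, transactions and logs directly.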
type testBackend struct {
	db              ethdb.Database
	sections        uint64
	txFeed          event.Feed
	acceptedTxFeed  event.Feed
	logsFeed        event.Feed
	rmLogsFeed      event.Feed
	pendingLogsFeed event.Feed
	chainFeed       event.Feed
}

func (b *testBackend) ChainDb() ethdb.Database {
	return b.db
}

func (b *testBackend) GetVMConfig() *vm.Config {
	return &vm.Config{AllowUnfinalizedQueries: true}
}

func (b *testBackend) GetMaxBlocksPerRequest() int64 {
	return 0
}

func (b *testBackend) LastAcceptedBlock() *types.Block {
	return rawdb.ReadHeadBlock(b.db)
}

func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
	var (
		hash common.Hash
		num  uint64
	)
	if blockNr == rpc.LatestBlockNumber {
		hash = rawdb.ReadHeadBlockHash(b.db)
		number := rawdb.ReadHeaderNumber(b.db, hash)
		if number == nil {
			return nil, nil
		}
		num = *number
	} else {
		num = uint64(blockNr)
		hash = rawdb.ReadCanonicalHash(b.db, num)
	}
	return rawdb.ReadHeader(b.db, hash, num), nil
}

func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
	number := rawdb.ReadHeaderNumber(b.db, hash)
	if number == nil {
		return nil, nil
	}
	return rawdb.ReadHeader(b.db, hash, *number), nil
}

func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
	if number := rawdb.ReadHeaderNumber(b.db, hash); number != nil {
		return rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig), nil
	}
	return nil, nil
}

func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
	logs := rawdb.ReadLogs(b.db, hash, number)
	return logs, nil
}

func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
	return b.txFeed.Subscribe(ch)
}

func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
	return b.rmLogsFeed.Subscribe(ch)
}

func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return b.logsFeed.Subscribe(ch)
}

func (b *testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return b.pendingLogsFeed.Subscribe(ch)
}

func (b *testBackend) SubscribeAcceptedLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return b.logsFeed.Subscribe(ch)
}

func (b *testBackend) SubscribeAcceptedTransactionEvent(ch chan<- core.NewTxsEvent) event.Subscription {
	return b.acceptedTxFeed.Subscribe(ch)
}

func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
	return b.chainFeed.Subscribe(ch)
}

func (b *testBackend) SubscribeChainAcceptedEvent(ch chan<- core.ChainEvent) event.Subscription {
	return b.chainFeed.Subscribe(ch)
}

func (b *testBackend) BloomStatus() (uint64, uint64) {
	return params.BloomBitsBlocks, b.sections
}

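// ServiceFilter serves bloom-bit retrieval requests for the given matcher
// session, randomly skipping roughly a quarter of the requested sections to
// simulate occasional missing deliveries.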
func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
	requests := make(chan chan *bloombits.Retrieval)

	go session.Multiplex(16, 0, requests)
	go func() {
		for {
			// Wait for a service request or a shutdown
			select {
			case <-ctx.Done():
				return

			case request := <-requests:
				task := <-request

				task.Bitsets = make([][]byte, len(task.Sections))
				for i, section := range task.Sections {
					if rand.Int()%4 != 0 { // Handle occasional missing deliveries
						head := rawdb.ReadCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
						task.Bitsets[i], _ = rawdb.ReadBloomBits(b.db, task.Bit, section, head)
					}
				}
				request <- task
			}
		}
	}()
}

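// newTestFilterSystem wires a fresh testBackend to a FilterSystem over the
// given database, using the supplied filter system configuration.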
func newTestFilterSystem(t testing.TB, db ethdb.Database, cfg Config) (*testBackend, *FilterSystem) {
	backend := &testBackend{db: db}
	sys := NewFilterSystem(backend, cfg)
	return backend, sys
}

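// newSectionedTestFilterSystem is like newTestFilterSystem but additionally
// reports the given number of completed bloom index sections via BloomStatus.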
func newSectionedTestFilterSystem(t testing.TB, db ethdb.Database, cfg Config, sections uint64) (*testBackend, *FilterSystem) {
	backend := &testBackend{db: db, sections: sections}
	sys := NewFilterSystem(backend, cfg)
	return backend, sys
}

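// TestBlockSubscription tests that new-head subscribers receive every header
// posted to the chain feed, in order, on each of two concurrent subscriptions.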
func TestBlockSubscription(t *testing.T) {
	t.Parallel()

	var (
		db           = rawdb.NewMemoryDatabase()
		backend, sys = newTestFilterSystem(t, db, Config{})
		api          = NewFilterAPI(sys, false)
		gspec        = &core.Genesis{
			Config:  params.TestChainConfig,
			BaseFee: big.NewInt(1),
		}
		genesis     = gspec.MustCommit(db)
		chain, _, _ = core.GenerateChain(gspec.Config, genesis, dummy.NewFaker(), db, 10, 10, func(i int, b *core.BlockGen) {})
		chainEvents = []core.ChainEvent{}
	)

	for _, blk := range chain {
		chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk})
	}

	chan0 := make(chan *types.Header)
	sub0 := api.events.SubscribeNewHeads(chan0)
	chan1 := make(chan *types.Header)
	sub1 := api.events.SubscribeNewHeads(chan1)

	go func() { // simulate client
		i1, i2 := 0, 0
		for i1 != len(chainEvents) || i2 != len(chainEvents) {
			select {
			case header := <-chan0:
				if chainEvents[i1].Hash != header.Hash() {
					t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash())
				}
				i1++
			case header := <-chan1:
				if chainEvents[i2].Hash != header.Hash() {
					t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash())
				}
				i2++
			}
		}

		sub0.Unsubscribe()
		sub1.Unsubscribe()
	}()

	time.Sleep(1 * time.Second)
	for _, e := range chainEvents {
		backend.chainFeed.Send(e)
	}

	<-sub0.Err()
	<-sub1.Err()
}

// TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the event mux.
func TestPendingTxFilter(t *testing.T) {
	t.Parallel()

	var (
		db           = rawdb.NewMemoryDatabase()
		backend, sys = newTestFilterSystem(t, db, Config{})
		api          = NewFilterAPI(sys, false)

		transactions = []*types.Transaction{
			types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
			types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
			types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
			types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
			types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
		}

		hashes []common.Hash
	)

	fid0 := api.NewPendingTransactionFilter()

	time.Sleep(1 * time.Second)
	backend.txFeed.Send(core.NewTxsEvent{Txs: transactions})

	timeout := time.Now().Add(1 * time.Second)
	for {
		results, err := api.GetFilterChanges(fid0)
		if err != nil {
			t.Fatalf("Unable to retrieve logs: %v", err)
		}

		h := results.([]common.Hash)
		hashes = append(hashes, h...)
		if len(hashes) >= len(transactions) {
			break
		}
		// check timeout
		if time.Now().After(timeout) {
			break
		}

		time.Sleep(100 * time.Millisecond)
	}

	if len(hashes) != len(transactions) {
		t.Errorf("invalid number of transactions, want %d transaction(s), got %d", len(transactions), len(hashes))
		return
	}
	for i := range hashes {
		if hashes[i] != transactions[i].Hash() {
			t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i])
		}
	}
}

// TestLogFilterCreation tests whether a given filter criteria makes sense.
// If not, it must return an error.
func TestLogFilterCreation(t *testing.T) {
	var (
		db     = rawdb.NewMemoryDatabase()
		_, sys = newTestFilterSystem(t, db, Config{})
		api    = NewFilterAPI(sys, false)

		testCases = []struct {
			crit    FilterCriteria
			success bool
		}{
			// defaults
			{FilterCriteria{}, true},
			// valid block number range
			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true},
			// "mined" block range up to the latest block
			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true},
			// new mined and pending blocks
			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true},
			// from block "higher" than to block
			{FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false},
			// from block "higher" than to block
			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
			// from block "higher" than to block
			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
			// from block "higher" than to block
			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false},
		}
	)

	for i, test := range testCases {
		id, err := api.NewFilter(test.crit)
		if err != nil && test.success {
			t.Errorf("expected filter creation for case %d to succeed, got %v", i, err)
		}
		if err == nil {
			api.UninstallFilter(id)
			if !test.success {
				t.Errorf("expected testcase %d to fail with an error", i)
			}
		}
	}
}

// TestInvalidLogFilterCreation tests whether invalid log filter criteria result in an error
// when the filter is created.
func TestInvalidLogFilterCreation(t *testing.T) {
	t.Parallel()

	var (
		db     = rawdb.NewMemoryDatabase()
		_, sys = newTestFilterSystem(t, db, Config{})
		api    = NewFilterAPI(sys, false)
	)

	// different situations where log filter creation should fail.
	// Reason: fromBlock > toBlock
	testCases := []FilterCriteria{
		0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
		1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)},
		2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)},
	}

	for i, test := range testCases {
		if _, err := api.NewFilter(test); err == nil {
			t.Errorf("Expected NewFilter for case #%d to fail", i)
		}
	}
}

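// TestInvalidGetLogsRequest tests that GetLogs rejects criteria that combine a
// block hash with an explicit block range.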
func TestInvalidGetLogsRequest(t *testing.T) {
	var (
		db        = rawdb.NewMemoryDatabase()
		_, sys    = newTestFilterSystem(t, db, Config{})
		api       = NewFilterAPI(sys, false)
		blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
	)

	// Reason: cannot specify both BlockHash and FromBlock/ToBlock
	testCases := []FilterCriteria{
		0: {BlockHash: &blockHash, FromBlock: big.NewInt(100)},
		1: {BlockHash: &blockHash, ToBlock: big.NewInt(500)},
		2: {BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
	}

	for i, test := range testCases {
		if _, err := api.GetLogs(context.Background(), test); err == nil {
			t.Errorf("Expected Logs for case #%d to fail", i)
		}
	}
}

// TestLogFilter tests whether log filters match the correct logs that are posted to the event feed.
func TestLogFilter(t *testing.T) {
	t.Parallel()

	var (
		db           = rawdb.NewMemoryDatabase()
		backend, sys = newTestFilterSystem(t, db, Config{})
		api          = NewFilterAPI(sys, false)

		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")

		// posted twice, once as regular logs and once as pending logs.
		allLogs = []*types.Log{
			{Address: firstAddr},
			{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
			{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2},
			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3},
		}

		expectedCase7  = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]}
		expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]}

		testCases = []struct {
			crit     FilterCriteria
			expected []*types.Log
			id       rpc.ID
		}{
			// match all
			0: {FilterCriteria{}, allLogs, ""},
			// match none due to no matching addresses
			1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""},
			// match logs based on addresses, ignore topics
			2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
			// match none due to no matching topics (match with address)
			3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""},
			// match logs based on addresses and topics
			4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""},
			// match logs based on multiple addresses and "or" topics
			5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""},
			// logs in the pending block
			6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""},
			// mined logs with block num >= 2 or pending logs
			7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""},
			// all "mined" logs with block num >= 2
			8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""},
			// all "mined" logs
			9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""},
			// all "mined" logs with 1 <= block num <= 2 and topic secondTopic
			10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""},
			// all "mined" and pending logs with topic firstTopic
			11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""},
			// match all logs that carry at least one topic, via a wildcard first topic position
			12: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], ""},
		}
	)

	// create all filters
	for i := range testCases {
		testCases[i].id, _ = api.NewFilter(testCases[i].crit)
	}

	// raise events
	time.Sleep(1 * time.Second)
	if nsend := backend.logsFeed.Send(allLogs); nsend == 0 {
		t.Fatal("Logs event not delivered")
	}
	if nsend := backend.pendingLogsFeed.Send(allLogs); nsend == 0 {
		t.Fatal("Pending logs event not delivered")
	}

	for i, tt := range testCases {
		var fetched []*types.Log
		timeout := time.Now().Add(1 * time.Second)
		for { // fetch all expected logs
			results, err := api.GetFilterChanges(tt.id)
			if err != nil {
				t.Fatalf("Unable to fetch logs: %v", err)
			}

			fetched = append(fetched, results.([]*types.Log)...)
			if len(fetched) >= len(tt.expected) {
				break
			}
			// check timeout
			if time.Now().After(timeout) {
				break
			}

			time.Sleep(100 * time.Millisecond)
		}

		if len(fetched) != len(tt.expected) {
			t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
			return
		}

		for l := range fetched {
			if fetched[l].Removed {
				t.Errorf("expected log not to be removed for log %d in case %d", l, i)
			}
			if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
				t.Errorf("invalid log on index %d for case %d", l, i)
			}
		}
	}
}

// TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event feed.
func TestPendingLogsSubscription(t *testing.T) {
	t.Parallel()

	var (
		db           = rawdb.NewMemoryDatabase()
		backend, sys = newTestFilterSystem(t, db, Config{})
		api          = NewFilterAPI(sys, false)

		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
		thirdTopic     = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333")
		fourthTopic    = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444")
		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")

		allLogs = [][]*types.Log{
			{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}},
			{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}},
			{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}},
			{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}},
			{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}},
			{
				{Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
				{Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
				{Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5},
				{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
			},
		}

		pendingBlockNumber = big.NewInt(rpc.PendingBlockNumber.Int64())

		testCases = []struct {
			crit     interfaces.FilterQuery
			expected []*types.Log
			c        chan []*types.Log
			sub      *Subscription
			err      chan error
		}{
			// match all
			{
				interfaces.FilterQuery{FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				flattenLogs(allLogs),
				nil, nil, nil,
			},
			// match none due to no matching addresses
			{
				interfaces.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				nil,
				nil, nil, nil,
			},
			// match logs based on addresses, ignore topics
			{
				interfaces.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				append(flattenLogs(allLogs[:2]), allLogs[5][3]),
				nil, nil, nil,
			},
			// match none due to no matching topics (match with address)
			{
				interfaces.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				nil,
				nil, nil, nil,
			},
			// match logs based on addresses and topics
			{
				interfaces.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				append(flattenLogs(allLogs[3:5]), allLogs[5][0]),
				nil, nil, nil,
			},
			// match logs based on multiple addresses and "or" topics
			{
				interfaces.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				append(flattenLogs(allLogs[2:5]), allLogs[5][0]),
				nil, nil, nil,
			},
			// multiple pending logs; should match only the two logs in block 5 that carry the selected topics
			{
				interfaces.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				[]*types.Log{allLogs[5][0], allLogs[5][2]},
				nil, nil, nil,
			},
			// match none due to only matching new mined logs
			{
				interfaces.FilterQuery{},
				nil,
				nil, nil, nil,
			},
			// match none due to only matching mined logs within a specific block range
			{
				interfaces.FilterQuery{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)},
				nil,
				nil, nil, nil,
			},
			// match all due to matching mined and pending logs
			{
				interfaces.FilterQuery{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())},
				flattenLogs(allLogs),
				nil, nil, nil,
			},
			// match none due to matching logs from a specific block number to new mined blocks
			{
				interfaces.FilterQuery{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
				nil,
				nil, nil, nil,
			},
		}
	)

	// Create all subscriptions first; this ensures every subscription exists before the events are posted.
	// On slow machines this could otherwise lead to missing events when a subscription is created after
	// (some) events are posted.
	for i := range testCases {
		testCases[i].c = make(chan []*types.Log)
		testCases[i].err = make(chan error, 1)

		var err error
		testCases[i].sub, err = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c)
		if err != nil {
			t.Fatalf("SubscribeLogs %d failed: %v\n", i, err)
		}
	}

	for n, test := range testCases {
		i := n
		tt := test
		go func() {
			defer tt.sub.Unsubscribe()

			var fetched []*types.Log

			timeout := time.After(1 * time.Second)
		fetchLoop:
			for {
				select {
				case logs := <-tt.c:
					// Do not break early if we've fetched a number of logs greater
					// than or equal to the number expected. This ensures we do not
					// deadlock the filter system, because it will do a blocking
					// send on this channel if another log arrives.
					fetched = append(fetched, logs...)
				case <-timeout:
					break fetchLoop
				}
			}

			if len(fetched) != len(tt.expected) {
				tt.err <- fmt.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
				return
			}

			for l := range fetched {
				if fetched[l].Removed {
					tt.err <- fmt.Errorf("expected log not to be removed for log %d in case %d", l, i)
					return
				}
				if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
					tt.err <- fmt.Errorf("invalid log on index %d for case %d\n", l, i)
					return
				}
			}
			tt.err <- nil
		}()
	}

	// raise events
	for _, ev := range allLogs {
		backend.pendingLogsFeed.Send(ev)
	}

	for i := range testCases {
		err := <-testCases[i].err
		if err != nil {
			t.Fatalf("test %d failed: %v", i, err)
		}
		<-testCases[i].sub.Err()
	}
}

// TestPendingTxFilterDeadlock tests if the event loop hangs when pending
// txes arrive at the same time that one of multiple filters is timing out.
// Please refer to #22131 for more details.
func TestPendingTxFilterDeadlock(t *testing.T) {
	t.Parallel()
	timeout := 100 * time.Millisecond

	var (
		db           = rawdb.NewMemoryDatabase()
		backend, sys = newTestFilterSystem(t, db, Config{Timeout: timeout})
		api          = NewFilterAPI(sys, false)
		done         = make(chan struct{})
	)

	go func() {
		// Bombard the feed with txs until a signal is received to stop
		i := uint64(0)
		for {
			select {
			case <-done:
				return
			default:
			}

			tx := types.NewTransaction(i, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil)
			backend.txFeed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx}})
			i++
		}
	}()

	// Create a bunch of filters that will
	// time out either in 100ms or 200ms
	fids := make([]rpc.ID, 20)
	for i := 0; i < len(fids); i++ {
		fid := api.NewPendingTransactionFilter()
		fids[i] = fid
		// Wait for at least one tx to arrive in filter
		for {
			hashes, err := api.GetFilterChanges(fid)
			if err != nil {
				t.Fatalf("Filter should exist: %v\n", err)
			}
			if len(hashes.([]common.Hash)) > 0 {
				break
			}
			runtime.Gosched()
		}
	}

	// Wait until filters have timed out
	time.Sleep(3 * timeout)

	// If tx loop doesn't consume `done` after a second
	// it's hanging.
	select {
	case done <- struct{}{}:
		// Check that all filters have been uninstalled
		for _, fid := range fids {
			if _, err := api.GetFilterChanges(fid); err == nil {
				t.Errorf("Filter %s should have been uninstalled\n", fid)
			}
		}
	case <-time.After(1 * time.Second):
		t.Error("Tx sending loop hangs")
	}
}

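// flattenLogs concatenates per-block log slices into a single slice, preserving order.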
func flattenLogs(pl [][]*types.Log) []*types.Log {
	var logs []*types.Log
	for _, l := range pl {
		logs = append(logs, l...)
	}
	return logs
}

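// TestGetLogsRegression checks that GetLogs returns an error, rather than
// succeeding, when queried with a block hash that does not exist in the database.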
func TestGetLogsRegression(t *testing.T) {
	var (
		db     = rawdb.NewMemoryDatabase()
		_, sys = newSectionedTestFilterSystem(t, db, Config{}, 4096)
		api    = NewFilterAPI(sys, false)
		gspec  = core.Genesis{
			Config: params.TestChainConfig,
		}
		genesis = gspec.MustCommit(db)
		_, _, _ = core.GenerateChain(gspec.Config, genesis, dummy.NewFaker(), db, 10, 10, func(i int, b *core.BlockGen) {})
	)

	test := FilterCriteria{BlockHash: &common.Hash{}, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}

	_, err := api.GetLogs(context.Background(), test)
	require.Error(t, err, "unknown block")
}