github.com/aitimate-0/go-ethereum@v1.9.7/eth/filters/filter_system_test.go (about)

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package filters
    18  
import (
	"context"
	"fmt"
	"math/big"
	"math/rand"
	"reflect"
	"sync"
	"testing"
	"time"

	ethereum "github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/bloombits"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rpc"
)
    40  
// testBackend is a minimal backend for exercising the filter API in tests:
// it serves chain data from an in-memory database and publishes events
// through a set of dedicated feeds instead of a running node.
type testBackend struct {
	mux        *event.TypeMux // mux the tests post pending-log events on
	db         ethdb.Database // backing store for headers, receipts and bloom bits
	sections   uint64         // number of processed bloombits sections reported by BloomStatus
	txFeed     *event.Feed    // feed for new-transaction events
	rmLogsFeed *event.Feed    // feed for removed-log events
	logsFeed   *event.Feed    // feed for new-log events
	chainFeed  *event.Feed    // feed for chain (new head) events
}
    50  
// ChainDb returns the backend's underlying key-value database.
func (b *testBackend) ChainDb() ethdb.Database {
	return b.db
}
    54  
// EventMux returns the event type mux; the tests post pending-log events on it.
func (b *testBackend) EventMux() *event.TypeMux {
	return b.mux
}
    58  
    59  func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
    60  	var (
    61  		hash common.Hash
    62  		num  uint64
    63  	)
    64  	if blockNr == rpc.LatestBlockNumber {
    65  		hash = rawdb.ReadHeadBlockHash(b.db)
    66  		number := rawdb.ReadHeaderNumber(b.db, hash)
    67  		if number == nil {
    68  			return nil, nil
    69  		}
    70  		num = *number
    71  	} else {
    72  		num = uint64(blockNr)
    73  		hash = rawdb.ReadCanonicalHash(b.db, num)
    74  	}
    75  	return rawdb.ReadHeader(b.db, hash, num), nil
    76  }
    77  
    78  func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
    79  	number := rawdb.ReadHeaderNumber(b.db, hash)
    80  	if number == nil {
    81  		return nil, nil
    82  	}
    83  	return rawdb.ReadHeader(b.db, hash, *number), nil
    84  }
    85  
    86  func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
    87  	if number := rawdb.ReadHeaderNumber(b.db, hash); number != nil {
    88  		return rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig), nil
    89  	}
    90  	return nil, nil
    91  }
    92  
    93  func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
    94  	number := rawdb.ReadHeaderNumber(b.db, hash)
    95  	if number == nil {
    96  		return nil, nil
    97  	}
    98  	receipts := rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig)
    99  
   100  	logs := make([][]*types.Log, len(receipts))
   101  	for i, receipt := range receipts {
   102  		logs[i] = receipt.Logs
   103  	}
   104  	return logs, nil
   105  }
   106  
// SubscribeNewTxsEvent subscribes the given channel to the backend's new-transaction feed.
func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
	return b.txFeed.Subscribe(ch)
}
   110  
// SubscribeRemovedLogsEvent subscribes the given channel to the backend's removed-log feed.
func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
	return b.rmLogsFeed.Subscribe(ch)
}
   114  
// SubscribeLogsEvent subscribes the given channel to the backend's new-log feed.
func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return b.logsFeed.Subscribe(ch)
}
   118  
// SubscribeChainEvent subscribes the given channel to the backend's chain-event feed.
func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
	return b.chainFeed.Subscribe(ch)
}
   122  
// BloomStatus reports the bloombits section size and the number of processed
// sections configured on the backend.
func (b *testBackend) BloomStatus() (uint64, uint64) {
	return params.BloomBitsBlocks, b.sections
}
   126  
// ServiceFilter serves bloombits retrieval requests for the given matcher
// session from the backend database, running until ctx is cancelled. Roughly
// a quarter of the section deliveries are skipped at random so the matcher's
// handling of missing data gets exercised.
func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
	requests := make(chan chan *bloombits.Retrieval)

	// Multiplex up to 16 concurrent retrieval tasks onto the request channel.
	go session.Multiplex(16, 0, requests)
	go func() {
		for {
			// Wait for a service request or a shutdown
			select {
			case <-ctx.Done():
				return

			case request := <-requests:
				// The request channel delivers the actual task to serve.
				task := <-request

				task.Bitsets = make([][]byte, len(task.Sections))
				for i, section := range task.Sections {
					if rand.Int()%4 != 0 { // Handle occasional missing deliveries
						// Read the bloom bits keyed by the last block of the section.
						head := rawdb.ReadCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
						task.Bitsets[i], _ = rawdb.ReadBloomBits(b.db, task.Bit, section, head)
					}
				}
				// Hand the (possibly partially filled) task back to the session.
				request <- task
			}
		}
	}()
}
   153  
// TestBlockSubscription tests if a block subscription returns block hashes for posted chain events.
// It creates multiple subscriptions:
// - one at the start and should receive all posted chain events and a second (blockHashes)
// - one that is created after a cutoff moment and uninstalled after a second cutoff moment (blockHashes[cutoff1:cutoff2])
// - one that is created after the second cutoff moment (blockHashes[cutoff2:])
func TestBlockSubscription(t *testing.T) {
	t.Parallel()

	var (
		mux         = new(event.TypeMux)
		db          = rawdb.NewMemoryDatabase()
		txFeed      = new(event.Feed)
		rmLogsFeed  = new(event.Feed)
		logsFeed    = new(event.Feed)
		chainFeed   = new(event.Feed)
		backend     = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
		api         = NewPublicFilterAPI(backend, false)
		genesis     = new(core.Genesis).MustCommit(db)
		chain, _    = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {})
		chainEvents = []core.ChainEvent{}
	)

	// Turn the generated chain into the events that will be posted below.
	for _, blk := range chain {
		chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk})
	}

	// Two independent head subscriptions; both should see every event.
	chan0 := make(chan *types.Header)
	sub0 := api.events.SubscribeNewHeads(chan0)
	chan1 := make(chan *types.Header)
	sub1 := api.events.SubscribeNewHeads(chan1)

	go func() { // simulate client
		// i1/i2 count the headers received on each subscription; the loop ends
		// once both have seen all posted events.
		i1, i2 := 0, 0
		for i1 != len(chainEvents) || i2 != len(chainEvents) {
			select {
			case header := <-chan0:
				if chainEvents[i1].Hash != header.Hash() {
					t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash())
				}
				i1++
			case header := <-chan1:
				if chainEvents[i2].Hash != header.Hash() {
					t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash())
				}
				i2++
			}
		}

		sub0.Unsubscribe()
		sub1.Unsubscribe()
	}()

	// Give the event system time to register the subscriptions before posting.
	time.Sleep(1 * time.Second)
	for _, e := range chainEvents {
		chainFeed.Send(e)
	}

	// Unsubscribing above closes the error channels, unblocking these receives.
	<-sub0.Err()
	<-sub1.Err()
}
   214  
   215  // TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the event mux.
   216  func TestPendingTxFilter(t *testing.T) {
   217  	t.Parallel()
   218  
   219  	var (
   220  		mux        = new(event.TypeMux)
   221  		db         = rawdb.NewMemoryDatabase()
   222  		txFeed     = new(event.Feed)
   223  		rmLogsFeed = new(event.Feed)
   224  		logsFeed   = new(event.Feed)
   225  		chainFeed  = new(event.Feed)
   226  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   227  		api        = NewPublicFilterAPI(backend, false)
   228  
   229  		transactions = []*types.Transaction{
   230  			types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   231  			types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   232  			types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   233  			types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   234  			types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   235  		}
   236  
   237  		hashes []common.Hash
   238  	)
   239  
   240  	fid0 := api.NewPendingTransactionFilter()
   241  
   242  	time.Sleep(1 * time.Second)
   243  	txFeed.Send(core.NewTxsEvent{Txs: transactions})
   244  
   245  	timeout := time.Now().Add(1 * time.Second)
   246  	for {
   247  		results, err := api.GetFilterChanges(fid0)
   248  		if err != nil {
   249  			t.Fatalf("Unable to retrieve logs: %v", err)
   250  		}
   251  
   252  		h := results.([]common.Hash)
   253  		hashes = append(hashes, h...)
   254  		if len(hashes) >= len(transactions) {
   255  			break
   256  		}
   257  		// check timeout
   258  		if time.Now().After(timeout) {
   259  			break
   260  		}
   261  
   262  		time.Sleep(100 * time.Millisecond)
   263  	}
   264  
   265  	if len(hashes) != len(transactions) {
   266  		t.Errorf("invalid number of transactions, want %d transactions(s), got %d", len(transactions), len(hashes))
   267  		return
   268  	}
   269  	for i := range hashes {
   270  		if hashes[i] != transactions[i].Hash() {
   271  			t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i])
   272  		}
   273  	}
   274  }
   275  
   276  // TestLogFilterCreation test whether a given filter criteria makes sense.
   277  // If not it must return an error.
   278  func TestLogFilterCreation(t *testing.T) {
   279  	var (
   280  		mux        = new(event.TypeMux)
   281  		db         = rawdb.NewMemoryDatabase()
   282  		txFeed     = new(event.Feed)
   283  		rmLogsFeed = new(event.Feed)
   284  		logsFeed   = new(event.Feed)
   285  		chainFeed  = new(event.Feed)
   286  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   287  		api        = NewPublicFilterAPI(backend, false)
   288  
   289  		testCases = []struct {
   290  			crit    FilterCriteria
   291  			success bool
   292  		}{
   293  			// defaults
   294  			{FilterCriteria{}, true},
   295  			// valid block number range
   296  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true},
   297  			// "mined" block range to pending
   298  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true},
   299  			// new mined and pending blocks
   300  			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true},
   301  			// from block "higher" than to block
   302  			{FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false},
   303  			// from block "higher" than to block
   304  			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
   305  			// from block "higher" than to block
   306  			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
   307  			// from block "higher" than to block
   308  			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false},
   309  		}
   310  	)
   311  
   312  	for i, test := range testCases {
   313  		_, err := api.NewFilter(test.crit)
   314  		if test.success && err != nil {
   315  			t.Errorf("expected filter creation for case %d to success, got %v", i, err)
   316  		}
   317  		if !test.success && err == nil {
   318  			t.Errorf("expected testcase %d to fail with an error", i)
   319  		}
   320  	}
   321  }
   322  
   323  // TestInvalidLogFilterCreation tests whether invalid filter log criteria results in an error
   324  // when the filter is created.
   325  func TestInvalidLogFilterCreation(t *testing.T) {
   326  	t.Parallel()
   327  
   328  	var (
   329  		mux        = new(event.TypeMux)
   330  		db         = rawdb.NewMemoryDatabase()
   331  		txFeed     = new(event.Feed)
   332  		rmLogsFeed = new(event.Feed)
   333  		logsFeed   = new(event.Feed)
   334  		chainFeed  = new(event.Feed)
   335  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   336  		api        = NewPublicFilterAPI(backend, false)
   337  	)
   338  
   339  	// different situations where log filter creation should fail.
   340  	// Reason: fromBlock > toBlock
   341  	testCases := []FilterCriteria{
   342  		0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
   343  		1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)},
   344  		2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)},
   345  	}
   346  
   347  	for i, test := range testCases {
   348  		if _, err := api.NewFilter(test); err == nil {
   349  			t.Errorf("Expected NewFilter for case #%d to fail", i)
   350  		}
   351  	}
   352  }
   353  
   354  func TestInvalidGetLogsRequest(t *testing.T) {
   355  	var (
   356  		mux        = new(event.TypeMux)
   357  		db         = rawdb.NewMemoryDatabase()
   358  		txFeed     = new(event.Feed)
   359  		rmLogsFeed = new(event.Feed)
   360  		logsFeed   = new(event.Feed)
   361  		chainFeed  = new(event.Feed)
   362  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   363  		api        = NewPublicFilterAPI(backend, false)
   364  		blockHash  = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   365  	)
   366  
   367  	// Reason: Cannot specify both BlockHash and FromBlock/ToBlock)
   368  	testCases := []FilterCriteria{
   369  		0: {BlockHash: &blockHash, FromBlock: big.NewInt(100)},
   370  		1: {BlockHash: &blockHash, ToBlock: big.NewInt(500)},
   371  		2: {BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
   372  	}
   373  
   374  	for i, test := range testCases {
   375  		if _, err := api.GetLogs(context.Background(), test); err == nil {
   376  			t.Errorf("Expected Logs for case #%d to fail", i)
   377  		}
   378  	}
   379  }
   380  
   381  // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed.
   382  func TestLogFilter(t *testing.T) {
   383  	t.Parallel()
   384  
   385  	var (
   386  		mux        = new(event.TypeMux)
   387  		db         = rawdb.NewMemoryDatabase()
   388  		txFeed     = new(event.Feed)
   389  		rmLogsFeed = new(event.Feed)
   390  		logsFeed   = new(event.Feed)
   391  		chainFeed  = new(event.Feed)
   392  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   393  		api        = NewPublicFilterAPI(backend, false)
   394  
   395  		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
   396  		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
   397  		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
   398  		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
   399  		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   400  		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
   401  		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
   402  
   403  		// posted twice, once as vm.Logs and once as core.PendingLogsEvent
   404  		allLogs = []*types.Log{
   405  			{Address: firstAddr},
   406  			{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
   407  			{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
   408  			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2},
   409  			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3},
   410  		}
   411  
   412  		expectedCase7  = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]}
   413  		expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]}
   414  
   415  		testCases = []struct {
   416  			crit     FilterCriteria
   417  			expected []*types.Log
   418  			id       rpc.ID
   419  		}{
   420  			// match all
   421  			0: {FilterCriteria{}, allLogs, ""},
   422  			// match none due to no matching addresses
   423  			1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""},
   424  			// match logs based on addresses, ignore topics
   425  			2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
   426  			// match none due to no matching topics (match with address)
   427  			3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""},
   428  			// match logs based on addresses and topics
   429  			4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""},
   430  			// match logs based on multiple addresses and "or" topics
   431  			5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""},
   432  			// logs in the pending block
   433  			6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""},
   434  			// mined logs with block num >= 2 or pending logs
   435  			7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""},
   436  			// all "mined" logs with block num >= 2
   437  			8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""},
   438  			// all "mined" logs
   439  			9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""},
   440  			// all "mined" logs with 1>= block num <=2 and topic secondTopic
   441  			10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""},
   442  			// all "mined" and pending logs with topic firstTopic
   443  			11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""},
   444  			// match all logs due to wildcard topic
   445  			12: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], ""},
   446  		}
   447  	)
   448  
   449  	// create all filters
   450  	for i := range testCases {
   451  		testCases[i].id, _ = api.NewFilter(testCases[i].crit)
   452  	}
   453  
   454  	// raise events
   455  	time.Sleep(1 * time.Second)
   456  	if nsend := logsFeed.Send(allLogs); nsend == 0 {
   457  		t.Fatal("Shoud have at least one subscription")
   458  	}
   459  	if err := mux.Post(core.PendingLogsEvent{Logs: allLogs}); err != nil {
   460  		t.Fatal(err)
   461  	}
   462  
   463  	for i, tt := range testCases {
   464  		var fetched []*types.Log
   465  		timeout := time.Now().Add(1 * time.Second)
   466  		for { // fetch all expected logs
   467  			results, err := api.GetFilterChanges(tt.id)
   468  			if err != nil {
   469  				t.Fatalf("Unable to fetch logs: %v", err)
   470  			}
   471  
   472  			fetched = append(fetched, results.([]*types.Log)...)
   473  			if len(fetched) >= len(tt.expected) {
   474  				break
   475  			}
   476  			// check timeout
   477  			if time.Now().After(timeout) {
   478  				break
   479  			}
   480  
   481  			time.Sleep(100 * time.Millisecond)
   482  		}
   483  
   484  		if len(fetched) != len(tt.expected) {
   485  			t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
   486  			return
   487  		}
   488  
   489  		for l := range fetched {
   490  			if fetched[l].Removed {
   491  				t.Errorf("expected log not to be removed for log %d in case %d", l, i)
   492  			}
   493  			if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
   494  				t.Errorf("invalid log on index %d for case %d", l, i)
   495  			}
   496  		}
   497  	}
   498  }
   499  
   500  // TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event feed.
   501  func TestPendingLogsSubscription(t *testing.T) {
   502  	t.Parallel()
   503  
   504  	var (
   505  		mux        = new(event.TypeMux)
   506  		db         = rawdb.NewMemoryDatabase()
   507  		txFeed     = new(event.Feed)
   508  		rmLogsFeed = new(event.Feed)
   509  		logsFeed   = new(event.Feed)
   510  		chainFeed  = new(event.Feed)
   511  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   512  		api        = NewPublicFilterAPI(backend, false)
   513  
   514  		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
   515  		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
   516  		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
   517  		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
   518  		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   519  		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
   520  		thirdTopic     = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333")
   521  		fourthTopic    = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444")
   522  		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
   523  
   524  		allLogs = []core.PendingLogsEvent{
   525  			{Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}},
   526  			{Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}},
   527  			{Logs: []*types.Log{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}},
   528  			{Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}},
   529  			{Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}},
   530  			{Logs: []*types.Log{
   531  				{Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
   532  				{Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
   533  				{Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5},
   534  				{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
   535  			}},
   536  		}
   537  
   538  		convertLogs = func(pl []core.PendingLogsEvent) []*types.Log {
   539  			var logs []*types.Log
   540  			for _, l := range pl {
   541  				logs = append(logs, l.Logs...)
   542  			}
   543  			return logs
   544  		}
   545  
   546  		testCases = []struct {
   547  			crit     ethereum.FilterQuery
   548  			expected []*types.Log
   549  			c        chan []*types.Log
   550  			sub      *Subscription
   551  		}{
   552  			// match all
   553  			{ethereum.FilterQuery{}, convertLogs(allLogs), nil, nil},
   554  			// match none due to no matching addresses
   555  			{ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, nil, nil},
   556  			// match logs based on addresses, ignore topics
   557  			{ethereum.FilterQuery{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
   558  			// match none due to no matching topics (match with address)
   559  			{ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, nil, nil},
   560  			// match logs based on addresses and topics
   561  			{ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil},
   562  			// match logs based on multiple addresses and "or" topics
   563  			{ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil},
   564  			// block numbers are ignored for filters created with New***Filter, these return all logs that match the given criteria when the state changes
   565  			{ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
   566  			// multiple pending logs, should match only 2 topics from the logs in block 5
   567  			{ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil},
   568  		}
   569  	)
   570  
   571  	// create all subscriptions, this ensures all subscriptions are created before the events are posted.
   572  	// on slow machines this could otherwise lead to missing events when the subscription is created after
   573  	// (some) events are posted.
   574  	for i := range testCases {
   575  		testCases[i].c = make(chan []*types.Log)
   576  		testCases[i].sub, _ = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c)
   577  	}
   578  
   579  	for n, test := range testCases {
   580  		i := n
   581  		tt := test
   582  		go func() {
   583  			var fetched []*types.Log
   584  		fetchLoop:
   585  			for {
   586  				logs := <-tt.c
   587  				fetched = append(fetched, logs...)
   588  				if len(fetched) >= len(tt.expected) {
   589  					break fetchLoop
   590  				}
   591  			}
   592  
   593  			if len(fetched) != len(tt.expected) {
   594  				panic(fmt.Sprintf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)))
   595  			}
   596  
   597  			for l := range fetched {
   598  				if fetched[l].Removed {
   599  					panic(fmt.Sprintf("expected log not to be removed for log %d in case %d", l, i))
   600  				}
   601  				if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
   602  					panic(fmt.Sprintf("invalid log on index %d for case %d", l, i))
   603  				}
   604  			}
   605  		}()
   606  	}
   607  
   608  	// raise events
   609  	time.Sleep(1 * time.Second)
   610  	// allLogs are type of core.PendingLogsEvent
   611  	for _, l := range allLogs {
   612  		if err := mux.Post(l); err != nil {
   613  			t.Fatal(err)
   614  		}
   615  	}
   616  }