github.com/SmartMeshFoundation/Spectrum@v0.0.0-20220621030607-452a266fee1e/eth/filters/filter_system_test.go

// Copyright 2016 The Spectrum Authors
// This file is part of the Spectrum library.
//
// The Spectrum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Spectrum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Spectrum library. If not, see <http://www.gnu.org/licenses/>.

package filters

import (
	"context"
	"fmt"
	"math/big"
	"math/rand"
	"reflect"
	"testing"
	"time"

	"github.com/SmartMeshFoundation/Spectrum/common"
	"github.com/SmartMeshFoundation/Spectrum/consensus/ethash"
	"github.com/SmartMeshFoundation/Spectrum/core"
	"github.com/SmartMeshFoundation/Spectrum/core/bloombits"
	"github.com/SmartMeshFoundation/Spectrum/core/types"
	"github.com/SmartMeshFoundation/Spectrum/ethdb"
	"github.com/SmartMeshFoundation/Spectrum/event"
	"github.com/SmartMeshFoundation/Spectrum/params"
	"github.com/SmartMeshFoundation/Spectrum/rpc"
)

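// testBackend is a minimal in-memory implementation of the filter Backend
// interface. Instead of running a real blockchain, the tests below inject
// events directly through the feeds held by this struct.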
type testBackend struct {
	mux        *event.TypeMux
	db         ethdb.Database
	sections   uint64
	txFeed     *event.Feed
	rmLogsFeed *event.Feed
	logsFeed   *event.Feed
	chainFeed  *event.Feed
}

func (b *testBackend) ChainDb() ethdb.Database {
	return b.db
}

func (b *testBackend) EventMux() *event.TypeMux {
	return b.mux
}

func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
	var hash common.Hash
	var num uint64
	if blockNr == rpc.LatestBlockNumber {
		hash = core.GetHeadBlockHash(b.db)
		num = core.GetBlockNumber(b.db, hash)
	} else {
		num = uint64(blockNr)
		hash = core.GetCanonicalHash(b.db, num)
	}
	return core.GetHeader(b.db, hash, num), nil
}

func (b *testBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
	num := core.GetBlockNumber(b.db, blockHash)
	return core.GetBlockReceipts(b.db, blockHash, num), nil
}

func (b *testBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
	return b.txFeed.Subscribe(ch)
}

func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
	return b.rmLogsFeed.Subscribe(ch)
}

func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return b.logsFeed.Subscribe(ch)
}

func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
	return b.chainFeed.Subscribe(ch)
}

func (b *testBackend) BloomStatus() (uint64, uint64) {
	return params.BloomBitsBlocks, b.sections
}

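// ServiceFilter serves bloom-bits retrieval requests for a matcher session
// from the test database, randomly skipping roughly a quarter of the
// deliveries to simulate occasional missing data.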
func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
	requests := make(chan chan *bloombits.Retrieval)

	go session.Multiplex(16, 0, requests)
	go func() {
		for {
			// Wait for a service request or a shutdown
			select {
			case <-ctx.Done():
				return

			case request := <-requests:
				task := <-request

				task.Bitsets = make([][]byte, len(task.Sections))
				for i, section := range task.Sections {
					if rand.Int()%4 != 0 { // Handle occasional missing deliveries
						head := core.GetCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
						task.Bitsets[i], _ = core.GetBloomBits(b.db, task.Bit, section, head)
					}
				}
				request <- task
			}
		}
	}()
}

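// newTestBackend is a convenience sketch, not part of the original tests
// (which inline this setup): it shows how the in-memory database, the event
// feeds and a PublicFilterAPI are wired together in every test in this file.
// The *PublicFilterAPI return type is assumed from how NewPublicFilterAPI is
// used below.
func newTestBackend() (*testBackend, *PublicFilterAPI) {
	var (
		mux        = new(event.TypeMux)
		db, _      = ethdb.NewMemDatabase()
		txFeed     = new(event.Feed)
		rmLogsFeed = new(event.Feed)
		logsFeed   = new(event.Feed)
		chainFeed  = new(event.Feed)
	)
	backend := &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
	return backend, NewPublicFilterAPI(backend, false)
}
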
// TestBlockSubscription tests if a block subscription returns block hashes for posted chain events.
// It creates two head subscriptions up front; both are expected to receive every posted chain
// event, in order, before they are unsubscribed.
func TestBlockSubscription(t *testing.T) {
	t.Parallel()

	var (
		mux         = new(event.TypeMux)
		db, _       = ethdb.NewMemDatabase()
		txFeed      = new(event.Feed)
		rmLogsFeed  = new(event.Feed)
		logsFeed    = new(event.Feed)
		chainFeed   = new(event.Feed)
		backend     = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
		api         = NewPublicFilterAPI(backend, false)
		genesis     = new(core.Genesis).MustCommit(db)
		chain, _    = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {})
		chainEvents = []core.ChainEvent{}
	)

	for _, blk := range chain {
		chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk})
	}

	chan0 := make(chan *types.Header)
	sub0 := api.events.SubscribeNewHeads(chan0)
	chan1 := make(chan *types.Header)
	sub1 := api.events.SubscribeNewHeads(chan1)

	go func() { // simulate client
		i1, i2 := 0, 0
		for i1 != len(chainEvents) || i2 != len(chainEvents) {
			select {
			case header := <-chan0:
				if chainEvents[i1].Hash != header.Hash() {
					t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash())
				}
				i1++
			case header := <-chan1:
				if chainEvents[i2].Hash != header.Hash() {
					t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash())
				}
				i2++
			}
		}

		sub0.Unsubscribe()
		sub1.Unsubscribe()
	}()

	time.Sleep(1 * time.Second)
	for _, e := range chainEvents {
		chainFeed.Send(e)
	}

	<-sub0.Err()
	<-sub1.Err()
}

// TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the tx event feed.
func TestPendingTxFilter(t *testing.T) {
	t.Parallel()

	var (
		mux        = new(event.TypeMux)
		db, _      = ethdb.NewMemDatabase()
		txFeed     = new(event.Feed)
		rmLogsFeed = new(event.Feed)
		logsFeed   = new(event.Feed)
		chainFeed  = new(event.Feed)
		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
		api        = NewPublicFilterAPI(backend, false)

		transactions = []*types.Transaction{
			types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), new(big.Int), new(big.Int), nil),
			types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), new(big.Int), new(big.Int), nil),
			types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), new(big.Int), new(big.Int), nil),
			types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), new(big.Int), new(big.Int), nil),
			types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), new(big.Int), new(big.Int), nil),
		}

		hashes []common.Hash
	)

	fid0 := api.NewPendingTransactionFilter()

	time.Sleep(1 * time.Second)
	for _, tx := range transactions {
		ev := core.TxPreEvent{Tx: tx}
		txFeed.Send(ev)
	}

	timeout := time.Now().Add(1 * time.Second)
	for {
		results, err := api.GetFilterChanges(fid0)
		if err != nil {
			t.Fatalf("Unable to retrieve logs: %v", err)
		}

		h := results.([]common.Hash)
		hashes = append(hashes, h...)
		if len(hashes) >= len(transactions) {
			break
		}
		// check timeout
		if time.Now().After(timeout) {
			break
		}

		time.Sleep(100 * time.Millisecond)
	}

	if len(hashes) != len(transactions) {
		t.Errorf("invalid number of transactions, want %d transaction(s), got %d", len(transactions), len(hashes))
		return
	}
	for i := range hashes {
		if hashes[i] != transactions[i].Hash() {
			t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i])
		}
	}
}

// TestLogFilterCreation tests whether the given filter criteria make sense.
// If not, filter creation must return an error.
func TestLogFilterCreation(t *testing.T) {
	var (
		mux        = new(event.TypeMux)
		db, _      = ethdb.NewMemDatabase()
		txFeed     = new(event.Feed)
		rmLogsFeed = new(event.Feed)
		logsFeed   = new(event.Feed)
		chainFeed  = new(event.Feed)
		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
		api        = NewPublicFilterAPI(backend, false)

		testCases = []struct {
			crit    FilterCriteria
			success bool
		}{
			// defaults
			{FilterCriteria{}, true},
			// valid block number range
			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true},
			// "mined" block range up to the latest block
			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true},
			// new mined and pending blocks
			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true},
			// from block "higher" than to block
			{FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false},
			// from block "higher" than to block
			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
			// from block "higher" than to block
			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
			// from block "higher" than to block
			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false},
		}
	)

	for i, test := range testCases {
		_, err := api.NewFilter(test.crit)
		if test.success && err != nil {
			t.Errorf("expected filter creation for case %d to succeed, got %v", i, err)
		}
		if !test.success && err == nil {
			t.Errorf("expected testcase %d to fail with an error", i)
		}
	}
}

// TestInvalidLogFilterCreation tests whether invalid log filter criteria result in an error
// when the filter is created.
func TestInvalidLogFilterCreation(t *testing.T) {
	t.Parallel()

	var (
		mux        = new(event.TypeMux)
		db, _      = ethdb.NewMemDatabase()
		txFeed     = new(event.Feed)
		rmLogsFeed = new(event.Feed)
		logsFeed   = new(event.Feed)
		chainFeed  = new(event.Feed)
		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
		api        = NewPublicFilterAPI(backend, false)
	)

	// different situations where log filter creation should fail.
	// Reason: fromBlock > toBlock
	testCases := []FilterCriteria{
		0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
		1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)},
		2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)},
	}

	for i, test := range testCases {
		if _, err := api.NewFilter(test); err == nil {
			t.Errorf("Expected NewFilter for case #%d to fail", i)
		}
	}
}

// TestLogFilter tests whether log filters match the correct logs that are posted to the event feed.
func TestLogFilter(t *testing.T) {
	t.Parallel()

	var (
		mux        = new(event.TypeMux)
		db, _      = ethdb.NewMemDatabase()
		txFeed     = new(event.Feed)
		rmLogsFeed = new(event.Feed)
		logsFeed   = new(event.Feed)
		chainFeed  = new(event.Feed)
		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
		api        = NewPublicFilterAPI(backend, false)

		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")

		// posted twice, once as []*types.Log on the logs feed and once as a core.PendingLogsEvent on the event mux
		allLogs = []*types.Log{
			{Address: firstAddr},
			{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
			{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2},
			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3},
		}

		expectedCase7  = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]}
		expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]}

		testCases = []struct {
			crit     FilterCriteria
			expected []*types.Log
			id       rpc.ID
		}{
			// match all
			0: {FilterCriteria{}, allLogs, ""},
			// match none due to no matching addresses
			1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""},
			// match logs based on addresses, ignore topics
			2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
			// match none due to no matching topics (match with address)
			3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""},
			// match logs based on addresses and topics
			4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""},
			// match logs based on multiple addresses and "or" topics
			5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""},
			// logs in the pending block
			6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""},
			// mined logs with block num >= 2 or pending logs
			7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""},
			// all "mined" logs with block num >= 2
			8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""},
			// all "mined" logs
			9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""},
			// all "mined" logs with 1 <= block num <= 2 and topic secondTopic
			10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""},
			// all "mined" and pending logs with topic firstTopic
			11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""},
			// match all logs that have at least one topic (wildcard in the first topic position)
			12: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], ""},
		}
	)

	// create all filters
	for i := range testCases {
		testCases[i].id, _ = api.NewFilter(testCases[i].crit)
	}

	// raise events
	time.Sleep(1 * time.Second)
	if nsend := logsFeed.Send(allLogs); nsend == 0 {
		t.Fatal("Should have at least one subscription")
	}
	if err := mux.Post(core.PendingLogsEvent{Logs: allLogs}); err != nil {
		t.Fatal(err)
	}

	for i, tt := range testCases {
		var fetched []*types.Log
		timeout := time.Now().Add(1 * time.Second)
		for { // fetch all expected logs
			results, err := api.GetFilterChanges(tt.id)
			if err != nil {
				t.Fatalf("Unable to fetch logs: %v", err)
			}

			fetched = append(fetched, results.([]*types.Log)...)
			if len(fetched) >= len(tt.expected) {
				break
			}
			// check timeout
			if time.Now().After(timeout) {
				break
			}

			time.Sleep(100 * time.Millisecond)
		}

		if len(fetched) != len(tt.expected) {
			t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
			return
		}

		for l := range fetched {
			if fetched[l].Removed {
				t.Errorf("expected log not to be removed for log %d in case %d", l, i)
			}
			if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
				t.Errorf("invalid log on index %d for case %d", l, i)
			}
		}
	}
}

// TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event mux.
func TestPendingLogsSubscription(t *testing.T) {
	t.Parallel()

	var (
		mux        = new(event.TypeMux)
		db, _      = ethdb.NewMemDatabase()
		txFeed     = new(event.Feed)
		rmLogsFeed = new(event.Feed)
		logsFeed   = new(event.Feed)
		chainFeed  = new(event.Feed)
		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
		api        = NewPublicFilterAPI(backend, false)

		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
		thirdTopic     = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333")
		fourthTopic    = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444")
		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")

		allLogs = []core.PendingLogsEvent{
			{Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}},
			{Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}},
			{Logs: []*types.Log{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}},
			{Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}},
			{Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}},
			{Logs: []*types.Log{
				{Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
				{Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
				{Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5},
				{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
			}},
		}

		convertLogs = func(pl []core.PendingLogsEvent) []*types.Log {
			var logs []*types.Log
			for _, l := range pl {
				logs = append(logs, l.Logs...)
			}
			return logs
		}

		testCases = []struct {
			crit     FilterCriteria
			expected []*types.Log
			c        chan []*types.Log
			sub      *Subscription
		}{
			// match all
			{FilterCriteria{}, convertLogs(allLogs), nil, nil},
			// match none due to no matching addresses
			{FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, nil, nil},
			// match logs based on addresses, ignore topics
			{FilterCriteria{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
			// match none due to no matching topics (match with address)
			{FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, nil, nil},
			// match logs based on addresses and topics
			{FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil},
			// match logs based on multiple addresses and "or" topics
			{FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil},
			// block numbers are ignored for filters created with New***Filter; these return all logs that match the given criteria when the state changes
			{FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
			// multiple pending logs, should match only the two logs from block 5 carrying firstTopic or fourthTopic
			{FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil},
		}
	)

	// create all subscriptions; this ensures all subscriptions are created before the events are posted.
	// On slow machines this could otherwise lead to missing events when a subscription is created after
	// (some) events are posted.
	for i := range testCases {
		testCases[i].c = make(chan []*types.Log)
		testCases[i].sub, _ = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c)
	}

	for n, test := range testCases {
		i := n
		tt := test
		go func() {
			var fetched []*types.Log
		fetchLoop:
			for {
				logs := <-tt.c
				fetched = append(fetched, logs...)
				if len(fetched) >= len(tt.expected) {
					break fetchLoop
				}
			}

			if len(fetched) != len(tt.expected) {
				panic(fmt.Sprintf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)))
			}

			for l := range fetched {
				if fetched[l].Removed {
					panic(fmt.Sprintf("expected log not to be removed for log %d in case %d", l, i))
				}
				if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
					panic(fmt.Sprintf("invalid log on index %d for case %d", l, i))
				}
			}
		}()
	}

	// raise events
	time.Sleep(1 * time.Second)
	// allLogs is a slice of core.PendingLogsEvent values
	for _, l := range allLogs {
		if err := mux.Post(l); err != nil {
			t.Fatal(err)
		}
	}
}