github.com/halybang/go-ethereum@v1.0.5-0.20180325041310-3b262bc1367c/eth/filters/filter_system_test.go

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package filters
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"math/big"
    23  	"math/rand"
    24  	"reflect"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/wanchain/go-wanchain/common"
    29  	"github.com/wanchain/go-wanchain/consensus/ethash"
    30  	"github.com/wanchain/go-wanchain/core"
    31  	"github.com/wanchain/go-wanchain/core/bloombits"
    32  	"github.com/wanchain/go-wanchain/core/types"
    33  	"github.com/wanchain/go-wanchain/core/vm"
    34  	"github.com/wanchain/go-wanchain/ethdb"
    35  	"github.com/wanchain/go-wanchain/event"
    36  	"github.com/wanchain/go-wanchain/params"
    37  	"github.com/wanchain/go-wanchain/rpc"
    38  )
    39  
    40  type testBackend struct {
    41  	mux        *event.TypeMux
    42  	db         ethdb.Database
    43  	sections   uint64
    44  	txFeed     *event.Feed
    45  	rmLogsFeed *event.Feed
    46  	logsFeed   *event.Feed
    47  	chainFeed  *event.Feed
    48  }
    49  
    50  func (b *testBackend) ChainDb() ethdb.Database {
    51  	return b.db
    52  }
    53  
    54  func (b *testBackend) EventMux() *event.TypeMux {
    55  	return b.mux
    56  }
    57  
    58  func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
    59  	var hash common.Hash
    60  	var num uint64
    61  	if blockNr == rpc.LatestBlockNumber {
    62  		hash = core.GetHeadBlockHash(b.db)
    63  		num = core.GetBlockNumber(b.db, hash)
    64  	} else {
    65  		num = uint64(blockNr)
    66  		hash = core.GetCanonicalHash(b.db, num)
    67  	}
    68  	return core.GetHeader(b.db, hash, num), nil
    69  }
    70  
    71  func (b *testBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
    72  	num := core.GetBlockNumber(b.db, blockHash)
    73  	return core.GetBlockReceipts(b.db, blockHash, num), nil
    74  }
    75  
    76  func (b *testBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
    77  	return b.txFeed.Subscribe(ch)
    78  }
    79  
    80  func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
    81  	return b.rmLogsFeed.Subscribe(ch)
    82  }
    83  
    84  func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
    85  	return b.logsFeed.Subscribe(ch)
    86  }
    87  
    88  func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
    89  	return b.chainFeed.Subscribe(ch)
    90  }
    91  
    92  func (b *testBackend) BloomStatus() (uint64, uint64) {
    93  	return params.BloomBitsBlocks, b.sections
    94  }
    95  
    96  func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
    97  	requests := make(chan chan *bloombits.Retrieval)
    98  
    99  	go session.Multiplex(16, 0, requests)
   100  	go func() {
   101  		for {
   102  			// Wait for a service request or a shutdown
   103  			select {
   104  			case <-ctx.Done():
   105  				return
   106  
   107  			case request := <-requests:
   108  				task := <-request
   109  
   110  				task.Bitsets = make([][]byte, len(task.Sections))
   111  				for i, section := range task.Sections {
   112  					if rand.Int()%4 != 0 { // Handle occasional missing deliveries
   113  						head := core.GetCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
   114  						task.Bitsets[i] = core.GetBloomBits(b.db, task.Bit, section, head)
   115  					}
   116  				}
   117  				request <- task
   118  			}
   119  		}
   120  	}()
   121  }
   122  
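// The tests below all wire up their backend by hand from a TypeMux, an in-memory database and
// four event feeds. The constructor sketched here is hypothetical (it is not part of the original
// file); it only illustrates how those pieces fit together, using the package-private fields of
// the testBackend type declared above.
func newTestBackend(sections uint64) *testBackend {
	db, _ := ethdb.NewMemDatabase()
	return &testBackend{
		mux:        new(event.TypeMux),
		db:         db,
		sections:   sections,
		txFeed:     new(event.Feed),
		rmLogsFeed: new(event.Feed),
		logsFeed:   new(event.Feed),
		chainFeed:  new(event.Feed),
	}
}

// A test could then write `backend := newTestBackend(0)` and post events directly on
// backend.txFeed, backend.logsFeed or backend.chainFeed.
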
   123  // TestBlockSubscription tests whether a block subscription returns the block hashes for posted chain events.
   124  // It creates two subscriptions up front (sub0 and sub1); both are expected to receive every posted chain
   125  // event, in order, and unsubscribe once all events have been delivered.
   128  func TestBlockSubscription(t *testing.T) {
   129  	t.Parallel()
   130  
   131  	var (
   132  		mux        = new(event.TypeMux)
   133  		db, _      = ethdb.NewMemDatabase()
   134  		engine     = ethash.NewFaker(db)
   135  		txFeed     = new(event.Feed)
   136  		rmLogsFeed = new(event.Feed)
   137  		logsFeed   = new(event.Feed)
   138  		chainFeed  = new(event.Feed)
   139  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   140  		api        = NewPublicFilterAPI(backend, false)
   141  		// genesis     = new(core.Genesis).MustCommit(db)
   142  		// chain, _    = core.GenerateChain(params.TestChainConfig, genesis, db, 10, func(i int, gen *core.BlockGen) {})
   143  		chainEvents = []core.ChainEvent{}
   144  	)
   145  
   146  	// create a genesis block
   147  	gspec := core.DefaultPPOWTestingGenesisBlock()
   148  	genesis := gspec.MustCommit(db)
   149  
   150  	blockChain, _ := core.NewBlockChain(db, gspec.Config, engine, vm.Config{})
   151  	defer blockChain.Stop()
   152  
   153  	chainEnv := core.NewChainEnv(gspec.Config, gspec, engine, blockChain, db)
   154  
   155  	chain, _ := chainEnv.GenerateChain(genesis, 10, func(i int, gen *core.BlockGen) {})
   156  
   157  	for _, blk := range chain {
   158  		chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk})
   159  	}
   160  
   161  	chan0 := make(chan *types.Header)
   162  	sub0 := api.events.SubscribeNewHeads(chan0)
   163  	chan1 := make(chan *types.Header)
   164  	sub1 := api.events.SubscribeNewHeads(chan1)
   165  
   166  	go func() { // simulate client
   167  		i1, i2 := 0, 0
   168  		for i1 != len(chainEvents) || i2 != len(chainEvents) {
   169  			select {
   170  			case header := <-chan0:
   171  				if chainEvents[i1].Hash != header.Hash() {
   172  					t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash())
   173  				}
   174  				i1++
   175  			case header := <-chan1:
   176  				if chainEvents[i2].Hash != header.Hash() {
   177  					t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash())
   178  				}
   179  				i2++
   180  			}
   181  		}
   182  
   183  		sub0.Unsubscribe()
   184  		sub1.Unsubscribe()
   185  	}()
   186  
   187  	time.Sleep(1 * time.Second)
   188  	for _, e := range chainEvents {
   189  		chainFeed.Send(e)
   190  	}
   191  
   192  	<-sub0.Err()
   193  	<-sub1.Err()
   194  }
   195  
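// The simulated client in TestBlockSubscription blocks on chan0 and chan1 without a deadline, so a
// lost event only surfaces through the global test timeout. A bounded receive such as the
// hypothetical helper below (a sketch, not part of the original test) is one way to turn that into
// an explicit error instead; e.g. header, err := receiveHeaderWithTimeout(chan0, 5*time.Second).
func receiveHeaderWithTimeout(ch <-chan *types.Header, d time.Duration) (*types.Header, error) {
	select {
	case header := <-ch:
		return header, nil
	case <-time.After(d):
		return nil, fmt.Errorf("no header received within %v", d)
	}
}
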
   196  // TestPendingTxFilter tests whether pending transaction filters retrieve all pending transactions that are posted to the transaction event feed.
   197  func TestPendingTxFilter(t *testing.T) {
   198  	t.Parallel()
   199  
   200  	var (
   201  		mux        = new(event.TypeMux)
   202  		db, _      = ethdb.NewMemDatabase()
   203  		txFeed     = new(event.Feed)
   204  		rmLogsFeed = new(event.Feed)
   205  		logsFeed   = new(event.Feed)
   206  		chainFeed  = new(event.Feed)
   207  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   208  		api        = NewPublicFilterAPI(backend, false)
   209  
   210  		transactions = []*types.Transaction{
   211  			types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), new(big.Int), new(big.Int), nil),
   212  			types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), new(big.Int), new(big.Int), nil),
   213  			types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), new(big.Int), new(big.Int), nil),
   214  			types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), new(big.Int), new(big.Int), nil),
   215  			types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), new(big.Int), new(big.Int), nil),
   216  		}
   217  
   218  		hashes []common.Hash
   219  	)
   220  
   221  	fid0 := api.NewPendingTransactionFilter()
   222  
   223  	time.Sleep(1 * time.Second)
   224  	for _, tx := range transactions {
   225  		ev := core.TxPreEvent{Tx: tx}
   226  		txFeed.Send(ev)
   227  	}
   228  
   229  	timeout := time.Now().Add(1 * time.Second)
   230  	for {
   231  		results, err := api.GetFilterChanges(fid0)
   232  		if err != nil {
   233  			t.Fatalf("Unable to retrieve filter changes: %v", err)
   234  		}
   235  
   236  		h := results.([]common.Hash)
   237  		hashes = append(hashes, h...)
   238  		if len(hashes) >= len(transactions) {
   239  			break
   240  		}
   241  		// check timeout
   242  		if time.Now().After(timeout) {
   243  			break
   244  		}
   245  
   246  		time.Sleep(100 * time.Millisecond)
   247  	}
   248  
   249  	if len(hashes) != len(transactions) {
   250  		t.Errorf("invalid number of transactions, want %d transaction(s), got %d", len(transactions), len(hashes))
   251  		return
   252  	}
   253  	for i := range hashes {
   254  		if hashes[i] != transactions[i].Hash() {
   255  			t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i])
   256  		}
   257  	}
   258  }
   259  
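// TestPendingTxFilter above and TestLogFilter below both poll GetFilterChanges in a loop,
// accumulating results until either enough items have arrived or a deadline has passed. The helper
// below is a sketch of that polling pattern for the pending-transaction case (assuming, from the
// constructor name, that NewPublicFilterAPI returns a *PublicFilterAPI); it is not part of the
// original file, and the log case is analogous with []*types.Log in place of []common.Hash.
func pollTxHashes(api *PublicFilterAPI, id rpc.ID, want int, wait time.Duration) ([]common.Hash, error) {
	var hashes []common.Hash
	deadline := time.Now().Add(wait)
	for {
		results, err := api.GetFilterChanges(id)
		if err != nil {
			return nil, err
		}
		hashes = append(hashes, results.([]common.Hash)...)
		if len(hashes) >= want || time.Now().After(deadline) {
			return hashes, nil
		}
		time.Sleep(100 * time.Millisecond)
	}
}
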
   260  // TestLogFilterCreation tests whether given filter criteria make sense.
   261  // If not, filter creation must return an error.
   262  func TestLogFilterCreation(t *testing.T) {
   263  	var (
   264  		mux        = new(event.TypeMux)
   265  		db, _      = ethdb.NewMemDatabase()
   266  		txFeed     = new(event.Feed)
   267  		rmLogsFeed = new(event.Feed)
   268  		logsFeed   = new(event.Feed)
   269  		chainFeed  = new(event.Feed)
   270  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   271  		api        = NewPublicFilterAPI(backend, false)
   272  
   273  		testCases = []struct {
   274  			crit    FilterCriteria
   275  			success bool
   276  		}{
   277  			// defaults
   278  			{FilterCriteria{}, true},
   279  			// valid block number range
   280  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true},
   281  			// "mined" block range up to the latest block
   282  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true},
   283  			// new mined and pending blocks
   284  			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true},
   285  			// from block "higher" than to block
   286  			{FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false},
   287  			// from block "higher" than to block
   288  			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
   289  			// from block "higher" than to block
   290  			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
   291  			// from block "higher" than to block
   292  			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false},
   293  		}
   294  	)
   295  
   296  	for i, test := range testCases {
   297  		_, err := api.NewFilter(test.crit)
   298  		if test.success && err != nil {
   299  			t.Errorf("expected filter creation for case %d to succeed, got %v", i, err)
   300  		}
   301  		if !test.success && err == nil {
   302  			t.Errorf("expected testcase %d to fail with an error", i)
   303  		}
   304  	}
   305  }
   306  
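// The table in TestLogFilterCreation implies the ordering that NewFilter appears to enforce: the
// pending block counts as "higher" than the latest block, which in turn is higher than any concrete
// block number, and a range is only acceptable when FromBlock does not rank above ToBlock. The two
// helpers below sketch that inferred rule; they are illustrations, not the implementation NewFilter
// actually uses, and the sentinel ranks 1<<62 and 1<<61 are arbitrary "very large" values.
func rankBlock(n *big.Int) int64 {
	switch n.Int64() {
	case rpc.PendingBlockNumber.Int64():
		return 1 << 62 // pending ranks above everything else
	case rpc.LatestBlockNumber.Int64():
		return 1 << 61 // latest ranks above any concrete block number
	default:
		return n.Int64()
	}
}

func validRange(crit FilterCriteria) bool {
	if crit.FromBlock == nil || crit.ToBlock == nil {
		return true // unset bounds, as in the "defaults" case of the table
	}
	return rankBlock(crit.FromBlock) <= rankBlock(crit.ToBlock)
}

// Applied to the table above, validRange returns true exactly for the cases marked success.
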
   307  // TestInvalidLogFilterCreation tests whether invalid log filter criteria result in an error
   308  // when the filter is created.
   309  func TestInvalidLogFilterCreation(t *testing.T) {
   310  	t.Parallel()
   311  
   312  	var (
   313  		mux        = new(event.TypeMux)
   314  		db, _      = ethdb.NewMemDatabase()
   315  		txFeed     = new(event.Feed)
   316  		rmLogsFeed = new(event.Feed)
   317  		logsFeed   = new(event.Feed)
   318  		chainFeed  = new(event.Feed)
   319  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   320  		api        = NewPublicFilterAPI(backend, false)
   321  	)
   322  
   323  	// different situations where log filter creation should fail.
   324  	// Reason: fromBlock > toBlock
   325  	testCases := []FilterCriteria{
   326  		0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
   327  		1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)},
   328  		2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)},
   329  	}
   330  
   331  	for i, test := range testCases {
   332  		if _, err := api.NewFilter(test); err == nil {
   333  			t.Errorf("Expected NewFilter for case #%d to fail", i)
   334  		}
   335  	}
   336  }
   337  
   338  // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed.
   339  func TestLogFilter(t *testing.T) {
   340  	t.Parallel()
   341  
   342  	var (
   343  		mux        = new(event.TypeMux)
   344  		db, _      = ethdb.NewMemDatabase()
   345  		txFeed     = new(event.Feed)
   346  		rmLogsFeed = new(event.Feed)
   347  		logsFeed   = new(event.Feed)
   348  		chainFeed  = new(event.Feed)
   349  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   350  		api        = NewPublicFilterAPI(backend, false)
   351  
   352  		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
   353  		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
   354  		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
   355  		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
   356  		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   357  		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
   358  		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
   359  
   360  		// posted twice, once as []*types.Log on the logs feed and once as a core.PendingLogsEvent on the event mux
   361  		allLogs = []*types.Log{
   362  			{Address: firstAddr},
   363  			{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
   364  			{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
   365  			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2},
   366  			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3},
   367  		}
   368  
   369  		expectedCase7  = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]}
   370  		expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]}
   371  
   372  		testCases = []struct {
   373  			crit     FilterCriteria
   374  			expected []*types.Log
   375  			id       rpc.ID
   376  		}{
   377  			// match all
   378  			0: {FilterCriteria{}, allLogs, ""},
   379  			// match none due to no matching addresses
   380  			1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""},
   381  			// match logs based on addresses, ignore topics
   382  			2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
   383  			// match none due to no matching topics (match with address)
   384  			3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""},
   385  			// match logs based on addresses and topics
   386  			4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""},
   387  			// match logs based on multiple addresses and "or" topics
   388  			5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""},
   389  			// logs in the pending block
   390  			6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""},
   391  			// mined logs with block num >= 2 or pending logs
   392  			7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""},
   393  			// all "mined" logs with block num >= 2
   394  			8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""},
   395  			// all "mined" logs
   396  			9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""},
   397  			// all "mined" logs with 1 <= block num <= 2 and topic secondTopic
   398  			10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""},
   399  			// all "mined" and pending logs with topic firstTopic
   400  			11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""},
   401  			// match all logs due to wildcard topic
   402  			12: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], ""},
   403  		}
   404  	)
   405  
   406  	// create all filters
   407  	for i := range testCases {
   408  		testCases[i].id, _ = api.NewFilter(testCases[i].crit)
   409  	}
   410  
   411  	// raise events
   412  	time.Sleep(1 * time.Second)
   413  	if nsend := logsFeed.Send(allLogs); nsend == 0 {
   414  		t.Fatal("Should have at least one subscription")
   415  	}
   416  	if err := mux.Post(core.PendingLogsEvent{Logs: allLogs}); err != nil {
   417  		t.Fatal(err)
   418  	}
   419  
   420  	for i, tt := range testCases {
   421  		var fetched []*types.Log
   422  		timeout := time.Now().Add(1 * time.Second)
   423  		for { // fetch all expected logs
   424  			results, err := api.GetFilterChanges(tt.id)
   425  			if err != nil {
   426  				t.Fatalf("Unable to fetch logs: %v", err)
   427  			}
   428  
   429  			fetched = append(fetched, results.([]*types.Log)...)
   430  			if len(fetched) >= len(tt.expected) {
   431  				break
   432  			}
   433  			// check timeout
   434  			if time.Now().After(timeout) {
   435  				break
   436  			}
   437  
   438  			time.Sleep(100 * time.Millisecond)
   439  		}
   440  
   441  		if len(fetched) != len(tt.expected) {
   442  			t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
   443  			return
   444  		}
   445  
   446  		for l := range fetched {
   447  			if fetched[l].Removed {
   448  				t.Errorf("expected log not to be removed for log %d in case %d", l, i)
   449  			}
   450  			if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
   451  				t.Errorf("invalid log on index %d for case %d", l, i)
   452  			}
   453  		}
   454  	}
   455  }
   456  
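// The topic-related cases in TestLogFilter (3, 4, 5, 10, 11 and 12) rely on the usual Ethereum log
// filter semantics: the outer slice of FilterCriteria.Topics constrains topic positions (every
// constrained position must match), the inner slice lists alternatives for that position (any one
// may match), and a nil or empty inner slice is a wildcard. A log with fewer topics than there are
// constrained positions does not match, which is why case 12 excludes allLogs[0]. The function
// below is only a sketch of that rule, not the matching code the API itself uses.
func topicsMatch(log *types.Log, topics [][]common.Hash) bool {
	if len(topics) > len(log.Topics) {
		return false // more constrained positions than the log has topics
	}
	for i, alternatives := range topics {
		if len(alternatives) == 0 {
			continue // wildcard position
		}
		matched := false
		for _, topic := range alternatives {
			if log.Topics[i] == topic {
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	return true
}
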
   457  // TestPendingLogsSubscription tests whether a subscription receives the correct pending logs that are posted to the event mux.
   458  func TestPendingLogsSubscription(t *testing.T) {
   459  	t.Parallel()
   460  
   461  	var (
   462  		mux        = new(event.TypeMux)
   463  		db, _      = ethdb.NewMemDatabase()
   464  		txFeed     = new(event.Feed)
   465  		rmLogsFeed = new(event.Feed)
   466  		logsFeed   = new(event.Feed)
   467  		chainFeed  = new(event.Feed)
   468  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   469  		api        = NewPublicFilterAPI(backend, false)
   470  
   471  		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
   472  		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
   473  		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
   474  		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
   475  		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   476  		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
   477  		thirdTopic     = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333")
   478  		fourthTopic    = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444")
   479  		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
   480  
   481  		allLogs = []core.PendingLogsEvent{
   482  			{Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}},
   483  			{Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}},
   484  			{Logs: []*types.Log{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}},
   485  			{Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}},
   486  			{Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}},
   487  			{Logs: []*types.Log{
   488  				{Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
   489  				{Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
   490  				{Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5},
   491  				{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
   492  			}},
   493  		}
   494  
   495  		convertLogs = func(pl []core.PendingLogsEvent) []*types.Log {
   496  			var logs []*types.Log
   497  			for _, l := range pl {
   498  				logs = append(logs, l.Logs...)
   499  			}
   500  			return logs
   501  		}
   502  
   503  		testCases = []struct {
   504  			crit     FilterCriteria
   505  			expected []*types.Log
   506  			c        chan []*types.Log
   507  			sub      *Subscription
   508  		}{
   509  			// match all
   510  			{FilterCriteria{}, convertLogs(allLogs), nil, nil},
   511  			// match none due to no matching addresses
   512  			{FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, nil, nil},
   513  			// match logs based on addresses, ignore topics
   514  			{FilterCriteria{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
   515  			// match none due to no matching topics (match with address)
   516  			{FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, nil, nil},
   517  			// match logs based on addresses and topics
   518  			{FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil},
   519  			// match logs based on multiple addresses and "or" topics
   520  			{FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil},
   521  			// block numbers are ignored for filters created with New***Filter; these return all logs that match the given criteria when the state changes
   522  			{FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
   523  			// multiple pending logs, should match only 2 topics from the logs in block 5
   524  			{FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil},
   525  		}
   526  	)
   527  
   528  	// Create all subscriptions first; this ensures they all exist before the events are posted.
   529  	// On slow machines a subscription could otherwise be created only after (some) events have
   530  	// already been posted, which would lead to missed events.
   531  	for i := range testCases {
   532  		testCases[i].c = make(chan []*types.Log)
   533  		testCases[i].sub, _ = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c)
   534  	}
   535  
   536  	for n, test := range testCases {
   537  		i := n
   538  		tt := test
   539  		go func() {
   540  			var fetched []*types.Log
   541  		fetchLoop:
   542  			for {
   543  				logs := <-tt.c
   544  				fetched = append(fetched, logs...)
   545  				if len(fetched) >= len(tt.expected) {
   546  					break fetchLoop
   547  				}
   548  			}
   549  
   550  			if len(fetched) != len(tt.expected) {
   551  				panic(fmt.Sprintf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)))
   552  			}
   553  
   554  			for l := range fetched {
   555  				if fetched[l].Removed {
   556  					panic(fmt.Sprintf("expected log not to be removed for log %d in case %d", l, i))
   557  				}
   558  				if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
   559  					panic(fmt.Sprintf("invalid log on index %d for case %d", l, i))
   560  				}
   561  			}
   562  		}()
   563  	}
   564  
   565  	// raise events
   566  	time.Sleep(1 * time.Second)
   567  	// allLogs is a slice of core.PendingLogsEvent values
   568  	for _, l := range allLogs {
   569  		if err := mux.Post(l); err != nil {
   570  			t.Fatal(err)
   571  		}
   572  	}
   573  }
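
// The subscriber goroutines in TestPendingLogsSubscription panic on a mismatch because t.Fatal may
// only be called from the goroutine running the test, and the test returns without waiting for
// them, so a slow subscriber's failure can go unreported. One alternative, sketched below on the
// assumption that it replaces the panicking goroutines (the event-posting loop stays as it is), is
// to let every subscriber report on a buffered error channel and have the test drain that channel
// after posting:
//
//	errc := make(chan error, len(testCases))
//	for n := range testCases {
//		i, tt := n, testCases[n]
//		go func() {
//			var fetched []*types.Log
//			for len(fetched) < len(tt.expected) {
//				logs := <-tt.c
//				fetched = append(fetched, logs...)
//			}
//			for l := range fetched {
//				if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
//					errc <- fmt.Errorf("invalid log on index %d for case %d", l, i)
//					return
//				}
//			}
//			errc <- nil
//		}()
//	}
//	// post the pending log events exactly as above, then:
//	for range testCases {
//		if err := <-errc; err != nil {
//			t.Fatal(err)
//		}
//	}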