gitlab.com/aquachain/aquachain@v1.17.16-rc3.0.20221018032414-e3ddf1e1c055/aqua/filters/filter_system_test.go

     1  // Copyright 2018 The aquachain Authors
     2  // This file is part of the aquachain library.
     3  //
     4  // The aquachain library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The aquachain library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the aquachain library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package filters
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"math/big"
    23  	"math/rand"
    24  	"reflect"
    25  	"testing"
    26  	"time"
    27  
    28  	aquachain "gitlab.com/aquachain/aquachain"
    29  	"gitlab.com/aquachain/aquachain/aqua/event"
    30  	"gitlab.com/aquachain/aquachain/aquadb"
    31  	"gitlab.com/aquachain/aquachain/common"
    32  	"gitlab.com/aquachain/aquachain/consensus/aquahash"
    33  	"gitlab.com/aquachain/aquachain/core"
    34  	"gitlab.com/aquachain/aquachain/core/bloombits"
    35  	"gitlab.com/aquachain/aquachain/core/types"
    36  	"gitlab.com/aquachain/aquachain/params"
    37  	"gitlab.com/aquachain/aquachain/rpc"
    38  )
    39  
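        // testBackend is a minimal in-memory implementation of the filter Backend interface used by
        // the tests in this file: headers, receipts and logs are read from a memory database, while
        // transaction, removed-log, log and chain events are delivered through the event feeds below.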
    40  type testBackend struct {
    41  	mux        *event.TypeMux
    42  	db         aquadb.Database
    43  	sections   uint64
    44  	txFeed     *event.Feed
    45  	rmLogsFeed *event.Feed
    46  	logsFeed   *event.Feed
    47  	chainFeed  *event.Feed
    48  }
    49  
    50  func (b *testBackend) ChainDb() aquadb.Database {
    51  	return b.db
    52  }
    53  
    54  func (b *testBackend) EventMux() *event.TypeMux {
    55  	return b.mux
    56  }
    57  
    58  func (b *testBackend) GetHeaderVersion(h *big.Int) params.HeaderVersion {
    59  	return params.TestChainConfig.GetBlockVersion(h)
    60  }
    61  func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
    62  	var hash common.Hash
    63  	var num uint64
    64  	if blockNr == rpc.LatestBlockNumber {
    65  		hash = core.GetHeadBlockHash(b.db)
    66  		num = core.GetBlockNumber(b.db, hash)
    67  	} else {
    68  		num = uint64(blockNr)
    69  		hash = core.GetCanonicalHash(b.db, num)
    70  	}
    71  	header := core.GetHeaderNoVersion(b.db, hash, num)
    72  	if header != nil {
    73  		header.Version = b.GetHeaderVersion(header.Number)
    74  	}
    75  	return header, nil
    76  }
    77  
    78  func (b *testBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
    79  	number := core.GetBlockNumber(b.db, blockHash)
    80  	return core.GetBlockReceipts(b.db, blockHash, number), nil
    81  }
    82  
    83  func (b *testBackend) GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) {
    84  	number := core.GetBlockNumber(b.db, blockHash)
    85  	receipts := core.GetBlockReceipts(b.db, blockHash, number)
    86  
    87  	logs := make([][]*types.Log, len(receipts))
    88  	for i, receipt := range receipts {
    89  		logs[i] = receipt.Logs
    90  	}
    91  	return logs, nil
    92  }
    93  
    94  func (b *testBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
    95  	return b.txFeed.Subscribe(ch)
    96  }
    97  
    98  func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
    99  	return b.rmLogsFeed.Subscribe(ch)
   100  }
   101  
   102  func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
   103  	return b.logsFeed.Subscribe(ch)
   104  }
   105  
   106  func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
   107  	return b.chainFeed.Subscribe(ch)
   108  }
   109  
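        // BloomStatus reports the fixed bloombits section size and the number of indexed sections
        // (zero for the backends constructed in these tests).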
   110  func (b *testBackend) BloomStatus() (uint64, uint64) {
   111  	return params.BloomBitsBlocks, b.sections
   112  }
   113  
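        // ServiceFilter serves bloombits retrieval requests for a matcher session. Roughly one in
        // four sections is deliberately left unanswered (nil bitset) to exercise the handling of
        // occasional missing deliveries, as noted in the loop below.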
   114  func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
   115  	requests := make(chan chan *bloombits.Retrieval)
   116  
   117  	go session.Multiplex(16, 0, requests)
   118  	go func() {
   119  		for {
   120  			// Wait for a service request or a shutdown
   121  			select {
   122  			case <-ctx.Done():
   123  				return
   124  
   125  			case request := <-requests:
   126  				task := <-request
   127  
   128  				task.Bitsets = make([][]byte, len(task.Sections))
   129  				for i, section := range task.Sections {
   130  					if rand.Int()%4 != 0 { // Handle occasional missing deliveries
   131  						head := core.GetCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
   132  						task.Bitsets[i], _ = core.GetBloomBits(b.db, task.Bit, section, head)
   133  					}
   134  				}
   135  				request <- task
   136  			}
   137  		}
   138  	}()
   139  }
   140  
   141  // TestBlockSubscription tests if a block subscription returns block hashes for posted
   142  // chain events.
   143  // It creates two subscriptions (sub0 and sub1) before any events are posted; both are
   144  // expected to receive every posted chain event, in order, and are unsubscribed by the
   145  // simulated client once all events have been received.
   146  func TestBlockSubscription(t *testing.T) {
   147  	t.Parallel()
   148  
   149  	var (
   150  		mux         = new(event.TypeMux)
   151  		db          = aquadb.NewMemDatabase()
   152  		txFeed      = new(event.Feed)
   153  		rmLogsFeed  = new(event.Feed)
   154  		logsFeed    = new(event.Feed)
   155  		chainFeed   = new(event.Feed)
   156  		backend     = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   157  		api         = NewPublicFilterAPI(backend, false)
   158  		genesis     = new(core.Genesis).MustCommit(db)
   159  		chain, _    = core.GenerateChain(params.TestChainConfig, genesis, aquahash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {})
   160  		chainEvents = []core.ChainEvent{}
   161  	)
   162  
   163  	for _, blk := range chain {
   164  		chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk})
   165  	}
   166  
   167  	chan0 := make(chan *types.Header)
   168  	sub0 := api.events.SubscribeNewHeads(chan0)
   169  	chan1 := make(chan *types.Header)
   170  	sub1 := api.events.SubscribeNewHeads(chan1)
   171  
   172  	go func() { // simulate client
   173  		i1, i2 := 0, 0
   174  		for i1 != len(chainEvents) || i2 != len(chainEvents) {
   175  			select {
   176  			case header := <-chan0:
   177  				if chainEvents[i1].Hash != header.Hash() {
   178  					t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash())
   179  				}
   180  				i1++
   181  			case header := <-chan1:
   182  				if chainEvents[i2].Hash != header.Hash() {
   183  					t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash())
   184  				}
   185  				i2++
   186  			}
   187  		}
   188  
   189  		sub0.Unsubscribe()
   190  		sub1.Unsubscribe()
   191  	}()
   192  
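        	// Give the subscriptions above time to be installed before posting chain events,
        	// otherwise early events could be missed.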
   193  	time.Sleep(1 * time.Second)
   194  	for _, e := range chainEvents {
   195  		chainFeed.Send(e)
   196  	}
   197  
   198  	<-sub0.Err()
   199  	<-sub1.Err()
   200  }
   201  
   202  // TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the transaction feed.
   203  func TestPendingTxFilter(t *testing.T) {
   204  	t.Parallel()
   205  
   206  	var (
   207  		mux        = new(event.TypeMux)
   208  		db         = aquadb.NewMemDatabase()
   209  		txFeed     = new(event.Feed)
   210  		rmLogsFeed = new(event.Feed)
   211  		logsFeed   = new(event.Feed)
   212  		chainFeed  = new(event.Feed)
   213  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   214  		api        = NewPublicFilterAPI(backend, false)
   215  
   216  		transactions = []*types.Transaction{
   217  			types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   218  			types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   219  			types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   220  			types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   221  			types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   222  		}
   223  
   224  		hashes []common.Hash
   225  	)
   226  
   227  	fid0 := api.NewPendingTransactionFilter()
   228  
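        	// Give the pending transaction filter time to be installed before posting transaction events.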
   229  	time.Sleep(1 * time.Second)
   230  	for _, tx := range transactions {
   231  		ev := core.TxPreEvent{Tx: tx}
   232  		txFeed.Send(ev)
   233  	}
   234  
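        	// Poll GetFilterChanges until every posted transaction hash has been collected,
        	// giving up after the one-second timeout below.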
   235  	timeout := time.Now().Add(1 * time.Second)
   236  	for {
   237  		results, err := api.GetFilterChanges(fid0)
   238  		if err != nil {
   239  			t.Fatalf("Unable to retrieve pending transaction hashes: %v", err)
   240  		}
   241  
   242  		h := results.([]common.Hash)
   243  		hashes = append(hashes, h...)
   244  		if len(hashes) >= len(transactions) {
   245  			break
   246  		}
   247  		// check timeout
   248  		if time.Now().After(timeout) {
   249  			break
   250  		}
   251  
   252  		time.Sleep(100 * time.Millisecond)
   253  	}
   254  
   255  	if len(hashes) != len(transactions) {
   256  		t.Errorf("invalid number of transactions, want %d transaction(s), got %d", len(transactions), len(hashes))
   257  		return
   258  	}
   259  	for i := range hashes {
   260  		if hashes[i] != transactions[i].Hash() {
   261  			t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i])
   262  		}
   263  	}
   264  }
   265  
   266  // TestLogFilterCreation tests whether a given set of filter criteria makes sense.
   267  // If not, filter creation must return an error.
   268  func TestLogFilterCreation(t *testing.T) {
   269  	var (
   270  		mux        = new(event.TypeMux)
   271  		db         = aquadb.NewMemDatabase()
   272  		txFeed     = new(event.Feed)
   273  		rmLogsFeed = new(event.Feed)
   274  		logsFeed   = new(event.Feed)
   275  		chainFeed  = new(event.Feed)
   276  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   277  		api        = NewPublicFilterAPI(backend, false)
   278  
   279  		testCases = []struct {
   280  			crit    FilterCriteria
   281  			success bool
   282  		}{
   283  			// defaults
   284  			{FilterCriteria{}, true},
   285  			// valid block number range
   286  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true},
   287  			// "mined" block range up to the latest block
   288  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true},
   289  			// new mined and pending blocks
   290  			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true},
   291  			// from block "higher" than to block
   292  			{FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false},
   293  			// from block "higher" than to block
   294  			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
   295  			// from block "higher" than to block
   296  			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
   297  			// from block "higher" than to block
   298  			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false},
   299  		}
   300  	)
   301  
   302  	for i, test := range testCases {
   303  		_, err := api.NewFilter(test.crit)
   304  		if test.success && err != nil {
   305  			t.Errorf("expected filter creation for case %d to succeed, got %v", i, err)
   306  		}
   307  		if !test.success && err == nil {
   308  			t.Errorf("expected testcase %d to fail with an error", i)
   309  		}
   310  	}
   311  }
   312  
   313  // TestInvalidLogFilterCreation tests whether invalid log filter criteria result in an error
   314  // when the filter is created.
   315  func TestInvalidLogFilterCreation(t *testing.T) {
   316  	t.Parallel()
   317  
   318  	var (
   319  		mux        = new(event.TypeMux)
   320  		db         = aquadb.NewMemDatabase()
   321  		txFeed     = new(event.Feed)
   322  		rmLogsFeed = new(event.Feed)
   323  		logsFeed   = new(event.Feed)
   324  		chainFeed  = new(event.Feed)
   325  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   326  		api        = NewPublicFilterAPI(backend, false)
   327  	)
   328  
   329  	// Different situations where log filter creation should fail.
   330  	// Reason: fromBlock > toBlock.
   331  	testCases := []FilterCriteria{
   332  		0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
   333  		1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)},
   334  		2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)},
   335  	}
   336  
   337  	for i, test := range testCases {
   338  		if _, err := api.NewFilter(test); err == nil {
   339  			t.Errorf("Expected NewFilter for case #%d to fail", i)
   340  		}
   341  	}
   342  }
   343  
   344  // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed.
   345  func TestLogFilter(t *testing.T) {
   346  	t.Parallel()
   347  
   348  	var (
   349  		mux        = new(event.TypeMux)
   350  		db         = aquadb.NewMemDatabase()
   351  		txFeed     = new(event.Feed)
   352  		rmLogsFeed = new(event.Feed)
   353  		logsFeed   = new(event.Feed)
   354  		chainFeed  = new(event.Feed)
   355  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   356  		api        = NewPublicFilterAPI(backend, false)
   357  
   358  		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
   359  		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
   360  		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
   361  		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
   362  		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   363  		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
   364  		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
   365  
   366  		// posted twice: once on the logs feed as mined logs and once on the mux as core.PendingLogsEvent
   367  		allLogs = []*types.Log{
   368  			{Address: firstAddr},
   369  			{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
   370  			{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
   371  			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2},
   372  			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3},
   373  		}
   374  
   375  		expectedCase7  = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]}
   376  		expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]}
   377  
   378  		testCases = []struct {
   379  			crit     FilterCriteria
   380  			expected []*types.Log
   381  			id       rpc.ID
   382  		}{
   383  			// match all
   384  			0: {FilterCriteria{}, allLogs, ""},
   385  			// match none due to no matching addresses
   386  			1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""},
   387  			// match logs based on addresses, ignore topics
   388  			2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
   389  			// match none due to no matching topics (match with address)
   390  			3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""},
   391  			// match logs based on addresses and topics
   392  			4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""},
   393  			// match logs based on multiple addresses and "or" topics
   394  			5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""},
   395  			// logs in the pending block
   396  			6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""},
   397  			// mined logs with block num >= 2 or pending logs
   398  			7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""},
   399  			// all "mined" logs with block num >= 2
   400  			8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""},
   401  			// all "mined" logs
   402  			9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""},
   403  			// all "mined" logs with 1 <= block num <= 2 and topic secondTopic
   404  			10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""},
   405  			// all "mined" and pending logs with topic firstTopic
   406  			11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""},
   407  			// match all logs that carry at least one topic (wildcard in the first topic position)
   408  			12: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], ""},
   409  		}
   410  	)
   411  
   412  	// create all filters
   413  	for i := range testCases {
   414  		testCases[i].id, _ = api.NewFilter(testCases[i].crit)
   415  	}
   416  
   417  	// raise events
   418  	time.Sleep(1 * time.Second)
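        	// Deliver the logs twice, matching the comment on allLogs above: once on the logs feed
        	// as mined logs and once on the mux as a core.PendingLogsEvent.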
   419  	if nsend := logsFeed.Send(allLogs); nsend == 0 {
   420  		t.Fatal("Should have at least one subscription")
   421  	}
   422  	if err := mux.Post(core.PendingLogsEvent{Logs: allLogs}); err != nil {
   423  		t.Fatal(err)
   424  	}
   425  
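        	// For every filter, poll GetFilterChanges until the expected number of logs has been
        	// collected or the one-second timeout expires, then compare against the expectation.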
   426  	for i, tt := range testCases {
   427  		var fetched []*types.Log
   428  		timeout := time.Now().Add(1 * time.Second)
   429  		for { // fetch all expected logs
   430  			results, err := api.GetFilterChanges(tt.id)
   431  			if err != nil {
   432  				t.Fatalf("Unable to fetch logs: %v", err)
   433  			}
   434  
   435  			fetched = append(fetched, results.([]*types.Log)...)
   436  			if len(fetched) >= len(tt.expected) {
   437  				break
   438  			}
   439  			// check timeout
   440  			if time.Now().After(timeout) {
   441  				break
   442  			}
   443  
   444  			time.Sleep(100 * time.Millisecond)
   445  		}
   446  
   447  		if len(fetched) != len(tt.expected) {
   448  			t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
   449  			return
   450  		}
   451  
   452  		for l := range fetched {
   453  			if fetched[l].Removed {
   454  				t.Errorf("expected log not to be removed for log %d in case %d", l, i)
   455  			}
   456  			if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
   457  				t.Errorf("invalid log on index %d for case %d", l, i)
   458  			}
   459  		}
   460  	}
   461  }
   462  
   463  // TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event mux.
   464  func TestPendingLogsSubscription(t *testing.T) {
   465  	t.Parallel()
   466  
   467  	var (
   468  		mux        = new(event.TypeMux)
   469  		db         = aquadb.NewMemDatabase()
   470  		txFeed     = new(event.Feed)
   471  		rmLogsFeed = new(event.Feed)
   472  		logsFeed   = new(event.Feed)
   473  		chainFeed  = new(event.Feed)
   474  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   475  		api        = NewPublicFilterAPI(backend, false)
   476  
   477  		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
   478  		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
   479  		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
   480  		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
   481  		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   482  		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
   483  		thirdTopic     = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333")
   484  		fourthTopic    = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444")
   485  		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
   486  
   487  		allLogs = []core.PendingLogsEvent{
   488  			{Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}},
   489  			{Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}},
   490  			{Logs: []*types.Log{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}},
   491  			{Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}},
   492  			{Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}},
   493  			{Logs: []*types.Log{
   494  				{Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
   495  				{Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
   496  				{Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5},
   497  				{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
   498  			}},
   499  		}
   500  
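        		// convertLogs flattens a slice of PendingLogsEvents into a single slice of logs.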
   501  		convertLogs = func(pl []core.PendingLogsEvent) []*types.Log {
   502  			var logs []*types.Log
   503  			for _, l := range pl {
   504  				logs = append(logs, l.Logs...)
   505  			}
   506  			return logs
   507  		}
   508  
   509  		testCases = []struct {
   510  			crit     aquachain.FilterQuery
   511  			expected []*types.Log
   512  			c        chan []*types.Log
   513  			sub      *Subscription
   514  		}{
   515  			// match all
   516  			{aquachain.FilterQuery{}, convertLogs(allLogs), nil, nil},
   517  			// match none due to no matching addresses
   518  			{aquachain.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, nil, nil},
   519  			// match logs based on addresses, ignore topics
   520  			{aquachain.FilterQuery{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
   521  			// match none due to no matching topics (match with address)
   522  			{aquachain.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, nil, nil},
   523  			// match logs based on addresses and topics
   524  			{aquachain.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil},
   525  			// match logs based on multiple addresses and "or" topics
   526  			{aquachain.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil},
   527  			// block numbers are ignored for filters created with New***Filter; these return all logs that match the given criteria when the state changes
   528  			{aquachain.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
   529  			// multiple pending logs; should match only the two logs in block 5 with firstTopic or fourthTopic
   530  			{aquachain.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil},
   531  		}
   532  	)
   533  
   534  	// Create all subscriptions first; this ensures every subscription exists before the events are posted.
   535  	// On slow machines, creating a subscription after (some) events have already been posted could
   536  	// otherwise lead to missing events.
   537  	for i := range testCases {
   538  		testCases[i].c = make(chan []*types.Log)
   539  		testCases[i].sub, _ = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c)
   540  	}
   541  
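        	// Start one consumer goroutine per test case; each collects logs from its channel until
        	// the expected number has arrived and then verifies them, panicking on mismatch because
        	// t.Fatalf must not be called from goroutines other than the test's own.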
   542  	for n, test := range testCases {
   543  		i := n
   544  		tt := test
   545  		go func() {
   546  			var fetched []*types.Log
   547  		fetchLoop:
   548  			for {
   549  				logs := <-tt.c
   550  				fetched = append(fetched, logs...)
   551  				if len(fetched) >= len(tt.expected) {
   552  					break fetchLoop
   553  				}
   554  			}
   555  
   556  			if len(fetched) != len(tt.expected) {
   557  				panic(fmt.Sprintf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)))
   558  			}
   559  
   560  			for l := range fetched {
   561  				if fetched[l].Removed {
   562  					panic(fmt.Sprintf("expected log not to be removed for log %d in case %d", l, i))
   563  				}
   564  				if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
   565  					panic(fmt.Sprintf("invalid log on index %d for case %d", l, i))
   566  				}
   567  			}
   568  		}()
   569  	}
   570  
   571  	// raise events
   572  	time.Sleep(1 * time.Second)
   573  	// allLogs are of type core.PendingLogsEvent
   574  	for _, l := range allLogs {
   575  		if err := mux.Post(l); err != nil {
   576  			t.Fatal(err)
   577  		}
   578  	}
   579  }