github.com/arieschain/arieschain@v0.0.0-20191023063405-37c074544356/qct/filters/filter_system_test.go

     1  package filters
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"math/big"
     7  	"math/rand"
     8  	"reflect"
     9  	"testing"
    10  	"time"
    11  
    12  	quickchain "github.com/quickchainproject/quickchain"
    13  	"github.com/quickchainproject/quickchain/common"
    14  	"github.com/quickchainproject/quickchain/consensus/qcthash"
    15  	"github.com/quickchainproject/quickchain/core"
    16  	"github.com/quickchainproject/quickchain/core/bloombits"
    17  	"github.com/quickchainproject/quickchain/core/types"
    18  	"github.com/quickchainproject/quickchain/event"
    19  	"github.com/quickchainproject/quickchain/params"
    20  	"github.com/quickchainproject/quickchain/qctdb"
    21  	"github.com/quickchainproject/quickchain/rpc"
    22  )
    23  
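        // testBackend is a mock filter backend: it serves chain data from a test database and
        // delivers events through in-memory feeds, so the filter system can be tested without a full node.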
    24  type testBackend struct {
    25  	mux        *event.TypeMux
    26  	db         qctdb.Database
    27  	sections   uint64
    28  	txFeed     *event.Feed
    29  	rmLogsFeed *event.Feed
    30  	logsFeed   *event.Feed
    31  	chainFeed  *event.Feed
    32  }
    33  
    34  func (b *testBackend) ChainDb() qctdb.Database {
    35  	return b.db
    36  }
    37  
    38  func (b *testBackend) EventMux() *event.TypeMux {
    39  	return b.mux
    40  }
    41  
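        // HeaderByNumber returns the requested header from the test database; LatestBlockNumber resolves to the current head block.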
    42  func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
    43  	var hash common.Hash
    44  	var num uint64
    45  	if blockNr == rpc.LatestBlockNumber {
    46  		hash = core.GetHeadBlockHash(b.db)
    47  		num = core.GetBlockNumber(b.db, hash)
    48  	} else {
    49  		num = uint64(blockNr)
    50  		hash = core.GetCanonicalHash(b.db, num)
    51  	}
    52  	return core.GetHeader(b.db, hash, num), nil
    53  }
    54  
    55  func (b *testBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
    56  	number := core.GetBlockNumber(b.db, blockHash)
    57  	return core.GetBlockReceipts(b.db, blockHash, number), nil
    58  }
    59  
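        // GetLogs returns the logs of every receipt in the given block, grouped per receipt.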
    60  func (b *testBackend) GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) {
    61  	number := core.GetBlockNumber(b.db, blockHash)
    62  	receipts := core.GetBlockReceipts(b.db, blockHash, number)
    63  
    64  	logs := make([][]*types.Log, len(receipts))
    65  	for i, receipt := range receipts {
    66  		logs[i] = receipt.Logs
    67  	}
    68  	return logs, nil
    69  }
    70  
    71  func (b *testBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
    72  	return b.txFeed.Subscribe(ch)
    73  }
    74  
    75  func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
    76  	return b.rmLogsFeed.Subscribe(ch)
    77  }
    78  
    79  func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
    80  	return b.logsFeed.Subscribe(ch)
    81  }
    82  
    83  func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
    84  	return b.chainFeed.Subscribe(ch)
    85  }
    86  
    87  func (b *testBackend) BloomStatus() (uint64, uint64) {
    88  	return params.BloomBitsBlocks, b.sections
    89  }
    90  
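        // ServiceFilter starts a bloombits matcher session that serves bloom bit retrievals from the test
        // database, randomly skipping about a quarter of the section deliveries to simulate missing data.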
    91  func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
    92  	requests := make(chan chan *bloombits.Retrieval)
    93  
    94  	go session.Multiplex(16, 0, requests)
    95  	go func() {
    96  		for {
    97  			// Wait for a service request or a shutdown
    98  			select {
    99  			case <-ctx.Done():
   100  				return
   101  
   102  			case request := <-requests:
   103  				task := <-request
   104  
   105  				task.Bitsets = make([][]byte, len(task.Sections))
   106  				for i, section := range task.Sections {
   107  					if rand.Int()%4 != 0 { // Handle occasional missing deliveries
   108  						head := core.GetCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
   109  						task.Bitsets[i], _ = core.GetBloomBits(b.db, task.Bit, section, head)
   110  					}
   111  				}
   112  				request <- task
   113  			}
   114  		}
   115  	}()
   116  }
   117  
   118  // TestBlockSubscription tests if a block subscription returns block hashes for posted chain events.
   119  // It creates two subscriptions at the start:
   120  // - sub0, which should receive every posted chain event
   121  // - sub1, which should also receive every posted chain event
   122  // Both subscriptions are uninstalled once all events have been received.
   123  func TestBlockSubscription(t *testing.T) {
   124  	t.Parallel()
   125  
   126  	var (
   127  		mux         = new(event.TypeMux)
   128  		db, _       = qctdb.NewMemDatabase()
   129  		txFeed      = new(event.Feed)
   130  		rmLogsFeed  = new(event.Feed)
   131  		logsFeed    = new(event.Feed)
   132  		chainFeed   = new(event.Feed)
   133  		backend     = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   134  		api         = NewPublicFilterAPI(backend, false)
   135  		genesis     = new(core.Genesis).MustCommit(db)
   136  		chain, _    = core.GenerateChain(params.TestChainConfig, genesis, qcthash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {})
   137  		chainEvents = []core.ChainEvent{}
   138  	)
   139  
   140  	for _, blk := range chain {
   141  		chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk})
   142  	}
   143  
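        	// subscribe twice to new chain heads; both subscriptions should see every posted chain event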
   144  	chan0 := make(chan *types.Header)
   145  	sub0 := api.events.SubscribeNewHeads(chan0)
   146  	chan1 := make(chan *types.Header)
   147  	sub1 := api.events.SubscribeNewHeads(chan1)
   148  
   149  	go func() { // simulate client
   150  		i1, i2 := 0, 0
   151  		for i1 != len(chainEvents) || i2 != len(chainEvents) {
   152  			select {
   153  			case header := <-chan0:
   154  				if chainEvents[i1].Hash != header.Hash() {
   155  					t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash())
   156  				}
   157  				i1++
   158  			case header := <-chan1:
   159  				if chainEvents[i2].Hash != header.Hash() {
   160  					t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash())
   161  				}
   162  				i2++
   163  			}
   164  		}
   165  
   166  		sub0.Unsubscribe()
   167  		sub1.Unsubscribe()
   168  	}()
   169  
   170  	time.Sleep(1 * time.Second)
   171  	for _, e := range chainEvents {
   172  		chainFeed.Send(e)
   173  	}
   174  
   175  	<-sub0.Err()
   176  	<-sub1.Err()
   177  }
   178  
   179  // TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the transaction feed.
   180  func TestPendingTxFilter(t *testing.T) {
   181  	t.Parallel()
   182  
   183  	var (
   184  		mux        = new(event.TypeMux)
   185  		db, _      = qctdb.NewMemDatabase()
   186  		txFeed     = new(event.Feed)
   187  		rmLogsFeed = new(event.Feed)
   188  		logsFeed   = new(event.Feed)
   189  		chainFeed  = new(event.Feed)
   190  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   191  		api        = NewPublicFilterAPI(backend, false)
   192  
   193  		transactions = []*types.Transaction{
   194  			types.NewTransaction(types.Binary, 0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   195  			types.NewTransaction(types.Binary, 1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   196  			types.NewTransaction(types.Binary, 2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   197  			types.NewTransaction(types.Binary, 3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   198  			types.NewTransaction(types.Binary, 4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   199  		}
   200  
   201  		hashes []common.Hash
   202  	)
   203  
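        	// install a pending transaction filter; the transactions posted below should show up in GetFilterChanges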
   204  	fid0 := api.NewPendingTransactionFilter()
   205  
   206  	time.Sleep(1 * time.Second)
   207  	for _, tx := range transactions {
   208  		ev := core.TxPreEvent{Tx: tx}
   209  		txFeed.Send(ev)
   210  	}
   211  
   212  	timeout := time.Now().Add(1 * time.Second)
   213  	for {
   214  		results, err := api.GetFilterChanges(fid0)
   215  		if err != nil {
   216  			t.Fatalf("Unable to retrieve pending transaction hashes: %v", err)
   217  		}
   218  
   219  		h := results.([]common.Hash)
   220  		hashes = append(hashes, h...)
   221  		if len(hashes) >= len(transactions) {
   222  			break
   223  		}
   224  		// check timeout
   225  		if time.Now().After(timeout) {
   226  			break
   227  		}
   228  
   229  		time.Sleep(100 * time.Millisecond)
   230  	}
   231  
   232  	if len(hashes) != len(transactions) {
   233  		t.Errorf("invalid number of transactions, want %d transaction(s), got %d", len(transactions), len(hashes))
   234  		return
   235  	}
   236  	for i := range hashes {
   237  		if hashes[i] != transactions[i].Hash() {
   238  			t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i])
   239  		}
   240  	}
   241  }
   242  
   243  // TestLogFilterCreation tests whether a given set of filter criteria makes sense.
   244  // If not, filter creation must return an error.
   245  func TestLogFilterCreation(t *testing.T) {
   246  	var (
   247  		mux        = new(event.TypeMux)
   248  		db, _      = qctdb.NewMemDatabase()
   249  		txFeed     = new(event.Feed)
   250  		rmLogsFeed = new(event.Feed)
   251  		logsFeed   = new(event.Feed)
   252  		chainFeed  = new(event.Feed)
   253  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   254  		api        = NewPublicFilterAPI(backend, false)
   255  
   256  		testCases = []struct {
   257  			crit    FilterCriteria
   258  			success bool
   259  		}{
   260  			// defaults
   261  			{FilterCriteria{}, true},
   262  			// valid block number range
   263  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true},
   264  			// "mined" block range to pending
   265  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true},
   266  			// new mined and pending blocks
   267  			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true},
   268  			// from block "higher" than to block
   269  			{FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false},
   270  			// from block "higher" than to block
   271  			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
   272  			// from block "higher" than to block
   273  			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
   274  			// from block "higher" than to block
   275  			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false},
   276  		}
   277  	)
   278  
   279  	for i, test := range testCases {
   280  		_, err := api.NewFilter(test.crit)
   281  		if test.success && err != nil {
   282  			t.Errorf("expected filter creation for case %d to succeed, got %v", i, err)
   283  		}
   284  		if !test.success && err == nil {
   285  			t.Errorf("expected testcase %d to fail with an error", i)
   286  		}
   287  	}
   288  }
   289  
   290  // TestInvalidLogFilterCreation tests whether invalid log filter criteria result in an error
   291  // when the filter is created.
   292  func TestInvalidLogFilterCreation(t *testing.T) {
   293  	t.Parallel()
   294  
   295  	var (
   296  		mux        = new(event.TypeMux)
   297  		db, _      = qctdb.NewMemDatabase()
   298  		txFeed     = new(event.Feed)
   299  		rmLogsFeed = new(event.Feed)
   300  		logsFeed   = new(event.Feed)
   301  		chainFeed  = new(event.Feed)
   302  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   303  		api        = NewPublicFilterAPI(backend, false)
   304  	)
   305  
   306  	// different situations where log filter creation should fail.
   307  	// Reason: fromBlock > toBlock
   308  	testCases := []FilterCriteria{
   309  		0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
   310  		1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)},
   311  		2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)},
   312  	}
   313  
   314  	for i, test := range testCases {
   315  		if _, err := api.NewFilter(test); err == nil {
   316  			t.Errorf("Expected NewFilter for case #%d to fail", i)
   317  		}
   318  	}
   319  }
   320  
   321  // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed.
   322  func TestLogFilter(t *testing.T) {
   323  	t.Parallel()
   324  
   325  	var (
   326  		mux        = new(event.TypeMux)
   327  		db, _      = qctdb.NewMemDatabase()
   328  		txFeed     = new(event.Feed)
   329  		rmLogsFeed = new(event.Feed)
   330  		logsFeed   = new(event.Feed)
   331  		chainFeed  = new(event.Feed)
   332  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   333  		api        = NewPublicFilterAPI(backend, false)
   334  
   335  		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
   336  		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
   337  		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
   338  		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
   339  		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   340  		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
   341  		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
   342  
   343  		// posted twice, once via the logs feed and once as core.PendingLogsEvent
   344  		allLogs = []*types.Log{
   345  			{Address: firstAddr},
   346  			{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
   347  			{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
   348  			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2},
   349  			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3},
   350  		}
   351  
   352  		expectedCase7  = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]}
   353  		expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]}
   354  
   355  		testCases = []struct {
   356  			crit     FilterCriteria
   357  			expected []*types.Log
   358  			id       rpc.ID
   359  		}{
   360  			// match all
   361  			0: {FilterCriteria{}, allLogs, ""},
   362  			// match none due to no matching addresses
   363  			1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""},
   364  			// match logs based on addresses, ignore topics
   365  			2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
   366  			// match none due to no matching topics (match with address)
   367  			3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""},
   368  			// match logs based on addresses and topics
   369  			4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""},
   370  			// match logs based on multiple addresses and "or" topics
   371  			5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""},
   372  			// logs in the pending block
   373  			6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""},
   374  			// mined logs with block num >= 2 or pending logs
   375  			7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""},
   376  			// all "mined" logs with block num >= 2
   377  			8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""},
   378  			// all "mined" logs
   379  			9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""},
   380  			// all "mined" logs with 1 <= block num <= 2 and topic secondTopic
   381  			10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""},
   382  			// all "mined" and pending logs with topic firstTopic
   383  			11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""},
   384  			// match all logs with at least one topic (single wildcard topic position)
   385  			12: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], ""},
   386  		}
   387  	)
   388  
   389  	// create all filters
   390  	for i := range testCases {
   391  		testCases[i].id, _ = api.NewFilter(testCases[i].crit)
   392  	}
   393  
   394  	// raise events
   395  	time.Sleep(1 * time.Second)
   396  	if nsend := logsFeed.Send(allLogs); nsend == 0 {
   397  		t.Fatal("Should have at least one subscription")
   398  	}
   399  	if err := mux.Post(core.PendingLogsEvent{Logs: allLogs}); err != nil {
   400  		t.Fatal(err)
   401  	}
   402  
   403  	for i, tt := range testCases {
   404  		var fetched []*types.Log
   405  		timeout := time.Now().Add(1 * time.Second)
   406  		for { // fetch all expected logs
   407  			results, err := api.GetFilterChanges(tt.id)
   408  			if err != nil {
   409  				t.Fatalf("Unable to fetch logs: %v", err)
   410  			}
   411  
   412  			fetched = append(fetched, results.([]*types.Log)...)
   413  			if len(fetched) >= len(tt.expected) {
   414  				break
   415  			}
   416  			// check timeout
   417  			if time.Now().After(timeout) {
   418  				break
   419  			}
   420  
   421  			time.Sleep(100 * time.Millisecond)
   422  		}
   423  
   424  		if len(fetched) != len(tt.expected) {
   425  			t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
   426  			return
   427  		}
   428  
   429  		for l := range fetched {
   430  			if fetched[l].Removed {
   431  				t.Errorf("expected log not to be removed for log %d in case %d", l, i)
   432  			}
   433  			if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
   434  				t.Errorf("invalid log on index %d for case %d", l, i)
   435  			}
   436  		}
   437  	}
   438  }
   439  
   440  // TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event mux.
   441  func TestPendingLogsSubscription(t *testing.T) {
   442  	t.Parallel()
   443  
   444  	var (
   445  		mux        = new(event.TypeMux)
   446  		db, _      = qctdb.NewMemDatabase()
   447  		txFeed     = new(event.Feed)
   448  		rmLogsFeed = new(event.Feed)
   449  		logsFeed   = new(event.Feed)
   450  		chainFeed  = new(event.Feed)
   451  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   452  		api        = NewPublicFilterAPI(backend, false)
   453  
   454  		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
   455  		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
   456  		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
   457  		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
   458  		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   459  		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
   460  		thirdTopic     = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333")
   461  		fourthTopic    = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444")
   462  		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
   463  
   464  		allLogs = []core.PendingLogsEvent{
   465  			{Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}},
   466  			{Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}},
   467  			{Logs: []*types.Log{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}},
   468  			{Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}},
   469  			{Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}},
   470  			{Logs: []*types.Log{
   471  				{Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
   472  				{Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
   473  				{Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5},
   474  				{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
   475  			}},
   476  		}
   477  
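        		// convertLogs flattens a slice of pending log events into a single slice of logs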
   478  		convertLogs = func(pl []core.PendingLogsEvent) []*types.Log {
   479  			var logs []*types.Log
   480  			for _, l := range pl {
   481  				logs = append(logs, l.Logs...)
   482  			}
   483  			return logs
   484  		}
   485  
   486  		testCases = []struct {
   487  			crit     quickchain.FilterQuery
   488  			expected []*types.Log
   489  			c        chan []*types.Log
   490  			sub      *Subscription
   491  		}{
   492  			// match all
   493  			{quickchain.FilterQuery{}, convertLogs(allLogs), nil, nil},
   494  			// match none due to no matching addresses
   495  			{quickchain.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, nil, nil},
   496  			// match logs based on addresses, ignore topics
   497  			{quickchain.FilterQuery{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
   498  			// match none due to no matching topics (match with address)
   499  			{quickchain.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, nil, nil},
   500  			// match logs based on addresses and topics
   501  			{quickchain.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil},
   502  			// match logs based on multiple addresses and "or" topics
   503  			{quickchain.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil},
   504  			// block numbers are ignored for filters created with New***Filter; these return all logs that match the given criteria when the state changes
   505  			{quickchain.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
   506  			// multiple pending logs, should match only 2 topics from the logs in block 5
   507  			{quickchain.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil},
   508  		}
   509  	)
   510  
   511  	// create all subscriptions first; this ensures every subscription exists before the events are posted.
   512  	// On slow machines, creating a subscription after (some) events have already been posted could
   513  	// otherwise lead to missing events.
   514  	for i := range testCases {
   515  		testCases[i].c = make(chan []*types.Log)
   516  		testCases[i].sub, _ = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c)
   517  	}
   518  
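        	// copy the loop variables so each verification goroutine checks its own test case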
   519  	for n, test := range testCases {
   520  		i := n
   521  		tt := test
   522  		go func() {
   523  			var fetched []*types.Log
   524  		fetchLoop:
   525  			for {
   526  				logs := <-tt.c
   527  				fetched = append(fetched, logs...)
   528  				if len(fetched) >= len(tt.expected) {
   529  					break fetchLoop
   530  				}
   531  			}
   532  
   533  			if len(fetched) != len(tt.expected) {
   534  				panic(fmt.Sprintf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)))
   535  			}
   536  
   537  			for l := range fetched {
   538  				if fetched[l].Removed {
   539  					panic(fmt.Sprintf("expected log not to be removed for log %d in case %d", l, i))
   540  				}
   541  				if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
   542  					panic(fmt.Sprintf("invalid log on index %d for case %d", l, i))
   543  				}
   544  			}
   545  		}()
   546  	}
   547  
   548  	// raise events
   549  	time.Sleep(1 * time.Second)
   550  	// allLogs are of type core.PendingLogsEvent
   551  	for _, l := range allLogs {
   552  		if err := mux.Post(l); err != nil {
   553  			t.Fatal(err)
   554  		}
   555  	}
   556  }