github.com/eduardonunesp/go-ethereum@v1.8.9-0.20180514135602-f6bc65fc6811/eth/filters/filter_system_test.go

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package filters
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"math/big"
    23  	"math/rand"
    24  	"reflect"
    25  	"testing"
    26  	"time"
    27  
    28  	ethereum "github.com/ethereum/go-ethereum"
    29  	"github.com/ethereum/go-ethereum/common"
    30  	"github.com/ethereum/go-ethereum/consensus/ethash"
    31  	"github.com/ethereum/go-ethereum/core"
    32  	"github.com/ethereum/go-ethereum/core/bloombits"
    33  	"github.com/ethereum/go-ethereum/core/rawdb"
    34  	"github.com/ethereum/go-ethereum/core/types"
    35  	"github.com/ethereum/go-ethereum/ethdb"
    36  	"github.com/ethereum/go-ethereum/event"
    37  	"github.com/ethereum/go-ethereum/params"
    38  	"github.com/ethereum/go-ethereum/rpc"
    39  )
    40  
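         // testBackend is a mock filter backend: it serves chain data from an in-memory
         // database and exposes event feeds so the tests can post transaction, removed-log,
         // log and chain events directly.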
    41  type testBackend struct {
    42  	mux        *event.TypeMux
    43  	db         ethdb.Database
    44  	sections   uint64
    45  	txFeed     *event.Feed
    46  	rmLogsFeed *event.Feed
    47  	logsFeed   *event.Feed
    48  	chainFeed  *event.Feed
    49  }
    50  
    51  func (b *testBackend) ChainDb() ethdb.Database {
    52  	return b.db
    53  }
    54  
    55  func (b *testBackend) EventMux() *event.TypeMux {
    56  	return b.mux
    57  }
    58  
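         // HeaderByNumber returns the requested header from the test database, resolving
         // rpc.LatestBlockNumber via the current head block hash.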
    59  func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
    60  	var (
    61  		hash common.Hash
    62  		num  uint64
    63  	)
    64  	if blockNr == rpc.LatestBlockNumber {
    65  		hash = rawdb.ReadHeadBlockHash(b.db)
    66  		number := rawdb.ReadHeaderNumber(b.db, hash)
    67  		if number == nil {
    68  			return nil, nil
    69  		}
    70  		num = *number
    71  	} else {
    72  		num = uint64(blockNr)
    73  		hash = rawdb.ReadCanonicalHash(b.db, num)
    74  	}
    75  	return rawdb.ReadHeader(b.db, hash, num), nil
    76  }
    77  
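         // GetReceipts returns the receipts stored for the block with the given hash, or nil
         // if the block is unknown.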
    78  func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
    79  	if number := rawdb.ReadHeaderNumber(b.db, hash); number != nil {
    80  		return rawdb.ReadReceipts(b.db, hash, *number), nil
    81  	}
    82  	return nil, nil
    83  }
    84  
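         // GetLogs returns the logs of every receipt in the block with the given hash, or nil
         // if the block is unknown.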
    85  func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
    86  	number := rawdb.ReadHeaderNumber(b.db, hash)
    87  	if number == nil {
    88  		return nil, nil
    89  	}
    90  	receipts := rawdb.ReadReceipts(b.db, hash, *number)
    91  
    92  	logs := make([][]*types.Log, len(receipts))
    93  	for i, receipt := range receipts {
    94  		logs[i] = receipt.Logs
    95  	}
    96  	return logs, nil
    97  }
    98  
    99  func (b *testBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
   100  	return b.txFeed.Subscribe(ch)
   101  }
   102  
   103  func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
   104  	return b.rmLogsFeed.Subscribe(ch)
   105  }
   106  
   107  func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
   108  	return b.logsFeed.Subscribe(ch)
   109  }
   110  
   111  func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
   112  	return b.chainFeed.Subscribe(ch)
   113  }
   114  
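         // BloomStatus reports the fixed bloom-bits section size and the number of processed
         // sections configured on the test backend.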
   115  func (b *testBackend) BloomStatus() (uint64, uint64) {
   116  	return params.BloomBitsBlocks, b.sections
   117  }
   118  
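         // ServiceFilter serves bloom-bits retrieval requests for a matcher session from the
         // test database, occasionally skipping a request to simulate missed deliveries.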
   119  func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
   120  	requests := make(chan chan *bloombits.Retrieval)
   121  
   122  	go session.Multiplex(16, 0, requests)
   123  	go func() {
   124  		for {
   125  			// Wait for a service request or a shutdown
   126  			select {
   127  			case <-ctx.Done():
   128  				return
   129  
   130  			case request := <-requests:
   131  				task := <-request
   132  
   133  				task.Bitsets = make([][]byte, len(task.Sections))
   134  				for i, section := range task.Sections {
    135  					if rand.Int()%4 != 0 { // Deliver most requests; skip ~1 in 4 to simulate missed deliveries
   136  						head := rawdb.ReadCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
   137  						task.Bitsets[i], _ = rawdb.ReadBloomBits(b.db, task.Bit, section, head)
   138  					}
   139  				}
   140  				request <- task
   141  			}
   142  		}
   143  	}()
   144  }
   145  
    146  // TestBlockSubscription tests whether a block subscription returns block hashes for posted chain events.
    147  // It creates two subscriptions:
    148  // - both are registered through api.events.SubscribeNewHeads before any events are posted
    149  // - each must receive every posted chain event, in order
    150  // - both are unsubscribed once all events have been received
   151  func TestBlockSubscription(t *testing.T) {
   152  	t.Parallel()
   153  
   154  	var (
   155  		mux         = new(event.TypeMux)
   156  		db          = ethdb.NewMemDatabase()
   157  		txFeed      = new(event.Feed)
   158  		rmLogsFeed  = new(event.Feed)
   159  		logsFeed    = new(event.Feed)
   160  		chainFeed   = new(event.Feed)
   161  		backend     = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   162  		api         = NewPublicFilterAPI(backend, false)
   163  		genesis     = new(core.Genesis).MustCommit(db)
   164  		chain, _    = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {})
   165  		chainEvents = []core.ChainEvent{}
   166  	)
   167  
   168  	for _, blk := range chain {
   169  		chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk})
   170  	}
   171  
   172  	chan0 := make(chan *types.Header)
   173  	sub0 := api.events.SubscribeNewHeads(chan0)
   174  	chan1 := make(chan *types.Header)
   175  	sub1 := api.events.SubscribeNewHeads(chan1)
   176  
   177  	go func() { // simulate client
   178  		i1, i2 := 0, 0
   179  		for i1 != len(chainEvents) || i2 != len(chainEvents) {
   180  			select {
   181  			case header := <-chan0:
   182  				if chainEvents[i1].Hash != header.Hash() {
   183  					t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash())
   184  				}
   185  				i1++
   186  			case header := <-chan1:
   187  				if chainEvents[i2].Hash != header.Hash() {
   188  					t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash())
   189  				}
   190  				i2++
   191  			}
   192  		}
   193  
   194  		sub0.Unsubscribe()
   195  		sub1.Unsubscribe()
   196  	}()
   197  
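         	// give the subscribers a moment to install, then post the chain events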
   198  	time.Sleep(1 * time.Second)
   199  	for _, e := range chainEvents {
   200  		chainFeed.Send(e)
   201  	}
   202  
   203  	<-sub0.Err()
   204  	<-sub1.Err()
   205  }
   206  
    207  // TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the transaction event feed.
   208  func TestPendingTxFilter(t *testing.T) {
   209  	t.Parallel()
   210  
   211  	var (
   212  		mux        = new(event.TypeMux)
   213  		db         = ethdb.NewMemDatabase()
   214  		txFeed     = new(event.Feed)
   215  		rmLogsFeed = new(event.Feed)
   216  		logsFeed   = new(event.Feed)
   217  		chainFeed  = new(event.Feed)
   218  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   219  		api        = NewPublicFilterAPI(backend, false)
   220  
   221  		transactions = []*types.Transaction{
   222  			types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   223  			types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   224  			types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   225  			types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   226  			types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   227  		}
   228  
   229  		hashes []common.Hash
   230  	)
   231  
   232  	fid0 := api.NewPendingTransactionFilter()
   233  
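         	// give the filter a moment to install, then post the pending transactions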
   234  	time.Sleep(1 * time.Second)
   235  	for _, tx := range transactions {
   236  		ev := core.TxPreEvent{Tx: tx}
   237  		txFeed.Send(ev)
   238  	}
   239  
   240  	timeout := time.Now().Add(1 * time.Second)
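         	// poll the filter until all expected transaction hashes arrive or the timeout expires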
   241  	for {
   242  		results, err := api.GetFilterChanges(fid0)
   243  		if err != nil {
   244  			t.Fatalf("Unable to retrieve logs: %v", err)
   245  		}
   246  
   247  		h := results.([]common.Hash)
   248  		hashes = append(hashes, h...)
   249  		if len(hashes) >= len(transactions) {
   250  			break
   251  		}
   252  		// check timeout
   253  		if time.Now().After(timeout) {
   254  			break
   255  		}
   256  
   257  		time.Sleep(100 * time.Millisecond)
   258  	}
   259  
   260  	if len(hashes) != len(transactions) {
    261  		t.Errorf("invalid number of transactions, want %d transaction(s), got %d", len(transactions), len(hashes))
   262  		return
   263  	}
   264  	for i := range hashes {
   265  		if hashes[i] != transactions[i].Hash() {
   266  			t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i])
   267  		}
   268  	}
   269  }
   270  
    271  // TestLogFilterCreation tests whether a given set of filter criteria makes sense.
    272  // If not, it must return an error.
   273  func TestLogFilterCreation(t *testing.T) {
   274  	var (
   275  		mux        = new(event.TypeMux)
   276  		db         = ethdb.NewMemDatabase()
   277  		txFeed     = new(event.Feed)
   278  		rmLogsFeed = new(event.Feed)
   279  		logsFeed   = new(event.Feed)
   280  		chainFeed  = new(event.Feed)
   281  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   282  		api        = NewPublicFilterAPI(backend, false)
   283  
   284  		testCases = []struct {
   285  			crit    FilterCriteria
   286  			success bool
   287  		}{
   288  			// defaults
   289  			{FilterCriteria{}, true},
   290  			// valid block number range
   291  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true},
    292  			// "mined" block range up to the latest block
   293  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true},
   294  			// new mined and pending blocks
   295  			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true},
   296  			// from block "higher" than to block
   297  			{FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false},
   298  			// from block "higher" than to block
   299  			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
   300  			// from block "higher" than to block
   301  			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
   302  			// from block "higher" than to block
   303  			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false},
   304  		}
   305  	)
   306  
   307  	for i, test := range testCases {
   308  		_, err := api.NewFilter(test.crit)
   309  		if test.success && err != nil {
    310  			t.Errorf("expected filter creation for case %d to succeed, got %v", i, err)
   311  		}
   312  		if !test.success && err == nil {
   313  			t.Errorf("expected testcase %d to fail with an error", i)
   314  		}
   315  	}
   316  }
   317  
    318  // TestInvalidLogFilterCreation tests whether invalid log filter criteria result in an error
    319  // when the filter is created.
   320  func TestInvalidLogFilterCreation(t *testing.T) {
   321  	t.Parallel()
   322  
   323  	var (
   324  		mux        = new(event.TypeMux)
   325  		db         = ethdb.NewMemDatabase()
   326  		txFeed     = new(event.Feed)
   327  		rmLogsFeed = new(event.Feed)
   328  		logsFeed   = new(event.Feed)
   329  		chainFeed  = new(event.Feed)
   330  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   331  		api        = NewPublicFilterAPI(backend, false)
   332  	)
   333  
   334  	// different situations where log filter creation should fail.
   335  	// Reason: fromBlock > toBlock
   336  	testCases := []FilterCriteria{
   337  		0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
   338  		1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)},
   339  		2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)},
   340  	}
   341  
   342  	for i, test := range testCases {
   343  		if _, err := api.NewFilter(test); err == nil {
   344  			t.Errorf("Expected NewFilter for case #%d to fail", i)
   345  		}
   346  	}
   347  }
   348  
   349  // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed.
   350  func TestLogFilter(t *testing.T) {
   351  	t.Parallel()
   352  
   353  	var (
   354  		mux        = new(event.TypeMux)
   355  		db         = ethdb.NewMemDatabase()
   356  		txFeed     = new(event.Feed)
   357  		rmLogsFeed = new(event.Feed)
   358  		logsFeed   = new(event.Feed)
   359  		chainFeed  = new(event.Feed)
   360  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   361  		api        = NewPublicFilterAPI(backend, false)
   362  
   363  		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
   364  		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
   365  		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
   366  		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
   367  		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   368  		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
   369  		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
   370  
    371  		// posted twice, once via the logs feed and once as a core.PendingLogsEvent on the event mux
   372  		allLogs = []*types.Log{
   373  			{Address: firstAddr},
   374  			{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
   375  			{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
   376  			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2},
   377  			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3},
   378  		}
   379  
   380  		expectedCase7  = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]}
   381  		expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]}
   382  
   383  		testCases = []struct {
   384  			crit     FilterCriteria
   385  			expected []*types.Log
   386  			id       rpc.ID
   387  		}{
   388  			// match all
   389  			0: {FilterCriteria{}, allLogs, ""},
   390  			// match none due to no matching addresses
   391  			1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""},
   392  			// match logs based on addresses, ignore topics
   393  			2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
   394  			// match none due to no matching topics (match with address)
   395  			3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""},
   396  			// match logs based on addresses and topics
   397  			4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""},
   398  			// match logs based on multiple addresses and "or" topics
   399  			5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""},
   400  			// logs in the pending block
   401  			6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""},
   402  			// mined logs with block num >= 2 or pending logs
   403  			7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""},
   404  			// all "mined" logs with block num >= 2
   405  			8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""},
   406  			// all "mined" logs
   407  			9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""},
    408  			// all "mined" logs with 1 <= block num <= 2 and topic secondTopic
   409  			10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""},
   410  			// all "mined" and pending logs with topic firstTopic
   411  			11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""},
    412  			// match all logs with at least one topic due to wildcard topic
   413  			12: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], ""},
   414  		}
   415  	)
   416  
   417  	// create all filters
   418  	for i := range testCases {
   419  		testCases[i].id, _ = api.NewFilter(testCases[i].crit)
   420  	}
   421  
   422  	// raise events
   423  	time.Sleep(1 * time.Second)
   424  	if nsend := logsFeed.Send(allLogs); nsend == 0 {
    425  		t.Fatal("Should have at least one subscription")
   426  	}
   427  	if err := mux.Post(core.PendingLogsEvent{Logs: allLogs}); err != nil {
   428  		t.Fatal(err)
   429  	}
   430  
   431  	for i, tt := range testCases {
   432  		var fetched []*types.Log
   433  		timeout := time.Now().Add(1 * time.Second)
   434  		for { // fetch all expected logs
   435  			results, err := api.GetFilterChanges(tt.id)
   436  			if err != nil {
   437  				t.Fatalf("Unable to fetch logs: %v", err)
   438  			}
   439  
   440  			fetched = append(fetched, results.([]*types.Log)...)
   441  			if len(fetched) >= len(tt.expected) {
   442  				break
   443  			}
   444  			// check timeout
   445  			if time.Now().After(timeout) {
   446  				break
   447  			}
   448  
   449  			time.Sleep(100 * time.Millisecond)
   450  		}
   451  
   452  		if len(fetched) != len(tt.expected) {
   453  			t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
   454  			return
   455  		}
   456  
   457  		for l := range fetched {
   458  			if fetched[l].Removed {
   459  				t.Errorf("expected log not to be removed for log %d in case %d", l, i)
   460  			}
   461  			if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
   462  				t.Errorf("invalid log on index %d for case %d", l, i)
   463  			}
   464  		}
   465  	}
   466  }
   467  
    468  // TestPendingLogsSubscription tests whether a subscription receives the correct pending logs that are posted to the event mux.
   469  func TestPendingLogsSubscription(t *testing.T) {
   470  	t.Parallel()
   471  
   472  	var (
   473  		mux        = new(event.TypeMux)
   474  		db         = ethdb.NewMemDatabase()
   475  		txFeed     = new(event.Feed)
   476  		rmLogsFeed = new(event.Feed)
   477  		logsFeed   = new(event.Feed)
   478  		chainFeed  = new(event.Feed)
   479  		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
   480  		api        = NewPublicFilterAPI(backend, false)
   481  
   482  		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
   483  		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
   484  		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
   485  		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
   486  		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   487  		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
   488  		thirdTopic     = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333")
   489  		fourthTopic    = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444")
   490  		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
   491  
   492  		allLogs = []core.PendingLogsEvent{
   493  			{Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}},
   494  			{Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}},
   495  			{Logs: []*types.Log{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}},
   496  			{Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}},
   497  			{Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}},
   498  			{Logs: []*types.Log{
   499  				{Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
   500  				{Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
   501  				{Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5},
   502  				{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
   503  			}},
   504  		}
   505  
   506  		convertLogs = func(pl []core.PendingLogsEvent) []*types.Log {
   507  			var logs []*types.Log
   508  			for _, l := range pl {
   509  				logs = append(logs, l.Logs...)
   510  			}
   511  			return logs
   512  		}
   513  
   514  		testCases = []struct {
   515  			crit     ethereum.FilterQuery
   516  			expected []*types.Log
   517  			c        chan []*types.Log
   518  			sub      *Subscription
   519  		}{
   520  			// match all
   521  			{ethereum.FilterQuery{}, convertLogs(allLogs), nil, nil},
   522  			// match none due to no matching addresses
   523  			{ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, nil, nil},
   524  			// match logs based on addresses, ignore topics
   525  			{ethereum.FilterQuery{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
   526  			// match none due to no matching topics (match with address)
   527  			{ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, nil, nil},
   528  			// match logs based on addresses and topics
   529  			{ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil},
   530  			// match logs based on multiple addresses and "or" topics
   531  			{ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil},
    532  			// block numbers are ignored for filters created with New***Filter; these return all logs that match the given criteria when the state changes
   533  			{ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
   534  			// multiple pending logs, should match only 2 topics from the logs in block 5
   535  			{ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil},
   536  		}
   537  	)
   538  
    539  	// create all subscriptions; this ensures all subscriptions are created before the events are posted.
    540  	// on slow machines this could otherwise lead to missing events when the subscription is created after
    541  	// (some) events are posted.
   542  	for i := range testCases {
   543  		testCases[i].c = make(chan []*types.Log)
   544  		testCases[i].sub, _ = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c)
   545  	}
   546  
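         	// start one goroutine per test case that drains the subscription channel and
         	// verifies the received logs against the expected set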
   547  	for n, test := range testCases {
   548  		i := n
   549  		tt := test
   550  		go func() {
   551  			var fetched []*types.Log
   552  		fetchLoop:
   553  			for {
   554  				logs := <-tt.c
   555  				fetched = append(fetched, logs...)
   556  				if len(fetched) >= len(tt.expected) {
   557  					break fetchLoop
   558  				}
   559  			}
   560  
   561  			if len(fetched) != len(tt.expected) {
   562  				panic(fmt.Sprintf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)))
   563  			}
   564  
   565  			for l := range fetched {
   566  				if fetched[l].Removed {
   567  					panic(fmt.Sprintf("expected log not to be removed for log %d in case %d", l, i))
   568  				}
   569  				if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
   570  					panic(fmt.Sprintf("invalid log on index %d for case %d", l, i))
   571  				}
   572  			}
   573  		}()
   574  	}
   575  
   576  	// raise events
   577  	time.Sleep(1 * time.Second)
    578  	// allLogs is a slice of core.PendingLogsEvent values
   579  	for _, l := range allLogs {
   580  		if err := mux.Post(l); err != nil {
   581  			t.Fatal(err)
   582  		}
   583  	}
   584  }