github.com/kisexp/xdchain@v0.0.0-20211206025815-490d6b732aa7/eth/filters/filter_system_test.go

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package filters
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"math/big"
    23  	"math/rand"
    24  	"reflect"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/kisexp/xdchain"
    29  	"github.com/kisexp/xdchain/common"
    30  	"github.com/kisexp/xdchain/consensus/ethash"
    31  	"github.com/kisexp/xdchain/core"
    32  	"github.com/kisexp/xdchain/core/bloombits"
    33  	"github.com/kisexp/xdchain/core/mps"
    34  	"github.com/kisexp/xdchain/core/rawdb"
    35  	"github.com/kisexp/xdchain/core/types"
    36  	"github.com/kisexp/xdchain/core/vm"
    37  	"github.com/kisexp/xdchain/ethdb"
    38  	"github.com/kisexp/xdchain/event"
    39  	"github.com/kisexp/xdchain/params"
    40  	"github.com/kisexp/xdchain/rpc"
    41  )
    42  
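         // testBackend is a minimal backend implementation for the filter tests below: it reads
         // chain data from the supplied database, and its event feeds let the tests inject
         // transaction, log, removed-log, pending-log and chain events directly.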
    43  type testBackend struct {
    44  	mux             *event.TypeMux
    45  	db              ethdb.Database
    46  	sections        uint64
    47  	txFeed          event.Feed
    48  	logsFeed        event.Feed
    49  	rmLogsFeed      event.Feed
    50  	pendingLogsFeed event.Feed
    51  	chainFeed       event.Feed
    52  }
    53  
    54  func (b *testBackend) ChainDb() ethdb.Database {
    55  	return b.db
    56  }
    57  
    58  func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
    59  	var (
    60  		hash common.Hash
    61  		num  uint64
    62  	)
    63  	if blockNr == rpc.LatestBlockNumber {
    64  		hash = rawdb.ReadHeadBlockHash(b.db)
    65  		number := rawdb.ReadHeaderNumber(b.db, hash)
    66  		if number == nil {
    67  			return nil, nil
    68  		}
    69  		num = *number
    70  	} else {
    71  		num = uint64(blockNr)
    72  		hash = rawdb.ReadCanonicalHash(b.db, num)
    73  	}
    74  	return rawdb.ReadHeader(b.db, hash, num), nil
    75  }
    76  
    77  func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
    78  	number := rawdb.ReadHeaderNumber(b.db, hash)
    79  	if number == nil {
    80  		return nil, nil
    81  	}
    82  	return rawdb.ReadHeader(b.db, hash, *number), nil
    83  }
    84  
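         // GetReceipts returns the receipts for the block with the given hash. Quorum: if a
         // receipt carries private-state-specific receipts (PSReceipts), the version matching
         // the caller's private state identifier is returned in its place.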
    85  func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
    86  	if number := rawdb.ReadHeaderNumber(b.db, hash); number != nil {
    87  		receipts := rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig)
    88  
    89  		psm, err := b.PSMR().ResolveForUserContext(ctx)
    90  		if err != nil {
    91  			return nil, err
    92  		}
    93  
    94  		psiReceipts := make([]*types.Receipt, len(receipts))
    95  		for i := 0; i < len(receipts); i++ {
    96  			psiReceipts[i] = receipts[i]
    97  			if receipts[i].PSReceipts != nil {
    98  				psReceipt, found := receipts[i].PSReceipts[psm.ID]
    99  				if found {
   100  					psiReceipts[i] = psReceipt
   101  				}
   102  			}
   103  		}
   104  		return psiReceipts, nil
   105  	}
   106  	return nil, nil
   107  }
   108  
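         // GetLogs returns the logs of the block with the given hash, grouped per receipt.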
   109  func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
   110  	receipts, err := b.GetReceipts(ctx, hash)
   111  	if err != nil {
   112  		return nil, err
   113  	}
   114  	if receipts == nil {
   115  		return nil, nil
   116  	}
   117  	logs := make([][]*types.Log, len(receipts))
   118  	for i, receipt := range receipts {
   119  		logs[i] = receipt.Logs
   120  	}
   121  	return logs, nil
   122  }
   123  
   124  func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
   125  	return b.txFeed.Subscribe(ch)
   126  }
   127  
   128  func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
   129  	return b.rmLogsFeed.Subscribe(ch)
   130  }
   131  
   132  func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
   133  	return b.logsFeed.Subscribe(ch)
   134  }
   135  
   136  func (b *testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
   137  	return b.pendingLogsFeed.Subscribe(ch)
   138  }
   139  
   140  func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
   141  	return b.chainFeed.Subscribe(ch)
   142  }
   143  
   144  func (b *testBackend) BloomStatus() (uint64, uint64) {
   145  	return params.BloomBitsBlocks, b.sections
   146  }
   147  
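         // ServiceFilter simulates the bloom-bits retrieval service used by log filtering: it
         // serves matcher requests from the database, randomly skipping roughly a quarter of
         // the requested sections to exercise the handling of missing deliveries.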
   148  func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
   149  	requests := make(chan chan *bloombits.Retrieval)
   150  
   151  	go session.Multiplex(16, 0, requests)
   152  	go func() {
   153  		for {
   154  			// Wait for a service request or a shutdown
   155  			select {
   156  			case <-ctx.Done():
   157  				return
   158  
   159  			case request := <-requests:
   160  				task := <-request
   161  
   162  				task.Bitsets = make([][]byte, len(task.Sections))
   163  				for i, section := range task.Sections {
   164  					if rand.Int()%4 != 0 { // Handle occasional missing deliveries
   165  						head := rawdb.ReadCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
   166  						task.Bitsets[i], _ = rawdb.ReadBloomBits(b.db, task.Bit, section, head)
   167  					}
   168  				}
   169  				request <- task
   170  			}
   171  		}
   172  	}()
   173  }
   174  
   175  func (b *testBackend) AccountExtraDataStateGetterByNumber(context.Context, rpc.BlockNumber) (vm.AccountExtraDataStateGetter, error) {
   176  	return nil, nil
   177  }
   178  
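         // PSMR returns the default private state manager as the private state metadata
         // resolver; GetReceipts uses it to resolve the caller's private state identifier.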
   179  func (b *testBackend) PSMR() mps.PrivateStateMetadataResolver {
   180  	return &core.DefaultPrivateStateManager{}
   181  }
   182  
    183  // TestBlockSubscription tests whether a block subscription returns the correct block
    184  // hashes for posted chain events. It creates two subscriptions (sub0 and sub1) before
    185  // any chain events are posted; each is expected to receive every posted chain event,
    186  // in order, and both are unsubscribed once the simulated client has received all of
    187  // the events.
   188  func TestBlockSubscription(t *testing.T) {
   189  	t.Parallel()
   190  
   191  	var (
   192  		db          = rawdb.NewMemoryDatabase()
   193  		backend     = &testBackend{db: db}
   194  		api         = NewPublicFilterAPI(backend, false)
   195  		genesis     = new(core.Genesis).MustCommit(db)
   196  		chain, _    = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {})
   197  		chainEvents = []core.ChainEvent{}
   198  	)
   199  
   200  	for _, blk := range chain {
   201  		chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk})
   202  	}
   203  
   204  	chan0 := make(chan *types.Header)
   205  	sub0 := api.events.SubscribeNewHeads(chan0)
   206  	chan1 := make(chan *types.Header)
   207  	sub1 := api.events.SubscribeNewHeads(chan1)
   208  
   209  	go func() { // simulate client
   210  		i1, i2 := 0, 0
   211  		for i1 != len(chainEvents) || i2 != len(chainEvents) {
   212  			select {
   213  			case header := <-chan0:
   214  				if chainEvents[i1].Hash != header.Hash() {
   215  					t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash())
   216  				}
   217  				i1++
   218  			case header := <-chan1:
   219  				if chainEvents[i2].Hash != header.Hash() {
   220  					t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash())
   221  				}
   222  				i2++
   223  			}
   224  		}
   225  
   226  		sub0.Unsubscribe()
   227  		sub1.Unsubscribe()
   228  	}()
   229  
   230  	time.Sleep(1 * time.Second)
   231  	for _, e := range chainEvents {
   232  		backend.chainFeed.Send(e)
   233  	}
   234  
   235  	<-sub0.Err()
   236  	<-sub1.Err()
   237  }
   238  
    239  // TestPendingTxFilter tests whether pending transaction filters retrieve all pending transactions that are posted to the transaction feed.
   240  func TestPendingTxFilter(t *testing.T) {
   241  	t.Parallel()
   242  
   243  	var (
   244  		db      = rawdb.NewMemoryDatabase()
   245  		backend = &testBackend{db: db}
   246  		api     = NewPublicFilterAPI(backend, false)
   247  
   248  		transactions = []*types.Transaction{
   249  			types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   250  			types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   251  			types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   252  			types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   253  			types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
   254  		}
   255  
   256  		hashes []common.Hash
   257  	)
   258  
   259  	fid0 := api.NewPendingTransactionFilter()
   260  
   261  	time.Sleep(1 * time.Second)
   262  	backend.txFeed.Send(core.NewTxsEvent{Txs: transactions})
   263  
   264  	timeout := time.Now().Add(1 * time.Second)
   265  	for {
   266  		results, err := api.GetFilterChanges(context.Background(), fid0)
   267  		if err != nil {
   268  			t.Fatalf("Unable to retrieve logs: %v", err)
   269  		}
   270  
   271  		h := results.([]common.Hash)
   272  		hashes = append(hashes, h...)
   273  		if len(hashes) >= len(transactions) {
   274  			break
   275  		}
   276  		// check timeout
   277  		if time.Now().After(timeout) {
   278  			break
   279  		}
   280  
   281  		time.Sleep(100 * time.Millisecond)
   282  	}
   283  
   284  	if len(hashes) != len(transactions) {
    285  		t.Errorf("invalid number of transactions, want %d transaction(s), got %d", len(transactions), len(hashes))
   286  		return
   287  	}
   288  	for i := range hashes {
   289  		if hashes[i] != transactions[i].Hash() {
   290  			t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i])
   291  		}
   292  	}
   293  }
   294  
    295  // TestLogFilterCreation tests whether given filter criteria make sense.
    296  // If not, filter creation must return an error.
   297  func TestLogFilterCreation(t *testing.T) {
   298  	var (
   299  		db      = rawdb.NewMemoryDatabase()
   300  		backend = &testBackend{db: db}
   301  		api     = NewPublicFilterAPI(backend, false)
   302  
   303  		testCases = []struct {
   304  			crit    FilterCriteria
   305  			success bool
   306  		}{
   307  			// defaults
   308  			{FilterCriteria{}, true},
   309  			// valid block number range
   310  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true},
    311  			// block range from a "mined" block to the latest block
   312  			{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true},
   313  			// new mined and pending blocks
   314  			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true},
   315  			// from block "higher" than to block
   316  			{FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false},
   317  			// from block "higher" than to block
   318  			{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
   319  			// from block "higher" than to block
   320  			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},
   321  			// from block "higher" than to block
   322  			{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false},
   323  		}
   324  	)
   325  
   326  	for i, test := range testCases {
   327  		_, err := api.NewFilter(context.Background(), test.crit)
   328  		if test.success && err != nil {
    329  			t.Errorf("expected filter creation for case %d to succeed, got %v", i, err)
   330  		}
   331  		if !test.success && err == nil {
   332  			t.Errorf("expected testcase %d to fail with an error", i)
   333  		}
   334  	}
   335  }
   336  
    337  // TestInvalidLogFilterCreation tests whether invalid log filter criteria result in an error
   338  // when the filter is created.
   339  func TestInvalidLogFilterCreation(t *testing.T) {
   340  	t.Parallel()
   341  
   342  	var (
   343  		db      = rawdb.NewMemoryDatabase()
   344  		backend = &testBackend{db: db}
   345  		api     = NewPublicFilterAPI(backend, false)
   346  	)
   347  
   348  	// different situations where log filter creation should fail.
   349  	// Reason: fromBlock > toBlock
   350  	testCases := []FilterCriteria{
   351  		0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
   352  		1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)},
   353  		2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)},
   354  	}
   355  
   356  	for i, test := range testCases {
   357  		if _, err := api.NewFilter(context.Background(), test); err == nil {
   358  			t.Errorf("Expected NewFilter for case #%d to fail", i)
   359  		}
   360  	}
   361  }
   362  
   363  func TestInvalidGetLogsRequest(t *testing.T) {
   364  	var (
   365  		db        = rawdb.NewMemoryDatabase()
   366  		backend   = &testBackend{db: db}
   367  		api       = NewPublicFilterAPI(backend, false)
   368  		blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   369  	)
   370  
    371  	// Reason: Cannot specify both BlockHash and FromBlock/ToBlock
   372  	testCases := []FilterCriteria{
   373  		0: {BlockHash: &blockHash, FromBlock: big.NewInt(100)},
   374  		1: {BlockHash: &blockHash, ToBlock: big.NewInt(500)},
   375  		2: {BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
   376  	}
   377  
   378  	for i, test := range testCases {
   379  		if _, err := api.GetLogs(context.Background(), test); err == nil {
    380  			t.Errorf("Expected GetLogs for case #%d to fail", i)
   381  		}
   382  	}
   383  }
   384  
   385  // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed.
   386  func TestLogFilter(t *testing.T) {
   387  	t.Parallel()
   388  
   389  	var (
   390  		db      = rawdb.NewMemoryDatabase()
   391  		backend = &testBackend{db: db}
   392  		api     = NewPublicFilterAPI(backend, false)
   393  
   394  		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
   395  		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
   396  		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
   397  		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
   398  		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   399  		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
   400  		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
   401  
   402  		// posted twice, once as regular logs and once as pending logs.
   403  		allLogs = []*types.Log{
   404  			{Address: firstAddr},
   405  			{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
   406  			{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
   407  			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2},
   408  			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3},
   409  		}
   410  
   411  		expectedCase7  = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]}
   412  		expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]}
   413  
   414  		testCases = []struct {
   415  			crit     FilterCriteria
   416  			expected []*types.Log
   417  			id       rpc.ID
   418  		}{
   419  			// match all
   420  			0: {FilterCriteria{}, allLogs, ""},
   421  			// match none due to no matching addresses
   422  			1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""},
   423  			// match logs based on addresses, ignore topics
   424  			2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
   425  			// match none due to no matching topics (match with address)
   426  			3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""},
   427  			// match logs based on addresses and topics
   428  			4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""},
   429  			// match logs based on multiple addresses and "or" topics
   430  			5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""},
   431  			// logs in the pending block
   432  			6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""},
   433  			// mined logs with block num >= 2 or pending logs
   434  			7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""},
   435  			// all "mined" logs with block num >= 2
   436  			8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""},
   437  			// all "mined" logs
   438  			9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""},
    439  			// all "mined" logs with 1 <= block num <= 2 and topic secondTopic
   440  			10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""},
   441  			// all "mined" and pending logs with topic firstTopic
   442  			11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""},
    443  			// match all logs that carry at least one topic, due to the wildcard topic filter
   444  			12: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], ""},
   445  		}
   446  	)
   447  
   448  	// create all filters
   449  	for i := range testCases {
   450  		testCases[i].id, _ = api.NewFilter(context.Background(), testCases[i].crit)
   451  	}
   452  
   453  	// raise events
   454  	time.Sleep(1 * time.Second)
   455  	if nsend := backend.logsFeed.Send(allLogs); nsend == 0 {
   456  		t.Fatal("Logs event not delivered")
   457  	}
   458  	if nsend := backend.pendingLogsFeed.Send(allLogs); nsend == 0 {
   459  		t.Fatal("Pending logs event not delivered")
   460  	}
   461  
   462  	for i, tt := range testCases {
   463  		var fetched []*types.Log
   464  		timeout := time.Now().Add(1 * time.Second)
   465  		for { // fetch all expected logs
   466  			results, err := api.GetFilterChanges(context.Background(), tt.id)
   467  			if err != nil {
   468  				t.Fatalf("Unable to fetch logs: %v", err)
   469  			}
   470  
   471  			fetched = append(fetched, results.([]*types.Log)...)
   472  			if len(fetched) >= len(tt.expected) {
   473  				break
   474  			}
   475  			// check timeout
   476  			if time.Now().After(timeout) {
   477  				break
   478  			}
   479  
   480  			time.Sleep(100 * time.Millisecond)
   481  		}
   482  
   483  		if len(fetched) != len(tt.expected) {
   484  			t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
   485  			return
   486  		}
   487  
   488  		for l := range fetched {
   489  			if fetched[l].Removed {
   490  				t.Errorf("expected log not to be removed for log %d in case %d", l, i)
   491  			}
   492  			if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
   493  				t.Errorf("invalid log on index %d for case %d", l, i)
   494  			}
   495  		}
   496  	}
   497  }
   498  
   499  // TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event feed.
   500  func TestPendingLogsSubscription(t *testing.T) {
   501  	t.Parallel()
   502  
   503  	var (
   504  		db      = rawdb.NewMemoryDatabase()
   505  		backend = &testBackend{db: db}
   506  		api     = NewPublicFilterAPI(backend, false)
   507  
   508  		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
   509  		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
   510  		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
   511  		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
   512  		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
   513  		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
   514  		thirdTopic     = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333")
   515  		fourthTopic    = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444")
   516  		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
   517  
   518  		allLogs = [][]*types.Log{
   519  			{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}},
   520  			{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}},
   521  			{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}},
   522  			{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}},
   523  			{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}},
   524  			{
   525  				{Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
   526  				{Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
   527  				{Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5},
   528  				{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
   529  			},
   530  		}
   531  
   532  		testCases = []struct {
   533  			crit     ethereum.FilterQuery
   534  			expected []*types.Log
   535  			c        chan []*types.Log
   536  			sub      *Subscription
   537  		}{
   538  			// match all
   539  			{
   540  				ethereum.FilterQuery{}, flattenLogs(allLogs),
   541  				nil, nil,
   542  			},
   543  			// match none due to no matching addresses
   544  			{
   545  				ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}},
   546  				nil,
   547  				nil, nil,
   548  			},
   549  			// match logs based on addresses, ignore topics
   550  			{
   551  				ethereum.FilterQuery{Addresses: []common.Address{firstAddr}},
   552  				append(flattenLogs(allLogs[:2]), allLogs[5][3]),
   553  				nil, nil,
   554  			},
   555  			// match none due to no matching topics (match with address)
   556  			{
   557  				ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}},
   558  				nil, nil, nil,
   559  			},
   560  			// match logs based on addresses and topics
   561  			{
   562  				ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}},
   563  				append(flattenLogs(allLogs[3:5]), allLogs[5][0]),
   564  				nil, nil,
   565  			},
   566  			// match logs based on multiple addresses and "or" topics
   567  			{
   568  				ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}},
   569  				append(flattenLogs(allLogs[2:5]), allLogs[5][0]),
   570  				nil,
   571  				nil,
   572  			},
    573  			// block numbers are ignored for filters created with New***Filter; these return all logs that match the given criteria when the state changes
   574  			{
   575  				ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)},
   576  				append(flattenLogs(allLogs[:2]), allLogs[5][3]),
   577  				nil, nil,
   578  			},
   579  			// multiple pending logs, should match only 2 topics from the logs in block 5
    580  			// multiple pending logs; should match only the 2 logs in block 5 whose topics are in the filter
   581  				ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}},
   582  				[]*types.Log{allLogs[5][0], allLogs[5][2]},
   583  				nil, nil,
   584  			},
   585  		}
   586  	)
   587  
    588  	// create all subscriptions; this ensures all subscriptions are created before the events are posted.
   589  	// on slow machines this could otherwise lead to missing events when the subscription is created after
   590  	// (some) events are posted.
   591  	for i := range testCases {
   592  		testCases[i].c = make(chan []*types.Log)
   593  		testCases[i].sub, _ = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c)
   594  	}
   595  
   596  	for n, test := range testCases {
   597  		i := n
   598  		tt := test
   599  		go func() {
   600  			var fetched []*types.Log
   601  		fetchLoop:
   602  			for {
   603  				logs := <-tt.c
   604  				fetched = append(fetched, logs...)
   605  				if len(fetched) >= len(tt.expected) {
   606  					break fetchLoop
   607  				}
   608  			}
   609  
   610  			if len(fetched) != len(tt.expected) {
   611  				panic(fmt.Sprintf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)))
   612  			}
   613  
   614  			for l := range fetched {
   615  				if fetched[l].Removed {
   616  					panic(fmt.Sprintf("expected log not to be removed for log %d in case %d", l, i))
   617  				}
   618  				if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
   619  					panic(fmt.Sprintf("invalid log on index %d for case %d", l, i))
   620  				}
   621  			}
   622  		}()
   623  	}
   624  
   625  	// raise events
   626  	time.Sleep(1 * time.Second)
   627  	for _, ev := range allLogs {
   628  		backend.pendingLogsFeed.Send(ev)
   629  	}
   630  }
   631  
   632  func flattenLogs(pl [][]*types.Log) []*types.Log {
   633  	var logs []*types.Log
   634  	for _, l := range pl {
   635  		logs = append(logs, l...)
   636  	}
   637  	return logs
   638  }