github.com/samgwo/go-ethereum@v1.8.2-0.20180302101319-49bcb5fbd55e/eth/filters/filter_system_test.go (about) 1 // Copyright 2016 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package filters 18 19 import ( 20 "context" 21 "fmt" 22 "math/big" 23 "math/rand" 24 "reflect" 25 "testing" 26 "time" 27 28 ethereum "github.com/ethereum/go-ethereum" 29 "github.com/ethereum/go-ethereum/common" 30 "github.com/ethereum/go-ethereum/consensus/ethash" 31 "github.com/ethereum/go-ethereum/core" 32 "github.com/ethereum/go-ethereum/core/bloombits" 33 "github.com/ethereum/go-ethereum/core/types" 34 "github.com/ethereum/go-ethereum/ethdb" 35 "github.com/ethereum/go-ethereum/event" 36 "github.com/ethereum/go-ethereum/params" 37 "github.com/ethereum/go-ethereum/rpc" 38 ) 39 40 type testBackend struct { 41 mux *event.TypeMux 42 db ethdb.Database 43 sections uint64 44 txFeed *event.Feed 45 rmLogsFeed *event.Feed 46 logsFeed *event.Feed 47 chainFeed *event.Feed 48 } 49 50 func (b *testBackend) ChainDb() ethdb.Database { 51 return b.db 52 } 53 54 func (b *testBackend) EventMux() *event.TypeMux { 55 return b.mux 56 } 57 58 func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) { 59 var hash 
common.Hash 60 var num uint64 61 if blockNr == rpc.LatestBlockNumber { 62 hash = core.GetHeadBlockHash(b.db) 63 num = core.GetBlockNumber(b.db, hash) 64 } else { 65 num = uint64(blockNr) 66 hash = core.GetCanonicalHash(b.db, num) 67 } 68 return core.GetHeader(b.db, hash, num), nil 69 } 70 71 func (b *testBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) { 72 number := core.GetBlockNumber(b.db, blockHash) 73 return core.GetBlockReceipts(b.db, blockHash, number), nil 74 } 75 76 func (b *testBackend) GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) { 77 number := core.GetBlockNumber(b.db, blockHash) 78 receipts := core.GetBlockReceipts(b.db, blockHash, number) 79 80 logs := make([][]*types.Log, len(receipts)) 81 for i, receipt := range receipts { 82 logs[i] = receipt.Logs 83 } 84 return logs, nil 85 } 86 87 func (b *testBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription { 88 return b.txFeed.Subscribe(ch) 89 } 90 91 func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { 92 return b.rmLogsFeed.Subscribe(ch) 93 } 94 95 func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { 96 return b.logsFeed.Subscribe(ch) 97 } 98 99 func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { 100 return b.chainFeed.Subscribe(ch) 101 } 102 103 func (b *testBackend) BloomStatus() (uint64, uint64) { 104 return params.BloomBitsBlocks, b.sections 105 } 106 107 func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) { 108 requests := make(chan chan *bloombits.Retrieval) 109 110 go session.Multiplex(16, 0, requests) 111 go func() { 112 for { 113 // Wait for a service request or a shutdown 114 select { 115 case <-ctx.Done(): 116 return 117 118 case request := <-requests: 119 task := <-request 120 121 task.Bitsets = make([][]byte, len(task.Sections)) 122 for i, 
section := range task.Sections { 123 if rand.Int()%4 != 0 { // Handle occasional missing deliveries 124 head := core.GetCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1) 125 task.Bitsets[i], _ = core.GetBloomBits(b.db, task.Bit, section, head) 126 } 127 } 128 request <- task 129 } 130 } 131 }() 132 } 133 134 // TestBlockSubscription tests if a block subscription returns block hashes for posted chain events. 135 // It creates multiple subscriptions: 136 // - one at the start and should receive all posted chain events and a second (blockHashes) 137 // - one that is created after a cutoff moment and uninstalled after a second cutoff moment (blockHashes[cutoff1:cutoff2]) 138 // - one that is created after the second cutoff moment (blockHashes[cutoff2:]) 139 func TestBlockSubscription(t *testing.T) { 140 t.Parallel() 141 142 var ( 143 mux = new(event.TypeMux) 144 db, _ = ethdb.NewMemDatabase() 145 txFeed = new(event.Feed) 146 rmLogsFeed = new(event.Feed) 147 logsFeed = new(event.Feed) 148 chainFeed = new(event.Feed) 149 backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} 150 api = NewPublicFilterAPI(backend, false) 151 genesis = new(core.Genesis).MustCommit(db) 152 chain, _ = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {}) 153 chainEvents = []core.ChainEvent{} 154 ) 155 156 for _, blk := range chain { 157 chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk}) 158 } 159 160 chan0 := make(chan *types.Header) 161 sub0 := api.events.SubscribeNewHeads(chan0) 162 chan1 := make(chan *types.Header) 163 sub1 := api.events.SubscribeNewHeads(chan1) 164 165 go func() { // simulate client 166 i1, i2 := 0, 0 167 for i1 != len(chainEvents) || i2 != len(chainEvents) { 168 select { 169 case header := <-chan0: 170 if chainEvents[i1].Hash != header.Hash() { 171 t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, 
header.Hash()) 172 } 173 i1++ 174 case header := <-chan1: 175 if chainEvents[i2].Hash != header.Hash() { 176 t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash()) 177 } 178 i2++ 179 } 180 } 181 182 sub0.Unsubscribe() 183 sub1.Unsubscribe() 184 }() 185 186 time.Sleep(1 * time.Second) 187 for _, e := range chainEvents { 188 chainFeed.Send(e) 189 } 190 191 <-sub0.Err() 192 <-sub1.Err() 193 } 194 195 // TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the event mux. 196 func TestPendingTxFilter(t *testing.T) { 197 t.Parallel() 198 199 var ( 200 mux = new(event.TypeMux) 201 db, _ = ethdb.NewMemDatabase() 202 txFeed = new(event.Feed) 203 rmLogsFeed = new(event.Feed) 204 logsFeed = new(event.Feed) 205 chainFeed = new(event.Feed) 206 backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} 207 api = NewPublicFilterAPI(backend, false) 208 209 transactions = []*types.Transaction{ 210 types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 211 types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 212 types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 213 types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 214 types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 215 } 216 217 hashes []common.Hash 218 ) 219 220 fid0 := api.NewPendingTransactionFilter() 221 222 time.Sleep(1 * time.Second) 223 for _, tx := range transactions { 224 ev := core.TxPreEvent{Tx: tx} 225 txFeed.Send(ev) 226 } 227 228 timeout := time.Now().Add(1 * time.Second) 229 for { 230 results, err := 
api.GetFilterChanges(fid0) 231 if err != nil { 232 t.Fatalf("Unable to retrieve logs: %v", err) 233 } 234 235 h := results.([]common.Hash) 236 hashes = append(hashes, h...) 237 if len(hashes) >= len(transactions) { 238 break 239 } 240 // check timeout 241 if time.Now().After(timeout) { 242 break 243 } 244 245 time.Sleep(100 * time.Millisecond) 246 } 247 248 if len(hashes) != len(transactions) { 249 t.Errorf("invalid number of transactions, want %d transactions(s), got %d", len(transactions), len(hashes)) 250 return 251 } 252 for i := range hashes { 253 if hashes[i] != transactions[i].Hash() { 254 t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i]) 255 } 256 } 257 } 258 259 // TestLogFilterCreation test whether a given filter criteria makes sense. 260 // If not it must return an error. 261 func TestLogFilterCreation(t *testing.T) { 262 var ( 263 mux = new(event.TypeMux) 264 db, _ = ethdb.NewMemDatabase() 265 txFeed = new(event.Feed) 266 rmLogsFeed = new(event.Feed) 267 logsFeed = new(event.Feed) 268 chainFeed = new(event.Feed) 269 backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} 270 api = NewPublicFilterAPI(backend, false) 271 272 testCases = []struct { 273 crit FilterCriteria 274 success bool 275 }{ 276 // defaults 277 {FilterCriteria{}, true}, 278 // valid block number range 279 {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true}, 280 // "mined" block range to pending 281 {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true}, 282 // new mined and pending blocks 283 {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true}, 284 // from block "higher" than to block 285 {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false}, 286 // from block "higher" than to block 287 {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: 
big.NewInt(100)}, false}, 288 // from block "higher" than to block 289 {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false}, 290 // from block "higher" than to block 291 {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false}, 292 } 293 ) 294 295 for i, test := range testCases { 296 _, err := api.NewFilter(test.crit) 297 if test.success && err != nil { 298 t.Errorf("expected filter creation for case %d to success, got %v", i, err) 299 } 300 if !test.success && err == nil { 301 t.Errorf("expected testcase %d to fail with an error", i) 302 } 303 } 304 } 305 306 // TestInvalidLogFilterCreation tests whether invalid filter log criteria results in an error 307 // when the filter is created. 308 func TestInvalidLogFilterCreation(t *testing.T) { 309 t.Parallel() 310 311 var ( 312 mux = new(event.TypeMux) 313 db, _ = ethdb.NewMemDatabase() 314 txFeed = new(event.Feed) 315 rmLogsFeed = new(event.Feed) 316 logsFeed = new(event.Feed) 317 chainFeed = new(event.Feed) 318 backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} 319 api = NewPublicFilterAPI(backend, false) 320 ) 321 322 // different situations where log filter creation should fail. 323 // Reason: fromBlock > toBlock 324 testCases := []FilterCriteria{ 325 0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, 326 1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 327 2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 328 } 329 330 for i, test := range testCases { 331 if _, err := api.NewFilter(test); err == nil { 332 t.Errorf("Expected NewFilter for case #%d to fail", i) 333 } 334 } 335 } 336 337 // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed. 
338 func TestLogFilter(t *testing.T) { 339 t.Parallel() 340 341 var ( 342 mux = new(event.TypeMux) 343 db, _ = ethdb.NewMemDatabase() 344 txFeed = new(event.Feed) 345 rmLogsFeed = new(event.Feed) 346 logsFeed = new(event.Feed) 347 chainFeed = new(event.Feed) 348 backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} 349 api = NewPublicFilterAPI(backend, false) 350 351 firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") 352 secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") 353 thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") 354 notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") 355 firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 356 secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") 357 notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999") 358 359 // posted twice, once as vm.Logs and once as core.PendingLogsEvent 360 allLogs = []*types.Log{ 361 {Address: firstAddr}, 362 {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}, 363 {Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}, 364 {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2}, 365 {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}, 366 } 367 368 expectedCase7 = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]} 369 expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]} 370 371 testCases = []struct { 372 crit FilterCriteria 373 expected []*types.Log 374 id rpc.ID 375 }{ 376 // match all 377 0: {FilterCriteria{}, allLogs, ""}, 378 // match none due to no matching addresses 379 1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: 
[][]common.Hash{nil}}, []*types.Log{}, ""}, 380 // match logs based on addresses, ignore topics 381 2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""}, 382 // match none due to no matching topics (match with address) 383 3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""}, 384 // match logs based on addresses and topics 385 4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""}, 386 // match logs based on multiple addresses and "or" topics 387 5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""}, 388 // logs in the pending block 389 6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""}, 390 // mined logs with block num >= 2 or pending logs 391 7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""}, 392 // all "mined" logs with block num >= 2 393 8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""}, 394 // all "mined" logs 395 9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""}, 396 // all "mined" logs with 1>= block num <=2 and topic secondTopic 397 10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""}, 398 // all "mined" and pending logs with topic firstTopic 399 11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""}, 400 // match all logs due to wildcard topic 401 12: {FilterCriteria{Topics: [][]common.Hash{nil}}, 
allLogs[1:], ""}, 402 } 403 ) 404 405 // create all filters 406 for i := range testCases { 407 testCases[i].id, _ = api.NewFilter(testCases[i].crit) 408 } 409 410 // raise events 411 time.Sleep(1 * time.Second) 412 if nsend := logsFeed.Send(allLogs); nsend == 0 { 413 t.Fatal("Shoud have at least one subscription") 414 } 415 if err := mux.Post(core.PendingLogsEvent{Logs: allLogs}); err != nil { 416 t.Fatal(err) 417 } 418 419 for i, tt := range testCases { 420 var fetched []*types.Log 421 timeout := time.Now().Add(1 * time.Second) 422 for { // fetch all expected logs 423 results, err := api.GetFilterChanges(tt.id) 424 if err != nil { 425 t.Fatalf("Unable to fetch logs: %v", err) 426 } 427 428 fetched = append(fetched, results.([]*types.Log)...) 429 if len(fetched) >= len(tt.expected) { 430 break 431 } 432 // check timeout 433 if time.Now().After(timeout) { 434 break 435 } 436 437 time.Sleep(100 * time.Millisecond) 438 } 439 440 if len(fetched) != len(tt.expected) { 441 t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)) 442 return 443 } 444 445 for l := range fetched { 446 if fetched[l].Removed { 447 t.Errorf("expected log not to be removed for log %d in case %d", l, i) 448 } 449 if !reflect.DeepEqual(fetched[l], tt.expected[l]) { 450 t.Errorf("invalid log on index %d for case %d", l, i) 451 } 452 } 453 } 454 } 455 456 // TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event feed. 
func TestPendingLogsSubscription(t *testing.T) {
	t.Parallel()

	var (
		mux        = new(event.TypeMux)
		db, _      = ethdb.NewMemDatabase()
		txFeed     = new(event.Feed)
		rmLogsFeed = new(event.Feed)
		logsFeed   = new(event.Feed)
		chainFeed  = new(event.Feed)
		backend    = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
		api        = NewPublicFilterAPI(backend, false)

		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
		thirdTopic     = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333")
		fourthTopic    = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444")
		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")

		// Each event carries one batch of pending logs; the last batch holds
		// four logs in the same (pending) block 5.
		allLogs = []core.PendingLogsEvent{
			{Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}},
			{Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}},
			{Logs: []*types.Log{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}},
			{Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}},
			{Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}},
			{Logs: []*types.Log{
				{Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
				{Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
				{Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5},
				{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
			}},
		}

		// convertLogs flattens a slice of pending-log events into one log slice.
		convertLogs = func(pl []core.PendingLogsEvent) []*types.Log {
			var logs []*types.Log
			for _, l := range pl {
				logs = append(logs, l.Logs...)
			}
			return logs
		}

		testCases = []struct {
			crit     ethereum.FilterQuery
			expected []*types.Log
			c        chan []*types.Log
			sub      *Subscription
		}{
			// match all
			{ethereum.FilterQuery{}, convertLogs(allLogs), nil, nil},
			// match none due to no matching addresses
			{ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, nil, nil},
			// match logs based on addresses, ignore topics
			{ethereum.FilterQuery{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
			// match none due to no matching topics (match with address)
			{ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, nil, nil},
			// match logs based on addresses and topics
			{ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil},
			// match logs based on multiple addresses and "or" topics
			{ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil},
			// block numbers are ignored for filters created with New***Filter, these return all logs that match the given criteria when the state changes
			{ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
			// multiple pending logs, should match only 2 topics from the logs in block 5
			{ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil},
		}
	)

	// create all subscriptions, this ensures all subscriptions are created before the events are posted.
	// on slow machines this could otherwise lead to missing events when the subscription is created after
	// (some) events are posted.
	for i := range testCases {
		testCases[i].c = make(chan []*types.Log)
		testCases[i].sub, _ = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c)
	}

	for n, test := range testCases {
		// Shadow the loop variables so each goroutine captures its own copy.
		i := n
		tt := test
		go func() {
			var fetched []*types.Log
		fetchLoop:
			// Keep draining the subscription channel until the expected number
			// of logs has been received.
			for {
				logs := <-tt.c
				fetched = append(fetched, logs...)
				if len(fetched) >= len(tt.expected) {
					break fetchLoop
				}
			}

			// NOTE(review): failures below are reported via panic, presumably
			// because these checks run in separate goroutines — confirm before
			// converting to t.Errorf.
			if len(fetched) != len(tt.expected) {
				panic(fmt.Sprintf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)))
			}

			for l := range fetched {
				if fetched[l].Removed {
					panic(fmt.Sprintf("expected log not to be removed for log %d in case %d", l, i))
				}
				if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
					panic(fmt.Sprintf("invalid log on index %d for case %d", l, i))
				}
			}
		}()
	}

	// raise events
	time.Sleep(1 * time.Second)
	// allLogs are type of core.PendingLogsEvent
	for _, l := range allLogs {
		if err := mux.Post(l); err != nil {
			t.Fatal(err)
		}
	}
}