// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filters

import (
	"context"
	"fmt"
	"math/big"
	"math/rand"
	"reflect"
	"testing"
	"time"

	ethereum "github.com/vantum/vantum"
	"github.com/vantum/vantum/common"
	"github.com/vantum/vantum/consensus/ethash"
	"github.com/vantum/vantum/core"
	"github.com/vantum/vantum/core/bloombits"
	"github.com/vantum/vantum/core/types"
	"github.com/vantum/vantum/ethdb"
	"github.com/vantum/vantum/event"
	"github.com/vantum/vantum/params"
	"github.com/vantum/vantum/rpc"
)

// testBackend is an in-memory backend used by the filter tests in this file.
// Instead of running a live blockchain, tests inject events directly through
// the exposed feeds and the legacy event mux.
type testBackend struct {
	mux        *event.TypeMux // legacy event mux, used here for pending-log events
	db         ethdb.Database // in-memory chain database
	sections   uint64         // number of processed bloombits sections reported by BloomStatus
	txFeed     *event.Feed    // feed carrying core.TxPreEvent
	rmLogsFeed *event.Feed    // feed carrying core.RemovedLogsEvent
	logsFeed   *event.Feed    // feed carrying []*types.Log
	chainFeed  *event.Feed    // feed carrying core.ChainEvent
}

// ChainDb returns the backing chain database.
func (b *testBackend) ChainDb() ethdb.Database {
	return b.db
}

// EventMux returns the legacy event mux.
func (b *testBackend) EventMux() *event.TypeMux {
	return b.mux
}

// HeaderByNumber retrieves a header from the test database. Only
// rpc.LatestBlockNumber is resolved specially (via the head-block hash);
// other negative block numbers (e.g. pending) are cast to uint64 as-is.
func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
	var hash common.Hash
	var num uint64
	if blockNr == rpc.LatestBlockNumber {
		hash = core.GetHeadBlockHash(b.db)
		num = core.GetBlockNumber(b.db, hash)
	} else {
		num = uint64(blockNr)
		hash = core.GetCanonicalHash(b.db, num)
	}
	return core.GetHeader(b.db, hash, num), nil
}

// GetReceipts returns the receipts stored in the database for the given block hash.
func (b *testBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
	num := core.GetBlockNumber(b.db, blockHash)
	return core.GetBlockReceipts(b.db, blockHash, num), nil
}

// SubscribeTxPreEvent subscribes ch to pending-transaction events.
func (b *testBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
	return b.txFeed.Subscribe(ch)
}

// SubscribeRemovedLogsEvent subscribes ch to removed-log (reorg) events.
func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
	return b.rmLogsFeed.Subscribe(ch)
}

// SubscribeLogsEvent subscribes ch to new-log events.
func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return b.logsFeed.Subscribe(ch)
}

// SubscribeChainEvent subscribes ch to chain head events.
func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
	return b.chainFeed.Subscribe(ch)
}

// BloomStatus reports the bloombits section size together with the number of
// sections this backend pretends to have processed (b.sections).
func (b *testBackend) BloomStatus() (uint64, uint64) {
	return params.BloomBitsBlocks, b.sections
}

// ServiceFilter serves bloombits retrieval requests for the given matcher
// session until ctx is cancelled. About a quarter of the deliveries are
// randomly left empty to exercise the matcher's missing-data handling.
func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
	requests := make(chan chan *bloombits.Retrieval)

	go session.Multiplex(16, 0, requests)
	go func() {
		for {
			// Wait for a service request or a shutdown
			select {
			case <-ctx.Done():
				return

			case request := <-requests:
				task := <-request

				task.Bitsets = make([][]byte, len(task.Sections))
				for i, section := range task.Sections {
					if rand.Int()%4 != 0 { // Handle occasional missing deliveries
						head := core.GetCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
						task.Bitsets[i], _ = core.GetBloomBits(b.db, task.Bit, section, head)
					}
				}
				request <- task
			}
		}
	}()
}

// TestBlockSubscription tests if a block subscription returns block hashes for posted chain events.
124 // It creates multiple subscriptions: 125 // - one at the start and should receive all posted chain events and a second (blockHashes) 126 // - one that is created after a cutoff moment and uninstalled after a second cutoff moment (blockHashes[cutoff1:cutoff2]) 127 // - one that is created after the second cutoff moment (blockHashes[cutoff2:]) 128 func TestBlockSubscription(t *testing.T) { 129 t.Parallel() 130 131 var ( 132 mux = new(event.TypeMux) 133 db, _ = ethdb.NewMemDatabase() 134 txFeed = new(event.Feed) 135 rmLogsFeed = new(event.Feed) 136 logsFeed = new(event.Feed) 137 chainFeed = new(event.Feed) 138 backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} 139 api = NewPublicFilterAPI(backend, false) 140 genesis = new(core.Genesis).MustCommit(db) 141 chain, _ = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {}) 142 chainEvents = []core.ChainEvent{} 143 ) 144 145 for _, blk := range chain { 146 chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk}) 147 } 148 149 chan0 := make(chan *types.Header) 150 sub0 := api.events.SubscribeNewHeads(chan0) 151 chan1 := make(chan *types.Header) 152 sub1 := api.events.SubscribeNewHeads(chan1) 153 154 go func() { // simulate client 155 i1, i2 := 0, 0 156 for i1 != len(chainEvents) || i2 != len(chainEvents) { 157 select { 158 case header := <-chan0: 159 if chainEvents[i1].Hash != header.Hash() { 160 t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash()) 161 } 162 i1++ 163 case header := <-chan1: 164 if chainEvents[i2].Hash != header.Hash() { 165 t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash()) 166 } 167 i2++ 168 } 169 } 170 171 sub0.Unsubscribe() 172 sub1.Unsubscribe() 173 }() 174 175 time.Sleep(1 * time.Second) 176 for _, e := range chainEvents { 177 chainFeed.Send(e) 178 } 179 180 <-sub0.Err() 181 
<-sub1.Err() 182 } 183 184 // TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the event mux. 185 func TestPendingTxFilter(t *testing.T) { 186 t.Parallel() 187 188 var ( 189 mux = new(event.TypeMux) 190 db, _ = ethdb.NewMemDatabase() 191 txFeed = new(event.Feed) 192 rmLogsFeed = new(event.Feed) 193 logsFeed = new(event.Feed) 194 chainFeed = new(event.Feed) 195 backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} 196 api = NewPublicFilterAPI(backend, false) 197 198 transactions = []*types.Transaction{ 199 types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 200 types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 201 types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 202 types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 203 types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 204 } 205 206 hashes []common.Hash 207 ) 208 209 fid0 := api.NewPendingTransactionFilter() 210 211 time.Sleep(1 * time.Second) 212 for _, tx := range transactions { 213 ev := core.TxPreEvent{Tx: tx} 214 txFeed.Send(ev) 215 } 216 217 timeout := time.Now().Add(1 * time.Second) 218 for { 219 results, err := api.GetFilterChanges(fid0) 220 if err != nil { 221 t.Fatalf("Unable to retrieve logs: %v", err) 222 } 223 224 h := results.([]common.Hash) 225 hashes = append(hashes, h...) 
226 if len(hashes) >= len(transactions) { 227 break 228 } 229 // check timeout 230 if time.Now().After(timeout) { 231 break 232 } 233 234 time.Sleep(100 * time.Millisecond) 235 } 236 237 if len(hashes) != len(transactions) { 238 t.Errorf("invalid number of transactions, want %d transactions(s), got %d", len(transactions), len(hashes)) 239 return 240 } 241 for i := range hashes { 242 if hashes[i] != transactions[i].Hash() { 243 t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i]) 244 } 245 } 246 } 247 248 // TestLogFilterCreation test whether a given filter criteria makes sense. 249 // If not it must return an error. 250 func TestLogFilterCreation(t *testing.T) { 251 var ( 252 mux = new(event.TypeMux) 253 db, _ = ethdb.NewMemDatabase() 254 txFeed = new(event.Feed) 255 rmLogsFeed = new(event.Feed) 256 logsFeed = new(event.Feed) 257 chainFeed = new(event.Feed) 258 backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} 259 api = NewPublicFilterAPI(backend, false) 260 261 testCases = []struct { 262 crit FilterCriteria 263 success bool 264 }{ 265 // defaults 266 {FilterCriteria{}, true}, 267 // valid block number range 268 {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true}, 269 // "mined" block range to pending 270 {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true}, 271 // new mined and pending blocks 272 {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true}, 273 // from block "higher" than to block 274 {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false}, 275 // from block "higher" than to block 276 {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false}, 277 // from block "higher" than to block 278 {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false}, 
279 // from block "higher" than to block 280 {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false}, 281 } 282 ) 283 284 for i, test := range testCases { 285 _, err := api.NewFilter(test.crit) 286 if test.success && err != nil { 287 t.Errorf("expected filter creation for case %d to success, got %v", i, err) 288 } 289 if !test.success && err == nil { 290 t.Errorf("expected testcase %d to fail with an error", i) 291 } 292 } 293 } 294 295 // TestInvalidLogFilterCreation tests whether invalid filter log criteria results in an error 296 // when the filter is created. 297 func TestInvalidLogFilterCreation(t *testing.T) { 298 t.Parallel() 299 300 var ( 301 mux = new(event.TypeMux) 302 db, _ = ethdb.NewMemDatabase() 303 txFeed = new(event.Feed) 304 rmLogsFeed = new(event.Feed) 305 logsFeed = new(event.Feed) 306 chainFeed = new(event.Feed) 307 backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} 308 api = NewPublicFilterAPI(backend, false) 309 ) 310 311 // different situations where log filter creation should fail. 312 // Reason: fromBlock > toBlock 313 testCases := []FilterCriteria{ 314 0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, 315 1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 316 2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 317 } 318 319 for i, test := range testCases { 320 if _, err := api.NewFilter(test); err == nil { 321 t.Errorf("Expected NewFilter for case #%d to fail", i) 322 } 323 } 324 } 325 326 // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed. 
327 func TestLogFilter(t *testing.T) { 328 t.Parallel() 329 330 var ( 331 mux = new(event.TypeMux) 332 db, _ = ethdb.NewMemDatabase() 333 txFeed = new(event.Feed) 334 rmLogsFeed = new(event.Feed) 335 logsFeed = new(event.Feed) 336 chainFeed = new(event.Feed) 337 backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} 338 api = NewPublicFilterAPI(backend, false) 339 340 firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") 341 secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") 342 thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") 343 notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") 344 firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 345 secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") 346 notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999") 347 348 // posted twice, once as vm.Logs and once as core.PendingLogsEvent 349 allLogs = []*types.Log{ 350 {Address: firstAddr}, 351 {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}, 352 {Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}, 353 {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2}, 354 {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}, 355 } 356 357 expectedCase7 = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]} 358 expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]} 359 360 testCases = []struct { 361 crit FilterCriteria 362 expected []*types.Log 363 id rpc.ID 364 }{ 365 // match all 366 0: {FilterCriteria{}, allLogs, ""}, 367 // match none due to no matching addresses 368 1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: 
[][]common.Hash{nil}}, []*types.Log{}, ""}, 369 // match logs based on addresses, ignore topics 370 2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""}, 371 // match none due to no matching topics (match with address) 372 3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""}, 373 // match logs based on addresses and topics 374 4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""}, 375 // match logs based on multiple addresses and "or" topics 376 5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""}, 377 // logs in the pending block 378 6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""}, 379 // mined logs with block num >= 2 or pending logs 380 7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""}, 381 // all "mined" logs with block num >= 2 382 8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""}, 383 // all "mined" logs 384 9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""}, 385 // all "mined" logs with 1>= block num <=2 and topic secondTopic 386 10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""}, 387 // all "mined" and pending logs with topic firstTopic 388 11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""}, 389 // match all logs due to wildcard topic 390 12: {FilterCriteria{Topics: [][]common.Hash{nil}}, 
allLogs[1:], ""}, 391 } 392 ) 393 394 // create all filters 395 for i := range testCases { 396 testCases[i].id, _ = api.NewFilter(testCases[i].crit) 397 } 398 399 // raise events 400 time.Sleep(1 * time.Second) 401 if nsend := logsFeed.Send(allLogs); nsend == 0 { 402 t.Fatal("Shoud have at least one subscription") 403 } 404 if err := mux.Post(core.PendingLogsEvent{Logs: allLogs}); err != nil { 405 t.Fatal(err) 406 } 407 408 for i, tt := range testCases { 409 var fetched []*types.Log 410 timeout := time.Now().Add(1 * time.Second) 411 for { // fetch all expected logs 412 results, err := api.GetFilterChanges(tt.id) 413 if err != nil { 414 t.Fatalf("Unable to fetch logs: %v", err) 415 } 416 417 fetched = append(fetched, results.([]*types.Log)...) 418 if len(fetched) >= len(tt.expected) { 419 break 420 } 421 // check timeout 422 if time.Now().After(timeout) { 423 break 424 } 425 426 time.Sleep(100 * time.Millisecond) 427 } 428 429 if len(fetched) != len(tt.expected) { 430 t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)) 431 return 432 } 433 434 for l := range fetched { 435 if fetched[l].Removed { 436 t.Errorf("expected log not to be removed for log %d in case %d", l, i) 437 } 438 if !reflect.DeepEqual(fetched[l], tt.expected[l]) { 439 t.Errorf("invalid log on index %d for case %d", l, i) 440 } 441 } 442 } 443 } 444 445 // TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event feed. 
446 func TestPendingLogsSubscription(t *testing.T) { 447 t.Parallel() 448 449 var ( 450 mux = new(event.TypeMux) 451 db, _ = ethdb.NewMemDatabase() 452 txFeed = new(event.Feed) 453 rmLogsFeed = new(event.Feed) 454 logsFeed = new(event.Feed) 455 chainFeed = new(event.Feed) 456 backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} 457 api = NewPublicFilterAPI(backend, false) 458 459 firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") 460 secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") 461 thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") 462 notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") 463 firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 464 secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") 465 thirdTopic = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333") 466 fourthTopic = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444") 467 notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999") 468 469 allLogs = []core.PendingLogsEvent{ 470 {Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}}, 471 {Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}}, 472 {Logs: []*types.Log{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}}, 473 {Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}}, 474 {Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}}, 475 {Logs: []*types.Log{ 476 {Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5}, 477 {Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5}, 478 {Address: 
thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5}, 479 {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5}, 480 }}, 481 } 482 483 convertLogs = func(pl []core.PendingLogsEvent) []*types.Log { 484 var logs []*types.Log 485 for _, l := range pl { 486 logs = append(logs, l.Logs...) 487 } 488 return logs 489 } 490 491 testCases = []struct { 492 crit ethereum.FilterQuery 493 expected []*types.Log 494 c chan []*types.Log 495 sub *Subscription 496 }{ 497 // match all 498 {ethereum.FilterQuery{}, convertLogs(allLogs), nil, nil}, 499 // match none due to no matching addresses 500 {ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, nil, nil}, 501 // match logs based on addresses, ignore topics 502 {ethereum.FilterQuery{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil}, 503 // match none due to no matching topics (match with address) 504 {ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, nil, nil}, 505 // match logs based on addresses and topics 506 {ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil}, 507 // match logs based on multiple addresses and "or" topics 508 {ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil}, 509 // block numbers are ignored for filters created with New***Filter, these return all logs that match the given criteria when the state changes 510 {ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil}, 511 // multiple pending logs, should match 
only 2 topics from the logs in block 5 512 {ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil}, 513 } 514 ) 515 516 // create all subscriptions, this ensures all subscriptions are created before the events are posted. 517 // on slow machines this could otherwise lead to missing events when the subscription is created after 518 // (some) events are posted. 519 for i := range testCases { 520 testCases[i].c = make(chan []*types.Log) 521 testCases[i].sub, _ = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c) 522 } 523 524 for n, test := range testCases { 525 i := n 526 tt := test 527 go func() { 528 var fetched []*types.Log 529 fetchLoop: 530 for { 531 logs := <-tt.c 532 fetched = append(fetched, logs...) 533 if len(fetched) >= len(tt.expected) { 534 break fetchLoop 535 } 536 } 537 538 if len(fetched) != len(tt.expected) { 539 panic(fmt.Sprintf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))) 540 } 541 542 for l := range fetched { 543 if fetched[l].Removed { 544 panic(fmt.Sprintf("expected log not to be removed for log %d in case %d", l, i)) 545 } 546 if !reflect.DeepEqual(fetched[l], tt.expected[l]) { 547 panic(fmt.Sprintf("invalid log on index %d for case %d", l, i)) 548 } 549 } 550 }() 551 } 552 553 // raise events 554 time.Sleep(1 * time.Second) 555 // allLogs are type of core.PendingLogsEvent 556 for _, l := range allLogs { 557 if err := mux.Post(l); err != nil { 558 t.Fatal(err) 559 } 560 } 561 }