// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filters

import (
	"context"
	"fmt"
	"math/big"
	"math/rand"
	"reflect"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/bloombits"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rpc"
)

// testBackend is a minimal in-memory backend for the filter tests in this
// file. Instead of running a live blockchain, the tests push events directly
// into the individual event.Feed fields (and the TypeMux for pending logs).
type testBackend struct {
	mux        *event.TypeMux
	db         ethdb.Database
	sections   uint64 // number of processed bloombits sections reported by BloomStatus
	txFeed     *event.Feed
	rmLogsFeed *event.Feed
	logsFeed   *event.Feed
	chainFeed  *event.Feed
}

// ChainDb returns the backing in-memory database.
func (b *testBackend) ChainDb() ethdb.Database {
	return b.db
}

// EventMux returns the event mux the tests post pending-log events to.
func (b *testBackend) EventMux() *event.TypeMux {
	return b.mux
}

// HeaderByNumber resolves blockNr against the test database and returns the
// stored header for it (nil if the header is not in the database).
func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
	var hash common.Hash
	var num uint64
	if blockNr == rpc.LatestBlockNumber {
		// Latest: start from the head-block pointer and look its number up.
		hash = core.GetHeadBlockHash(b.db)
		num = core.GetBlockNumber(b.db, hash)
	} else {
		// Explicit number: map it to the canonical hash.
		num = uint64(blockNr)
		hash = core.GetCanonicalHash(b.db, num)
	}
	return core.GetHeader(b.db, hash, num), nil
}

// GetReceipts returns the receipts stored in the test database for blockHash.
func (b *testBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) {
	num := core.GetBlockNumber(b.db, blockHash)
	return core.GetBlockReceipts(b.db, blockHash, num), nil
}

// SubscribeTxPreEvent forwards the subscription to the test's transaction feed.
func (b *testBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
	return b.txFeed.Subscribe(ch)
}

// SubscribeRemovedLogsEvent forwards the subscription to the removed-logs feed.
func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
	return b.rmLogsFeed.Subscribe(ch)
}

// SubscribeLogsEvent forwards the subscription to the logs feed.
func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return b.logsFeed.Subscribe(ch)
}

// SubscribeChainEvent forwards the subscription to the chain feed.
func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
	return b.chainFeed.Subscribe(ch)
}

// BloomStatus reports the fixed section size and the configured number of
// processed sections.
func (b *testBackend) BloomStatus() (uint64, uint64) {
	return params.BloomBitsBlocks, b.sections
}

// ServiceFilter serves bloombits retrieval requests for the given matcher
// session out of the test database until ctx is cancelled. Roughly a quarter
// of the bitsets are deliberately left nil to exercise the matcher's handling
// of missing deliveries.
func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
	requests := make(chan chan *bloombits.Retrieval)

	go session.Multiplex(16, 0, requests)
	go func() {
		for {
			// Wait for a service request or a shutdown
			select {
			case <-ctx.Done():
				return

			case request := <-requests:
				task := <-request

				task.Bitsets = make([][]byte, len(task.Sections))
				for i, section := range task.Sections {
					if rand.Int()%4 != 0 { // Handle occasional missing deliveries
						head := core.GetCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
						task.Bitsets[i], _ = core.GetBloomBits(b.db, task.Bit, section, head)
					}
				}
				request <- task
			}
		}
	}()
}

// TestBlockSubscription tests if a block subscription returns block hashes for posted chain events.
// It creates multiple subscriptions:
// - one at the start and should receive all posted chain events and a second (blockHashes)
// - one that is created after a cutoff moment and uninstalled after a second cutoff moment (blockHashes[cutoff1:cutoff2])
// - one that is created after the second cutoff moment (blockHashes[cutoff2:])
func TestBlockSubscription(t *testing.T) {
	t.Parallel()

	var (
		mux         = new(event.TypeMux)
		db, _       = ethdb.NewMemDatabase()
		txFeed      = new(event.Feed)
		rmLogsFeed  = new(event.Feed)
		logsFeed    = new(event.Feed)
		chainFeed   = new(event.Feed)
		backend     = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
		api         = NewPublicFilterAPI(backend, false)
		genesis     = new(core.Genesis).MustCommit(db)
		chain, _    = core.GenerateChain(params.TestChainConfig, genesis, db, 10, func(i int, gen *core.BlockGen) {})
		chainEvents = []core.ChainEvent{}
	)

	// Turn the generated chain into the event sequence the feed will carry.
	for _, blk := range chain {
		chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk})
	}

	chan0 := make(chan *types.Header)
	sub0 := api.events.SubscribeNewHeads(chan0)
	chan1 := make(chan *types.Header)
	sub1 := api.events.SubscribeNewHeads(chan1)

	go func() { // simulate client
		// i1/i2 track how many headers each subscription has consumed; both
		// must see every event, in posting order.
		i1, i2 := 0, 0
		for i1 != len(chainEvents) || i2 != len(chainEvents) {
			select {
			case header := <-chan0:
				if chainEvents[i1].Hash != header.Hash() {
					t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash())
				}
				i1++
			case header := <-chan1:
				if chainEvents[i2].Hash != header.Hash() {
					t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash())
				}
				i2++
			}
		}

		sub0.Unsubscribe()
		sub1.Unsubscribe()
	}()

	// Give the event system time to install the subscriptions before posting.
	time.Sleep(1 * time.Second)
	for _, e := range chainEvents {
		chainFeed.Send(e)
	}

	// Err() is closed on Unsubscribe, so these block until the client
	// goroutine above has consumed everything.
	<-sub0.Err()
	<-sub1.Err()
}

182 // TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the event mux. 183 func TestPendingTxFilter(t *testing.T) { 184 t.Parallel() 185 186 var ( 187 mux = new(event.TypeMux) 188 db, _ = ethdb.NewMemDatabase() 189 txFeed = new(event.Feed) 190 rmLogsFeed = new(event.Feed) 191 logsFeed = new(event.Feed) 192 chainFeed = new(event.Feed) 193 backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} 194 api = NewPublicFilterAPI(backend, false) 195 196 transactions = []*types.Transaction{ 197 types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), new(big.Int), new(big.Int), nil), 198 types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), new(big.Int), new(big.Int), nil), 199 types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), new(big.Int), new(big.Int), nil), 200 types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), new(big.Int), new(big.Int), nil), 201 types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), new(big.Int), new(big.Int), nil), 202 } 203 204 hashes []common.Hash 205 ) 206 207 fid0 := api.NewPendingTransactionFilter() 208 209 time.Sleep(1 * time.Second) 210 for _, tx := range transactions { 211 ev := core.TxPreEvent{Tx: tx} 212 txFeed.Send(ev) 213 } 214 215 timeout := time.Now().Add(1 * time.Second) 216 for { 217 results, err := api.GetFilterChanges(fid0) 218 if err != nil { 219 t.Fatalf("Unable to retrieve logs: %v", err) 220 } 221 222 h := results.([]common.Hash) 223 hashes = append(hashes, h...) 
224 if len(hashes) >= len(transactions) { 225 break 226 } 227 // check timeout 228 if time.Now().After(timeout) { 229 break 230 } 231 232 time.Sleep(100 * time.Millisecond) 233 } 234 235 if len(hashes) != len(transactions) { 236 t.Errorf("invalid number of transactions, want %d transactions(s), got %d", len(transactions), len(hashes)) 237 return 238 } 239 for i := range hashes { 240 if hashes[i] != transactions[i].Hash() { 241 t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i]) 242 } 243 } 244 } 245 246 // TestLogFilterCreation test whether a given filter criteria makes sense. 247 // If not it must return an error. 248 func TestLogFilterCreation(t *testing.T) { 249 var ( 250 mux = new(event.TypeMux) 251 db, _ = ethdb.NewMemDatabase() 252 txFeed = new(event.Feed) 253 rmLogsFeed = new(event.Feed) 254 logsFeed = new(event.Feed) 255 chainFeed = new(event.Feed) 256 backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} 257 api = NewPublicFilterAPI(backend, false) 258 259 testCases = []struct { 260 crit FilterCriteria 261 success bool 262 }{ 263 // defaults 264 {FilterCriteria{}, true}, 265 // valid block number range 266 {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true}, 267 // "mined" block range to pending 268 {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true}, 269 // new mined and pending blocks 270 {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true}, 271 // from block "higher" than to block 272 {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false}, 273 // from block "higher" than to block 274 {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false}, 275 // from block "higher" than to block 276 {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false}, 
277 // from block "higher" than to block 278 {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false}, 279 } 280 ) 281 282 for i, test := range testCases { 283 _, err := api.NewFilter(test.crit) 284 if test.success && err != nil { 285 t.Errorf("expected filter creation for case %d to success, got %v", i, err) 286 } 287 if !test.success && err == nil { 288 t.Errorf("expected testcase %d to fail with an error", i) 289 } 290 } 291 } 292 293 // TestInvalidLogFilterCreation tests whether invalid filter log criteria results in an error 294 // when the filter is created. 295 func TestInvalidLogFilterCreation(t *testing.T) { 296 t.Parallel() 297 298 var ( 299 mux = new(event.TypeMux) 300 db, _ = ethdb.NewMemDatabase() 301 txFeed = new(event.Feed) 302 rmLogsFeed = new(event.Feed) 303 logsFeed = new(event.Feed) 304 chainFeed = new(event.Feed) 305 backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} 306 api = NewPublicFilterAPI(backend, false) 307 ) 308 309 // different situations where log filter creation should fail. 310 // Reason: fromBlock > toBlock 311 testCases := []FilterCriteria{ 312 0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, 313 1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 314 2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 315 } 316 317 for i, test := range testCases { 318 if _, err := api.NewFilter(test); err == nil { 319 t.Errorf("Expected NewFilter for case #%d to fail", i) 320 } 321 } 322 } 323 324 // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed. 
325 func TestLogFilter(t *testing.T) { 326 t.Parallel() 327 328 var ( 329 mux = new(event.TypeMux) 330 db, _ = ethdb.NewMemDatabase() 331 txFeed = new(event.Feed) 332 rmLogsFeed = new(event.Feed) 333 logsFeed = new(event.Feed) 334 chainFeed = new(event.Feed) 335 backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} 336 api = NewPublicFilterAPI(backend, false) 337 338 firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") 339 secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") 340 thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") 341 notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") 342 firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 343 secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") 344 notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999") 345 346 // posted twice, once as vm.Logs and once as core.PendingLogsEvent 347 allLogs = []*types.Log{ 348 {Address: firstAddr}, 349 {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}, 350 {Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}, 351 {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2}, 352 {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}, 353 } 354 355 expectedCase7 = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]} 356 expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]} 357 358 testCases = []struct { 359 crit FilterCriteria 360 expected []*types.Log 361 id rpc.ID 362 }{ 363 // match all 364 0: {FilterCriteria{}, allLogs, ""}, 365 // match none due to no matching addresses 366 1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: 
[][]common.Hash{nil}}, []*types.Log{}, ""}, 367 // match logs based on addresses, ignore topics 368 2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""}, 369 // match none due to no matching topics (match with address) 370 3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""}, 371 // match logs based on addresses and topics 372 4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""}, 373 // match logs based on multiple addresses and "or" topics 374 5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""}, 375 // logs in the pending block 376 6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""}, 377 // mined logs with block num >= 2 or pending logs 378 7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""}, 379 // all "mined" logs with block num >= 2 380 8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""}, 381 // all "mined" logs 382 9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""}, 383 // all "mined" logs with 1>= block num <=2 and topic secondTopic 384 10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""}, 385 // all "mined" and pending logs with topic firstTopic 386 11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""}, 387 // match all logs due to wildcard topic 388 12: {FilterCriteria{Topics: [][]common.Hash{nil}}, 
allLogs[1:], ""}, 389 } 390 ) 391 392 // create all filters 393 for i := range testCases { 394 testCases[i].id, _ = api.NewFilter(testCases[i].crit) 395 } 396 397 // raise events 398 time.Sleep(1 * time.Second) 399 if nsend := logsFeed.Send(allLogs); nsend == 0 { 400 t.Fatal("Shoud have at least one subscription") 401 } 402 if err := mux.Post(core.PendingLogsEvent{Logs: allLogs}); err != nil { 403 t.Fatal(err) 404 } 405 406 for i, tt := range testCases { 407 var fetched []*types.Log 408 timeout := time.Now().Add(1 * time.Second) 409 for { // fetch all expected logs 410 results, err := api.GetFilterChanges(tt.id) 411 if err != nil { 412 t.Fatalf("Unable to fetch logs: %v", err) 413 } 414 415 fetched = append(fetched, results.([]*types.Log)...) 416 if len(fetched) >= len(tt.expected) { 417 break 418 } 419 // check timeout 420 if time.Now().After(timeout) { 421 break 422 } 423 424 time.Sleep(100 * time.Millisecond) 425 } 426 427 if len(fetched) != len(tt.expected) { 428 t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)) 429 return 430 } 431 432 for l := range fetched { 433 if fetched[l].Removed { 434 t.Errorf("expected log not to be removed for log %d in case %d", l, i) 435 } 436 if !reflect.DeepEqual(fetched[l], tt.expected[l]) { 437 t.Errorf("invalid log on index %d for case %d", l, i) 438 } 439 } 440 } 441 } 442 443 // TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event feed. 
444 func TestPendingLogsSubscription(t *testing.T) { 445 t.Parallel() 446 447 var ( 448 mux = new(event.TypeMux) 449 db, _ = ethdb.NewMemDatabase() 450 txFeed = new(event.Feed) 451 rmLogsFeed = new(event.Feed) 452 logsFeed = new(event.Feed) 453 chainFeed = new(event.Feed) 454 backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed} 455 api = NewPublicFilterAPI(backend, false) 456 457 firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") 458 secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") 459 thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") 460 notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") 461 firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 462 secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") 463 thirdTopic = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333") 464 fourthTopic = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444") 465 notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999") 466 467 allLogs = []core.PendingLogsEvent{ 468 {Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}}, 469 {Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}}, 470 {Logs: []*types.Log{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}}, 471 {Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}}, 472 {Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}}, 473 {Logs: []*types.Log{ 474 {Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5}, 475 {Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5}, 476 {Address: 
thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5}, 477 {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5}, 478 }}, 479 } 480 481 convertLogs = func(pl []core.PendingLogsEvent) []*types.Log { 482 var logs []*types.Log 483 for _, l := range pl { 484 logs = append(logs, l.Logs...) 485 } 486 return logs 487 } 488 489 testCases = []struct { 490 crit FilterCriteria 491 expected []*types.Log 492 c chan []*types.Log 493 sub *Subscription 494 }{ 495 // match all 496 {FilterCriteria{}, convertLogs(allLogs), nil, nil}, 497 // match none due to no matching addresses 498 {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, nil, nil}, 499 // match logs based on addresses, ignore topics 500 {FilterCriteria{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil}, 501 // match none due to no matching topics (match with address) 502 {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, nil, nil}, 503 // match logs based on addresses and topics 504 {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil}, 505 // match logs based on multiple addresses and "or" topics 506 {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil}, 507 // block numbers are ignored for filters created with New***Filter, these return all logs that match the given criteria when the state changes 508 {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil}, 509 // multiple pending logs, should match only 2 topics from the logs in block 5 510 
{FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil}, 511 } 512 ) 513 514 // create all subscriptions, this ensures all subscriptions are created before the events are posted. 515 // on slow machines this could otherwise lead to missing events when the subscription is created after 516 // (some) events are posted. 517 for i := range testCases { 518 testCases[i].c = make(chan []*types.Log) 519 testCases[i].sub, _ = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c) 520 } 521 522 for n, test := range testCases { 523 i := n 524 tt := test 525 go func() { 526 var fetched []*types.Log 527 fetchLoop: 528 for { 529 logs := <-tt.c 530 fetched = append(fetched, logs...) 531 if len(fetched) >= len(tt.expected) { 532 break fetchLoop 533 } 534 } 535 536 if len(fetched) != len(tt.expected) { 537 panic(fmt.Sprintf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))) 538 } 539 540 for l := range fetched { 541 if fetched[l].Removed { 542 panic(fmt.Sprintf("expected log not to be removed for log %d in case %d", l, i)) 543 } 544 if !reflect.DeepEqual(fetched[l], tt.expected[l]) { 545 panic(fmt.Sprintf("invalid log on index %d for case %d", l, i)) 546 } 547 } 548 }() 549 } 550 551 // raise events 552 time.Sleep(1 * time.Second) 553 // allLogs are type of core.PendingLogsEvent 554 for _, l := range allLogs { 555 if err := mux.Post(l); err != nil { 556 t.Fatal(err) 557 } 558 } 559 }