github.com/ethxdao/go-ethereum@v0.0.0-20221218102228-5ae34a9cc189/eth/filters/filter_system_test.go (about) 1 // Copyright 2016 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package filters 18 19 import ( 20 "context" 21 "fmt" 22 "math/big" 23 "math/rand" 24 "reflect" 25 "runtime" 26 "testing" 27 "time" 28 29 "github.com/ethxdao/go-ethereum/common" 30 "github.com/ethxdao/go-ethereum/consensus/ethash" 31 "github.com/ethxdao/go-ethereum/core" 32 "github.com/ethxdao/go-ethereum/core/bloombits" 33 "github.com/ethxdao/go-ethereum/core/rawdb" 34 "github.com/ethxdao/go-ethereum/core/types" 35 "github.com/ethxdao/go-ethereum/ethdb" 36 "github.com/ethxdao/go-ethereum/event" 37 "github.com/ethxdao/go-ethereum/params" 38 "github.com/ethxdao/go-ethereum/rpc" 39 ) 40 41 type testBackend struct { 42 db ethdb.Database 43 sections uint64 44 txFeed event.Feed 45 logsFeed event.Feed 46 rmLogsFeed event.Feed 47 pendingLogsFeed event.Feed 48 chainFeed event.Feed 49 } 50 51 func (b *testBackend) ChainDb() ethdb.Database { 52 return b.db 53 } 54 55 func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) { 56 var ( 57 hash common.Hash 58 num uint64 59 ) 60 if blockNr == 
rpc.LatestBlockNumber { 61 hash = rawdb.ReadHeadBlockHash(b.db) 62 number := rawdb.ReadHeaderNumber(b.db, hash) 63 if number == nil { 64 return nil, nil 65 } 66 num = *number 67 } else { 68 num = uint64(blockNr) 69 hash = rawdb.ReadCanonicalHash(b.db, num) 70 } 71 return rawdb.ReadHeader(b.db, hash, num), nil 72 } 73 74 func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { 75 number := rawdb.ReadHeaderNumber(b.db, hash) 76 if number == nil { 77 return nil, nil 78 } 79 return rawdb.ReadHeader(b.db, hash, *number), nil 80 } 81 82 func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { 83 if number := rawdb.ReadHeaderNumber(b.db, hash); number != nil { 84 return rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig), nil 85 } 86 return nil, nil 87 } 88 89 func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { 90 logs := rawdb.ReadLogs(b.db, hash, number, params.TestChainConfig) 91 return logs, nil 92 } 93 94 func (b *testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { 95 return nil, nil 96 } 97 98 func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { 99 return b.txFeed.Subscribe(ch) 100 } 101 102 func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { 103 return b.rmLogsFeed.Subscribe(ch) 104 } 105 106 func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { 107 return b.logsFeed.Subscribe(ch) 108 } 109 110 func (b *testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { 111 return b.pendingLogsFeed.Subscribe(ch) 112 } 113 114 func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { 115 return b.chainFeed.Subscribe(ch) 116 } 117 118 func (b *testBackend) BloomStatus() (uint64, uint64) { 119 return params.BloomBitsBlocks, 
b.sections 120 } 121 122 func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) { 123 requests := make(chan chan *bloombits.Retrieval) 124 125 go session.Multiplex(16, 0, requests) 126 go func() { 127 for { 128 // Wait for a service request or a shutdown 129 select { 130 case <-ctx.Done(): 131 return 132 133 case request := <-requests: 134 task := <-request 135 136 task.Bitsets = make([][]byte, len(task.Sections)) 137 for i, section := range task.Sections { 138 if rand.Int()%4 != 0 { // Handle occasional missing deliveries 139 head := rawdb.ReadCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1) 140 task.Bitsets[i], _ = rawdb.ReadBloomBits(b.db, task.Bit, section, head) 141 } 142 } 143 request <- task 144 } 145 } 146 }() 147 } 148 149 func newTestFilterSystem(t testing.TB, db ethdb.Database, cfg Config) (*testBackend, *FilterSystem) { 150 backend := &testBackend{db: db} 151 sys := NewFilterSystem(backend, cfg) 152 return backend, sys 153 } 154 155 // TestBlockSubscription tests if a block subscription returns block hashes for posted chain events. 
// It creates multiple subscriptions:
// - one at the start and should receive all posted chain events and a second (blockHashes)
// - one that is created after a cutoff moment and uninstalled after a second cutoff moment (blockHashes[cutoff1:cutoff2])
// - one that is created after the second cutoff moment (blockHashes[cutoff2:])
func TestBlockSubscription(t *testing.T) {
	t.Parallel()

	var (
		db           = rawdb.NewMemoryDatabase()
		backend, sys = newTestFilterSystem(t, db, Config{})
		api          = NewFilterAPI(sys, false)
		genesis      = (&core.Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
		chain, _     = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {})
		chainEvents  = []core.ChainEvent{}
	)

	// Turn the generated chain into the event sequence the feed will post.
	for _, blk := range chain {
		chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk})
	}

	// Two independent head subscriptions; both must observe every event.
	chan0 := make(chan *types.Header)
	sub0 := api.events.SubscribeNewHeads(chan0)
	chan1 := make(chan *types.Header)
	sub1 := api.events.SubscribeNewHeads(chan1)

	go func() { // simulate client
		// i1/i2 count how many headers each subscription has received; the
		// loop exits only once both have seen the full event sequence.
		// t.Errorf (not Fatalf) is used because this runs in a goroutine.
		i1, i2 := 0, 0
		for i1 != len(chainEvents) || i2 != len(chainEvents) {
			select {
			case header := <-chan0:
				if chainEvents[i1].Hash != header.Hash() {
					t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash())
				}
				i1++
			case header := <-chan1:
				if chainEvents[i2].Hash != header.Hash() {
					t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash())
				}
				i2++
			}
		}

		sub0.Unsubscribe()
		sub1.Unsubscribe()
	}()

	// Give the event system time to register the subscriptions before posting.
	time.Sleep(1 * time.Second)
	for _, e := range chainEvents {
		backend.chainFeed.Send(e)
	}

	// Unsubscribe (above) closes the Err channels; block until both fire.
	<-sub0.Err()
	<-sub1.Err()
}

// TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the
event mux. 212 func TestPendingTxFilter(t *testing.T) { 213 t.Parallel() 214 215 var ( 216 db = rawdb.NewMemoryDatabase() 217 backend, sys = newTestFilterSystem(t, db, Config{}) 218 api = NewFilterAPI(sys, false) 219 220 transactions = []*types.Transaction{ 221 types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 222 types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 223 types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 224 types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 225 types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 226 } 227 228 hashes []common.Hash 229 ) 230 231 fid0 := api.NewPendingTransactionFilter() 232 233 time.Sleep(1 * time.Second) 234 backend.txFeed.Send(core.NewTxsEvent{Txs: transactions}) 235 236 timeout := time.Now().Add(1 * time.Second) 237 for { 238 results, err := api.GetFilterChanges(fid0) 239 if err != nil { 240 t.Fatalf("Unable to retrieve logs: %v", err) 241 } 242 243 h := results.([]common.Hash) 244 hashes = append(hashes, h...) 245 if len(hashes) >= len(transactions) { 246 break 247 } 248 // check timeout 249 if time.Now().After(timeout) { 250 break 251 } 252 253 time.Sleep(100 * time.Millisecond) 254 } 255 256 if len(hashes) != len(transactions) { 257 t.Errorf("invalid number of transactions, want %d transactions(s), got %d", len(transactions), len(hashes)) 258 return 259 } 260 for i := range hashes { 261 if hashes[i] != transactions[i].Hash() { 262 t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i]) 263 } 264 } 265 } 266 267 // TestLogFilterCreation test whether a given filter criteria makes sense. 
268 // If not it must return an error. 269 func TestLogFilterCreation(t *testing.T) { 270 var ( 271 db = rawdb.NewMemoryDatabase() 272 _, sys = newTestFilterSystem(t, db, Config{}) 273 api = NewFilterAPI(sys, false) 274 275 testCases = []struct { 276 crit FilterCriteria 277 success bool 278 }{ 279 // defaults 280 {FilterCriteria{}, true}, 281 // valid block number range 282 {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true}, 283 // "mined" block range to pending 284 {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true}, 285 // new mined and pending blocks 286 {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true}, 287 // from block "higher" than to block 288 {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false}, 289 // from block "higher" than to block 290 {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false}, 291 // from block "higher" than to block 292 {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false}, 293 // from block "higher" than to block 294 {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false}, 295 } 296 ) 297 298 for i, test := range testCases { 299 id, err := api.NewFilter(test.crit) 300 if err != nil && test.success { 301 t.Errorf("expected filter creation for case %d to success, got %v", i, err) 302 } 303 if err == nil { 304 api.UninstallFilter(id) 305 if !test.success { 306 t.Errorf("expected testcase %d to fail with an error", i) 307 } 308 } 309 } 310 } 311 312 // TestInvalidLogFilterCreation tests whether invalid filter log criteria results in an error 313 // when the filter is created. 
314 func TestInvalidLogFilterCreation(t *testing.T) { 315 t.Parallel() 316 317 var ( 318 db = rawdb.NewMemoryDatabase() 319 _, sys = newTestFilterSystem(t, db, Config{}) 320 api = NewFilterAPI(sys, false) 321 ) 322 323 // different situations where log filter creation should fail. 324 // Reason: fromBlock > toBlock 325 testCases := []FilterCriteria{ 326 0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, 327 1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 328 2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 329 } 330 331 for i, test := range testCases { 332 if _, err := api.NewFilter(test); err == nil { 333 t.Errorf("Expected NewFilter for case #%d to fail", i) 334 } 335 } 336 } 337 338 func TestInvalidGetLogsRequest(t *testing.T) { 339 var ( 340 db = rawdb.NewMemoryDatabase() 341 _, sys = newTestFilterSystem(t, db, Config{}) 342 api = NewFilterAPI(sys, false) 343 blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 344 ) 345 346 // Reason: Cannot specify both BlockHash and FromBlock/ToBlock) 347 testCases := []FilterCriteria{ 348 0: {BlockHash: &blockHash, FromBlock: big.NewInt(100)}, 349 1: {BlockHash: &blockHash, ToBlock: big.NewInt(500)}, 350 2: {BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, 351 } 352 353 for i, test := range testCases { 354 if _, err := api.GetLogs(context.Background(), test); err == nil { 355 t.Errorf("Expected Logs for case #%d to fail", i) 356 } 357 } 358 } 359 360 // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed. 
func TestLogFilter(t *testing.T) {
	t.Parallel()

	var (
		db           = rawdb.NewMemoryDatabase()
		backend, sys = newTestFilterSystem(t, db, Config{})
		api          = NewFilterAPI(sys, false)

		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")

		// posted twice, once as regular logs and once as pending logs.
		allLogs = []*types.Log{
			{Address: firstAddr},
			{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
			{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2},
			{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3},
		}

		// case 7 expects the mined logs with block number >= 2 first, then the
		// whole set again from the pending-logs delivery.
		expectedCase7 = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]}
		// case 11 expects the firstTopic matches from both deliveries.
		expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]}

		// testCases pairs a filter criteria with the logs that filter should
		// collect after both feed sends; id is filled in on creation below.
		testCases = []struct {
			crit     FilterCriteria
			expected []*types.Log
			id       rpc.ID
		}{
			// match all
			0: {FilterCriteria{}, allLogs, ""},
			// match none due to no matching addresses
			1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""},
			// match logs based on addresses, ignore topics
			2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
			// match none due to no matching topics (match with address)
			3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""},
			// match logs based on addresses and topics
			4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""},
			// match logs based on multiple addresses and "or" topics
			5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""},
			// logs in the pending block
			6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""},
			// mined logs with block num >= 2 or pending logs
			7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""},
			// all "mined" logs with block num >= 2
			8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""},
			// all "mined" logs
			9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""},
			// all "mined" logs with 1>= block num <=2 and topic secondTopic
			10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""},
			// all "mined" and pending logs with topic firstTopic
			11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""},
			// match all logs due to wildcard topic; note the expectation skips
			// allLogs[0], which carries no topics at all
			12: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], ""},
		}
	)

	// create all filters
	for i := range testCases {
		testCases[i].id, _ = api.NewFilter(testCases[i].crit)
	}

	// raise events; the sleep gives the filters time to get registered, and a
	// zero subscriber count on either send means delivery failed outright.
	time.Sleep(1 * time.Second)
	if nsend := backend.logsFeed.Send(allLogs); nsend == 0 {
		t.Fatal("Logs event not delivered")
	}
	if nsend := backend.pendingLogsFeed.Send(allLogs); nsend == 0 {
		t.Fatal("Pending logs event not delivered")
	}

	for i, tt := range testCases {
		var fetched []*types.Log
		timeout := time.Now().Add(1 * time.Second)
		for { // fetch all expected logs
			results, err := api.GetFilterChanges(tt.id)
			if err != nil {
				t.Fatalf("Unable to fetch logs: %v", err)
			}

			fetched = append(fetched, results.([]*types.Log)...)
			if len(fetched) >= len(tt.expected) {
				break
			}
			// check timeout
			if time.Now().After(timeout) {
				break
			}

			time.Sleep(100 * time.Millisecond)
		}

		if len(fetched) != len(tt.expected) {
			t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
			return
		}

		// Verify content and order; none of the posted logs were removed.
		for l := range fetched {
			if fetched[l].Removed {
				t.Errorf("expected log not to be removed for log %d in case %d", l, i)
			}
			if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
				t.Errorf("invalid log on index %d for case %d", l, i)
			}
		}
	}
}

// TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event feed.
func TestPendingLogsSubscription(t *testing.T) {
	t.Parallel()

	var (
		db           = rawdb.NewMemoryDatabase()
		backend, sys = newTestFilterSystem(t, db, Config{})
		api          = NewFilterAPI(sys, false)

		firstAddr      = common.HexToAddress("0x1111111111111111111111111111111111111111")
		secondAddr     = common.HexToAddress("0x2222222222222222222222222222222222222222")
		thirdAddress   = common.HexToAddress("0x3333333333333333333333333333333333333333")
		notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
		firstTopic     = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
		secondTopic    = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
		thirdTopic     = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333")
		fourthTopic    = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444")
		notUsedTopic   = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")

		// One entry per pending-logs event that will be posted; the last entry
		// carries four logs in a single event (all "block 5").
		allLogs = [][]*types.Log{
			{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}},
			{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}},
			{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}},
			{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}},
			{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}},
			{
				{Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
				{Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
				{Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5},
				{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
			},
		}

		pendingBlockNumber = big.NewInt(rpc.PendingBlockNumber.Int64())

		// c, sub and err are populated per-case in the setup loop below.
		testCases = []struct {
			crit     ethereum.FilterQuery
			expected []*types.Log
			c        chan []*types.Log
			sub      *Subscription
			err      chan error
		}{
			// match all
			{
				ethereum.FilterQuery{FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				flattenLogs(allLogs),
				nil, nil, nil,
			},
			// match none due to no matching addresses
			{
				ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				nil,
				nil, nil, nil,
			},
			// match logs based on addresses, ignore topics
			{
				ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				append(flattenLogs(allLogs[:2]), allLogs[5][3]),
				nil, nil, nil,
			},
			// match none due to no matching topics (match with address)
			{
				ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				nil,
				nil, nil, nil,
			},
			// match logs based on addresses and topics
			{
				ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				append(flattenLogs(allLogs[3:5]), allLogs[5][0]),
				nil, nil, nil,
			},
			// match logs based on multiple addresses and "or" topics
			{
				ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				append(flattenLogs(allLogs[2:5]), allLogs[5][0]),
				nil, nil, nil,
			},
			// multiple pending logs, should match only 2 topics from the logs in block 5
			{
				ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber},
				[]*types.Log{allLogs[5][0], allLogs[5][2]},
				nil, nil, nil,
			},
			// match none due to only matching new mined logs
			{
				ethereum.FilterQuery{},
				nil,
				nil, nil, nil,
			},
			// match none due to only matching mined logs within a specific block range
			{
				ethereum.FilterQuery{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)},
				nil,
				nil, nil, nil,
			},
			// match all due to matching mined and pending logs
			{
				ethereum.FilterQuery{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())},
				flattenLogs(allLogs),
				nil, nil, nil,
			},
			// match none due to matching logs from a specific block number to new mined blocks
			{
				ethereum.FilterQuery{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
				nil,
				nil, nil, nil,
			},
		}
	)

	// create all subscriptions, this ensures all subscriptions are created before the events are posted.
	// on slow machines this could otherwise lead to missing events when the subscription is created after
	// (some) events are posted.
	for i := range testCases {
		testCases[i].c = make(chan []*types.Log)
		testCases[i].err = make(chan error, 1)

		var err error
		testCases[i].sub, err = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c)
		if err != nil {
			t.Fatalf("SubscribeLogs %d failed: %v\n", i, err)
		}
	}

	for n, test := range testCases {
		// Capture the loop variables for the goroutine below (pre-Go 1.22
		// loop-variable semantics).
		i := n
		tt := test
		go func() {
			defer tt.sub.Unsubscribe()

			var fetched []*types.Log

			timeout := time.After(1 * time.Second)
		fetchLoop:
			for {
				select {
				case logs := <-tt.c:
					// Do not break early if we've fetched greater, or equal,
					// to the number of logs expected. This ensures we do not
					// deadlock the filter system because it will do a blocking
					// send on this channel if another log arrives.
					fetched = append(fetched, logs...)
				case <-timeout:
					break fetchLoop
				}
			}

			if len(fetched) != len(tt.expected) {
				tt.err <- fmt.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
				return
			}

			for l := range fetched {
				if fetched[l].Removed {
					tt.err <- fmt.Errorf("expected log not to be removed for log %d in case %d", l, i)
					return
				}
				if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
					tt.err <- fmt.Errorf("invalid log on index %d for case %d\n", l, i)
					return
				}
			}
			// Signal success; the main goroutine blocks on this channel.
			tt.err <- nil
		}()
	}

	// raise events
	for _, ev := range allLogs {
		backend.pendingLogsFeed.Send(ev)
	}

	// Collect one result per case, then wait for each unsubscribe to complete.
	for i := range testCases {
		err := <-testCases[i].err
		if err != nil {
			t.Fatalf("test %d failed: %v", i, err)
		}
		<-testCases[i].sub.Err()
	}
}

// TestPendingTxFilterDeadlock tests if the event loop hangs when pending
// txes arrive at the same time that one of multiple filters is timing out.
// Please refer to #22131 for more details.
func TestPendingTxFilterDeadlock(t *testing.T) {
	t.Parallel()
	// Short filter timeout so filters expire while the feed is still busy.
	timeout := 100 * time.Millisecond

	var (
		db           = rawdb.NewMemoryDatabase()
		backend, sys = newTestFilterSystem(t, db, Config{Timeout: timeout})
		api          = NewFilterAPI(sys, false)
		done         = make(chan struct{})
	)

	go func() {
		// Bombard feed with txes until signal was received to stop
		i := uint64(0)
		for {
			// Non-blocking check: keep sending unless told to stop.
			select {
			case <-done:
				return
			default:
			}

			tx := types.NewTransaction(i, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil)
			backend.txFeed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx}})
			i++
		}
	}()

	// Create a bunch of filters that will
	// timeout either in 100ms or 200ms
	fids := make([]rpc.ID, 20)
	for i := 0; i < len(fids); i++ {
		fid := api.NewPendingTransactionFilter()
		fids[i] = fid
		// Wait for at least one tx to arrive in filter
		for {
			hashes, err := api.GetFilterChanges(fid)
			if err != nil {
				t.Fatalf("Filter should exist: %v\n", err)
			}
			if len(hashes.([]common.Hash)) > 0 {
				break
			}
			// Yield so the sender goroutine and event loop get CPU time.
			runtime.Gosched()
		}
	}

	// Wait until filters have timed out
	time.Sleep(3 * timeout)

	// If tx loop doesn't consume `done` after a second
	// it's hanging.
	select {
	case done <- struct{}{}:
		// Check that all filters have been uninstalled
		for _, fid := range fids {
			if _, err := api.GetFilterChanges(fid); err == nil {
				t.Errorf("Filter %s should have been uninstalled\n", fid)
			}
		}
	case <-time.After(1 * time.Second):
		// The sender goroutine never read from `done`: the event loop deadlocked.
		t.Error("Tx sending loop hangs")
	}
}

// flattenLogs concatenates the per-event log slices into a single flat slice,
// preserving order.
func flattenLogs(pl [][]*types.Log) []*types.Log {
	var logs []*types.Log
	for _, l := range pl {
		logs = append(logs, l...)
	}
	return logs
}