github.com/electroneum/electroneum-sc@v0.0.0-20230105223411-3bc1d078281e/eth/filters/filter_system_test.go (about) 1 // Copyright 2016 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package filters 18 19 import ( 20 "context" 21 "fmt" 22 "math/big" 23 "math/rand" 24 "reflect" 25 "runtime" 26 "testing" 27 "time" 28 29 electroneum "github.com/electroneum/electroneum-sc" 30 "github.com/electroneum/electroneum-sc/common" 31 "github.com/electroneum/electroneum-sc/consensus/ethash" 32 "github.com/electroneum/electroneum-sc/core" 33 "github.com/electroneum/electroneum-sc/core/bloombits" 34 "github.com/electroneum/electroneum-sc/core/rawdb" 35 "github.com/electroneum/electroneum-sc/core/types" 36 "github.com/electroneum/electroneum-sc/ethdb" 37 "github.com/electroneum/electroneum-sc/event" 38 "github.com/electroneum/electroneum-sc/params" 39 "github.com/electroneum/electroneum-sc/rpc" 40 ) 41 42 var ( 43 deadline = 5 * time.Minute 44 ) 45 46 type testBackend struct { 47 db ethdb.Database 48 sections uint64 49 txFeed event.Feed 50 logsFeed event.Feed 51 rmLogsFeed event.Feed 52 pendingLogsFeed event.Feed 53 chainFeed event.Feed 54 } 55 56 func (b *testBackend) ChainDb() ethdb.Database { 57 return b.db 58 } 59 60 func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) { 61 var ( 62 hash common.Hash 63 num uint64 64 ) 65 if blockNr == rpc.LatestBlockNumber { 66 hash = rawdb.ReadHeadBlockHash(b.db) 67 number := rawdb.ReadHeaderNumber(b.db, hash) 68 if number == nil { 69 return nil, nil 70 } 71 num = *number 72 } else { 73 num = uint64(blockNr) 74 hash = rawdb.ReadCanonicalHash(b.db, num) 75 } 76 return rawdb.ReadHeader(b.db, hash, num), nil 77 } 78 79 func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { 80 number := rawdb.ReadHeaderNumber(b.db, hash) 81 if number == nil { 82 return nil, nil 83 } 84 return rawdb.ReadHeader(b.db, hash, *number), nil 85 } 86 87 func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { 88 if number := rawdb.ReadHeaderNumber(b.db, hash); number != nil { 89 return rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig), nil 90 } 91 return nil, nil 92 } 93 94 func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) { 95 number := rawdb.ReadHeaderNumber(b.db, hash) 96 if number == nil { 97 return nil, nil 98 } 99 receipts := rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig) 100 101 logs := make([][]*types.Log, len(receipts)) 102 for i, receipt := range receipts { 103 logs[i] = receipt.Logs 104 } 105 return logs, nil 106 } 107 108 func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { 109 return b.txFeed.Subscribe(ch) 110 } 111 112 func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { 113 return b.rmLogsFeed.Subscribe(ch) 114 } 115 116 func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { 117 return b.logsFeed.Subscribe(ch) 118 } 119 120 func (b *testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { 121 return b.pendingLogsFeed.Subscribe(ch) 122 } 123 124 func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { 125 return b.chainFeed.Subscribe(ch) 126 } 127 128 func (b *testBackend) BloomStatus() (uint64, uint64) { 129 return params.BloomBitsBlocks, b.sections 130 } 131 132 func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) { 133 requests := make(chan chan *bloombits.Retrieval) 134 135 go session.Multiplex(16, 0, requests) 136 go func() { 137 for { 138 // Wait for a service request or a shutdown 139 select { 140 case <-ctx.Done(): 141 return 142 143 case request := <-requests: 144 task := <-request 145 146 task.Bitsets = make([][]byte, len(task.Sections)) 147 for i, section := range task.Sections { 148 if rand.Int()%4 != 0 { // Handle occasional missing deliveries 149 head := rawdb.ReadCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1) 150 task.Bitsets[i], _ = rawdb.ReadBloomBits(b.db, task.Bit, section, head) 151 } 152 } 153 request <- task 154 } 155 } 156 }() 157 } 158 159 // TestBlockSubscription tests if a block subscription returns block hashes for posted chain events. 160 // It creates multiple subscriptions: 161 // - one at the start and should receive all posted chain events and a second (blockHashes) 162 // - one that is created after a cutoff moment and uninstalled after a second cutoff moment (blockHashes[cutoff1:cutoff2]) 163 // - one that is created after the second cutoff moment (blockHashes[cutoff2:]) 164 func TestBlockSubscription(t *testing.T) { 165 t.Parallel() 166 167 var ( 168 db = rawdb.NewMemoryDatabase() 169 backend = &testBackend{db: db} 170 api = NewPublicFilterAPI(backend, false, deadline) 171 genesis = (&core.Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db) 172 chain, _ = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {}) 173 chainEvents = []core.ChainEvent{} 174 ) 175 176 for _, blk := range chain { 177 chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk}) 178 } 179 180 chan0 := make(chan *types.Header) 181 sub0 := api.events.SubscribeNewHeads(chan0) 182 chan1 := make(chan *types.Header) 183 sub1 := api.events.SubscribeNewHeads(chan1) 184 185 go func() { // simulate client 186 i1, i2 := 0, 0 187 for i1 != len(chainEvents) || i2 != len(chainEvents) { 188 select { 189 case header := <-chan0: 190 if chainEvents[i1].Hash != header.Hash() { 191 t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash()) 192 } 193 i1++ 194 case header := <-chan1: 195 if chainEvents[i2].Hash != header.Hash() { 196 t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash()) 197 } 198 i2++ 199 } 200 } 201 202 sub0.Unsubscribe() 203 sub1.Unsubscribe() 204 }() 205 206 time.Sleep(1 * time.Second) 207 for _, e := range chainEvents { 208 backend.chainFeed.Send(e) 209 } 210 211 <-sub0.Err() 212 <-sub1.Err() 213 } 214 215 // TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the event mux. 216 func TestPendingTxFilter(t *testing.T) { 217 t.Parallel() 218 219 var ( 220 db = rawdb.NewMemoryDatabase() 221 backend = &testBackend{db: db} 222 api = NewPublicFilterAPI(backend, false, deadline) 223 224 transactions = []*types.Transaction{ 225 types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 226 types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 227 types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 228 types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 229 types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 230 } 231 232 hashes []common.Hash 233 ) 234 235 fid0 := api.NewPendingTransactionFilter() 236 237 time.Sleep(1 * time.Second) 238 backend.txFeed.Send(core.NewTxsEvent{Txs: transactions}) 239 240 timeout := time.Now().Add(1 * time.Second) 241 for { 242 results, err := api.GetFilterChanges(fid0) 243 if err != nil { 244 t.Fatalf("Unable to retrieve logs: %v", err) 245 } 246 247 h := results.([]common.Hash) 248 hashes = append(hashes, h...) 249 if len(hashes) >= len(transactions) { 250 break 251 } 252 // check timeout 253 if time.Now().After(timeout) { 254 break 255 } 256 257 time.Sleep(100 * time.Millisecond) 258 } 259 260 if len(hashes) != len(transactions) { 261 t.Errorf("invalid number of transactions, want %d transactions(s), got %d", len(transactions), len(hashes)) 262 return 263 } 264 for i := range hashes { 265 if hashes[i] != transactions[i].Hash() { 266 t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i]) 267 } 268 } 269 } 270 271 // TestLogFilterCreation test whether a given filter criteria makes sense. 272 // If not it must return an error. 273 func TestLogFilterCreation(t *testing.T) { 274 var ( 275 db = rawdb.NewMemoryDatabase() 276 backend = &testBackend{db: db} 277 api = NewPublicFilterAPI(backend, false, deadline) 278 279 testCases = []struct { 280 crit FilterCriteria 281 success bool 282 }{ 283 // defaults 284 {FilterCriteria{}, true}, 285 // valid block number range 286 {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true}, 287 // "mined" block range to pending 288 {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true}, 289 // new mined and pending blocks 290 {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true}, 291 // from block "higher" than to block 292 {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false}, 293 // from block "higher" than to block 294 {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false}, 295 // from block "higher" than to block 296 {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false}, 297 // from block "higher" than to block 298 {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false}, 299 } 300 ) 301 302 for i, test := range testCases { 303 id, err := api.NewFilter(test.crit) 304 if err != nil && test.success { 305 t.Errorf("expected filter creation for case %d to success, got %v", i, err) 306 } 307 if err == nil { 308 api.UninstallFilter(id) 309 if !test.success { 310 t.Errorf("expected testcase %d to fail with an error", i) 311 } 312 } 313 } 314 } 315 316 // TestInvalidLogFilterCreation tests whether invalid filter log criteria results in an error 317 // when the filter is created. 318 func TestInvalidLogFilterCreation(t *testing.T) { 319 t.Parallel() 320 321 var ( 322 db = rawdb.NewMemoryDatabase() 323 backend = &testBackend{db: db} 324 api = NewPublicFilterAPI(backend, false, deadline) 325 ) 326 327 // different situations where log filter creation should fail. 328 // Reason: fromBlock > toBlock 329 testCases := []FilterCriteria{ 330 0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, 331 1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 332 2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 333 } 334 335 for i, test := range testCases { 336 if _, err := api.NewFilter(test); err == nil { 337 t.Errorf("Expected NewFilter for case #%d to fail", i) 338 } 339 } 340 } 341 342 func TestInvalidGetLogsRequest(t *testing.T) { 343 var ( 344 db = rawdb.NewMemoryDatabase() 345 backend = &testBackend{db: db} 346 api = NewPublicFilterAPI(backend, false, deadline) 347 blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 348 ) 349 350 // Reason: Cannot specify both BlockHash and FromBlock/ToBlock) 351 testCases := []FilterCriteria{ 352 0: {BlockHash: &blockHash, FromBlock: big.NewInt(100)}, 353 1: {BlockHash: &blockHash, ToBlock: big.NewInt(500)}, 354 2: {BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, 355 } 356 357 for i, test := range testCases { 358 if _, err := api.GetLogs(context.Background(), test); err == nil { 359 t.Errorf("Expected Logs for case #%d to fail", i) 360 } 361 } 362 } 363 364 // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed. 365 func TestLogFilter(t *testing.T) { 366 t.Parallel() 367 368 var ( 369 db = rawdb.NewMemoryDatabase() 370 backend = &testBackend{db: db} 371 api = NewPublicFilterAPI(backend, false, deadline) 372 373 firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") 374 secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") 375 thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") 376 notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") 377 firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 378 secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") 379 notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999") 380 381 // posted twice, once as regular logs and once as pending logs. 382 allLogs = []*types.Log{ 383 {Address: firstAddr}, 384 {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}, 385 {Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}, 386 {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2}, 387 {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}, 388 } 389 390 expectedCase7 = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]} 391 expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]} 392 393 testCases = []struct { 394 crit FilterCriteria 395 expected []*types.Log 396 id rpc.ID 397 }{ 398 // match all 399 0: {FilterCriteria{}, allLogs, ""}, 400 // match none due to no matching addresses 401 1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""}, 402 // match logs based on addresses, ignore topics 403 2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""}, 404 // match none due to no matching topics (match with address) 405 3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""}, 406 // match logs based on addresses and topics 407 4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""}, 408 // match logs based on multiple addresses and "or" topics 409 5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""}, 410 // logs in the pending block 411 6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""}, 412 // mined logs with block num >= 2 or pending logs 413 7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""}, 414 // all "mined" logs with block num >= 2 415 8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""}, 416 // all "mined" logs 417 9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""}, 418 // all "mined" logs with 1>= block num <=2 and topic secondTopic 419 10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""}, 420 // all "mined" and pending logs with topic firstTopic 421 11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""}, 422 // match all logs due to wildcard topic 423 12: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], ""}, 424 } 425 ) 426 427 // create all filters 428 for i := range testCases { 429 testCases[i].id, _ = api.NewFilter(testCases[i].crit) 430 } 431 432 // raise events 433 time.Sleep(1 * time.Second) 434 if nsend := backend.logsFeed.Send(allLogs); nsend == 0 { 435 t.Fatal("Logs event not delivered") 436 } 437 if nsend := backend.pendingLogsFeed.Send(allLogs); nsend == 0 { 438 t.Fatal("Pending logs event not delivered") 439 } 440 441 for i, tt := range testCases { 442 var fetched []*types.Log 443 timeout := time.Now().Add(1 * time.Second) 444 for { // fetch all expected logs 445 results, err := api.GetFilterChanges(tt.id) 446 if err != nil { 447 t.Fatalf("Unable to fetch logs: %v", err) 448 } 449 450 fetched = append(fetched, results.([]*types.Log)...) 451 if len(fetched) >= len(tt.expected) { 452 break 453 } 454 // check timeout 455 if time.Now().After(timeout) { 456 break 457 } 458 459 time.Sleep(100 * time.Millisecond) 460 } 461 462 if len(fetched) != len(tt.expected) { 463 t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)) 464 return 465 } 466 467 for l := range fetched { 468 if fetched[l].Removed { 469 t.Errorf("expected log not to be removed for log %d in case %d", l, i) 470 } 471 if !reflect.DeepEqual(fetched[l], tt.expected[l]) { 472 t.Errorf("invalid log on index %d for case %d", l, i) 473 } 474 } 475 } 476 } 477 478 // TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event feed. 479 func TestPendingLogsSubscription(t *testing.T) { 480 t.Parallel() 481 482 var ( 483 db = rawdb.NewMemoryDatabase() 484 backend = &testBackend{db: db} 485 api = NewPublicFilterAPI(backend, false, deadline) 486 487 firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") 488 secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") 489 thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") 490 notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") 491 firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 492 secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") 493 thirdTopic = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333") 494 fourthTopic = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444") 495 notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999") 496 497 allLogs = [][]*types.Log{ 498 {{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}, 499 {{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}, 500 {{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}, 501 {{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}, 502 {{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}, 503 { 504 {Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5}, 505 {Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5}, 506 {Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5}, 507 {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5}, 508 }, 509 } 510 511 pendingBlockNumber = big.NewInt(rpc.PendingBlockNumber.Int64()) 512 513 testCases = []struct { 514 crit electroneum.FilterQuery 515 expected []*types.Log 516 c chan []*types.Log 517 sub *Subscription 518 err chan error 519 }{ 520 // match all 521 { 522 electroneum.FilterQuery{FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 523 flattenLogs(allLogs), 524 nil, nil, nil, 525 }, 526 // match none due to no matching addresses 527 { 528 electroneum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 529 nil, 530 nil, nil, nil, 531 }, 532 // match logs based on addresses, ignore topics 533 { 534 electroneum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 535 append(flattenLogs(allLogs[:2]), allLogs[5][3]), 536 nil, nil, nil, 537 }, 538 // match none due to no matching topics (match with address) 539 { 540 electroneum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 541 nil, 542 nil, nil, nil, 543 }, 544 // match logs based on addresses and topics 545 { 546 electroneum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 547 append(flattenLogs(allLogs[3:5]), allLogs[5][0]), 548 nil, nil, nil, 549 }, 550 // match logs based on multiple addresses and "or" topics 551 { 552 electroneum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 553 append(flattenLogs(allLogs[2:5]), allLogs[5][0]), 554 nil, nil, nil, 555 }, 556 // multiple pending logs, should match only 2 topics from the logs in block 5 557 { 558 electroneum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 559 []*types.Log{allLogs[5][0], allLogs[5][2]}, 560 nil, nil, nil, 561 }, 562 // match none due to only matching new mined logs 563 { 564 electroneum.FilterQuery{}, 565 nil, 566 nil, nil, nil, 567 }, 568 // match none due to only matching mined logs within a specific block range 569 { 570 electroneum.FilterQuery{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, 571 nil, 572 nil, nil, nil, 573 }, 574 // match all due to matching mined and pending logs 575 { 576 electroneum.FilterQuery{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, 577 flattenLogs(allLogs), 578 nil, nil, nil, 579 }, 580 // match none due to matching logs from a specific block number to new mined blocks 581 { 582 electroneum.FilterQuery{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, 583 nil, 584 nil, nil, nil, 585 }, 586 } 587 ) 588 589 // create all subscriptions, this ensures all subscriptions are created before the events are posted. 590 // on slow machines this could otherwise lead to missing events when the subscription is created after 591 // (some) events are posted. 592 for i := range testCases { 593 testCases[i].c = make(chan []*types.Log) 594 testCases[i].err = make(chan error) 595 596 var err error 597 testCases[i].sub, err = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c) 598 if err != nil { 599 t.Fatalf("SubscribeLogs %d failed: %v\n", i, err) 600 } 601 } 602 603 for n, test := range testCases { 604 i := n 605 tt := test 606 go func() { 607 defer tt.sub.Unsubscribe() 608 609 var fetched []*types.Log 610 611 timeout := time.After(1 * time.Second) 612 fetchLoop: 613 for { 614 select { 615 case logs := <-tt.c: 616 // Do not break early if we've fetched greater, or equal, 617 // to the number of logs expected. This ensures we do not 618 // deadlock the filter system because it will do a blocking 619 // send on this channel if another log arrives. 620 fetched = append(fetched, logs...) 621 case <-timeout: 622 break fetchLoop 623 } 624 } 625 626 if len(fetched) != len(tt.expected) { 627 tt.err <- fmt.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)) 628 return 629 } 630 631 for l := range fetched { 632 if fetched[l].Removed { 633 tt.err <- fmt.Errorf("expected log not to be removed for log %d in case %d", l, i) 634 return 635 } 636 if !reflect.DeepEqual(fetched[l], tt.expected[l]) { 637 tt.err <- fmt.Errorf("invalid log on index %d for case %d\n", l, i) 638 return 639 } 640 } 641 tt.err <- nil 642 }() 643 } 644 645 // raise events 646 for _, ev := range allLogs { 647 backend.pendingLogsFeed.Send(ev) 648 } 649 650 for i := range testCases { 651 err := <-testCases[i].err 652 if err != nil { 653 t.Fatalf("test %d failed: %v", i, err) 654 } 655 <-testCases[i].sub.Err() 656 } 657 } 658 659 // TestPendingTxFilterDeadlock tests if the event loop hangs when pending 660 // txes arrive at the same time that one of multiple filters is timing out. 661 // Please refer to #22131 for more details. 662 func TestPendingTxFilterDeadlock(t *testing.T) { 663 t.Parallel() 664 timeout := 100 * time.Millisecond 665 666 var ( 667 db = rawdb.NewMemoryDatabase() 668 backend = &testBackend{db: db} 669 api = NewPublicFilterAPI(backend, false, timeout) 670 done = make(chan struct{}) 671 ) 672 673 go func() { 674 // Bombard feed with txes until signal was received to stop 675 i := uint64(0) 676 for { 677 select { 678 case <-done: 679 return 680 default: 681 } 682 683 tx := types.NewTransaction(i, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil) 684 backend.txFeed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx}}) 685 i++ 686 } 687 }() 688 689 // Create a bunch of filters that will 690 // timeout either in 100ms or 200ms 691 fids := make([]rpc.ID, 20) 692 for i := 0; i < len(fids); i++ { 693 fid := api.NewPendingTransactionFilter() 694 fids[i] = fid 695 // Wait for at least one tx to arrive in filter 696 for { 697 hashes, err := api.GetFilterChanges(fid) 698 if err != nil { 699 t.Fatalf("Filter should exist: %v\n", err) 700 } 701 if len(hashes.([]common.Hash)) > 0 { 702 break 703 } 704 runtime.Gosched() 705 } 706 } 707 708 // Wait until filters have timed out 709 time.Sleep(3 * timeout) 710 711 // If tx loop doesn't consume `done` after a second 712 // it's hanging. 713 select { 714 case done <- struct{}{}: 715 // Check that all filters have been uninstalled 716 for _, fid := range fids { 717 if _, err := api.GetFilterChanges(fid); err == nil { 718 t.Errorf("Filter %s should have been uninstalled\n", fid) 719 } 720 } 721 case <-time.After(1 * time.Second): 722 t.Error("Tx sending loop hangs") 723 } 724 } 725 726 func flattenLogs(pl [][]*types.Log) []*types.Log { 727 var logs []*types.Log 728 for _, l := range pl { 729 logs = append(logs, l...) 730 } 731 return logs 732 }