github.com/tacshi/go-ethereum@v0.0.0-20230616113857-84a434e20921/eth/filters/filter_system_test.go (about) 1 // Copyright 2016 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package filters 18 19 import ( 20 "context" 21 "errors" 22 "fmt" 23 "math/big" 24 "math/rand" 25 "reflect" 26 "runtime" 27 "testing" 28 "time" 29 30 "github.com/tacshi/go-ethereum" 31 "github.com/tacshi/go-ethereum/common" 32 "github.com/tacshi/go-ethereum/consensus/ethash" 33 "github.com/tacshi/go-ethereum/core" 34 "github.com/tacshi/go-ethereum/core/bloombits" 35 "github.com/tacshi/go-ethereum/core/rawdb" 36 "github.com/tacshi/go-ethereum/core/types" 37 "github.com/tacshi/go-ethereum/crypto" 38 "github.com/tacshi/go-ethereum/ethdb" 39 "github.com/tacshi/go-ethereum/event" 40 "github.com/tacshi/go-ethereum/internal/ethapi" 41 "github.com/tacshi/go-ethereum/params" 42 "github.com/tacshi/go-ethereum/rpc" 43 ) 44 45 type testBackend struct { 46 db ethdb.Database 47 sections uint64 48 txFeed event.Feed 49 logsFeed event.Feed 50 rmLogsFeed event.Feed 51 pendingLogsFeed event.Feed 52 chainFeed event.Feed 53 } 54 55 func (b *testBackend) ChainConfig() *params.ChainConfig { 56 return params.TestChainConfig 57 } 58 59 func (b *testBackend) CurrentHeader() *types.Header { 60 hdr, _ := b.HeaderByNumber(context.TODO(), rpc.LatestBlockNumber) 61 return hdr 62 } 63 64 func (b *testBackend) ChainDb() ethdb.Database { 65 return b.db 66 } 67 68 func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) { 69 var ( 70 hash common.Hash 71 num uint64 72 ) 73 switch blockNr { 74 case rpc.LatestBlockNumber: 75 hash = rawdb.ReadHeadBlockHash(b.db) 76 number := rawdb.ReadHeaderNumber(b.db, hash) 77 if number == nil { 78 return nil, nil 79 } 80 num = *number 81 case rpc.FinalizedBlockNumber: 82 hash = rawdb.ReadFinalizedBlockHash(b.db) 83 number := rawdb.ReadHeaderNumber(b.db, hash) 84 if number == nil { 85 return nil, nil 86 } 87 num = *number 88 case rpc.SafeBlockNumber: 89 return nil, errors.New("safe block not found") 90 default: 91 num = uint64(blockNr) 92 hash = rawdb.ReadCanonicalHash(b.db, num) 93 } 94 return rawdb.ReadHeader(b.db, hash, num), nil 95 } 96 97 func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { 98 number := rawdb.ReadHeaderNumber(b.db, hash) 99 if number == nil { 100 return nil, nil 101 } 102 return rawdb.ReadHeader(b.db, hash, *number), nil 103 } 104 105 func (b *testBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { 106 if body := rawdb.ReadBody(b.db, hash, uint64(number)); body != nil { 107 return body, nil 108 } 109 return nil, errors.New("block body not found") 110 } 111 112 func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { 113 if number := rawdb.ReadHeaderNumber(b.db, hash); number != nil { 114 return rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig), nil 115 } 116 return nil, nil 117 } 118 119 func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { 120 logs := rawdb.ReadLogs(b.db, hash, number, params.TestChainConfig) 121 return logs, nil 122 } 123 124 func (b *testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { 125 return nil, nil 126 } 127 128 func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { 129 return b.txFeed.Subscribe(ch) 130 } 131 132 func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { 133 return b.rmLogsFeed.Subscribe(ch) 134 } 135 136 func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { 137 return b.logsFeed.Subscribe(ch) 138 } 139 140 func (b *testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { 141 return b.pendingLogsFeed.Subscribe(ch) 142 } 143 144 func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { 145 return b.chainFeed.Subscribe(ch) 146 } 147 148 func (b *testBackend) BloomStatus() (uint64, uint64) { 149 return params.BloomBitsBlocks, b.sections 150 } 151 152 func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) { 153 requests := make(chan chan *bloombits.Retrieval) 154 155 go session.Multiplex(16, 0, requests) 156 go func() { 157 for { 158 // Wait for a service request or a shutdown 159 select { 160 case <-ctx.Done(): 161 return 162 163 case request := <-requests: 164 task := <-request 165 166 task.Bitsets = make([][]byte, len(task.Sections)) 167 for i, section := range task.Sections { 168 if rand.Int()%4 != 0 { // Handle occasional missing deliveries 169 head := rawdb.ReadCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1) 170 task.Bitsets[i], _ = rawdb.ReadBloomBits(b.db, task.Bit, section, head) 171 } 172 } 173 request <- task 174 } 175 } 176 }() 177 } 178 179 func newTestFilterSystem(t testing.TB, db ethdb.Database, cfg Config) (*testBackend, *FilterSystem) { 180 backend := &testBackend{db: db} 181 sys := NewFilterSystem(backend, cfg) 182 return backend, sys 183 } 184 185 // TestBlockSubscription tests if a block subscription returns block hashes for posted chain events. 186 // It creates multiple subscriptions: 187 // - one at the start and should receive all posted chain events and a second (blockHashes) 188 // - one that is created after a cutoff moment and uninstalled after a second cutoff moment (blockHashes[cutoff1:cutoff2]) 189 // - one that is created after the second cutoff moment (blockHashes[cutoff2:]) 190 func TestBlockSubscription(t *testing.T) { 191 t.Parallel() 192 193 var ( 194 db = rawdb.NewMemoryDatabase() 195 backend, sys = newTestFilterSystem(t, db, Config{}) 196 api = NewFilterAPI(sys, false) 197 genesis = &core.Genesis{ 198 Config: params.TestChainConfig, 199 BaseFee: big.NewInt(params.InitialBaseFee), 200 } 201 _, chain, _ = core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 10, func(i int, gen *core.BlockGen) {}) 202 chainEvents = []core.ChainEvent{} 203 ) 204 205 for _, blk := range chain { 206 chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk}) 207 } 208 209 chan0 := make(chan *types.Header) 210 sub0 := api.events.SubscribeNewHeads(chan0) 211 chan1 := make(chan *types.Header) 212 sub1 := api.events.SubscribeNewHeads(chan1) 213 214 go func() { // simulate client 215 i1, i2 := 0, 0 216 for i1 != len(chainEvents) || i2 != len(chainEvents) { 217 select { 218 case header := <-chan0: 219 if chainEvents[i1].Hash != header.Hash() { 220 t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash()) 221 } 222 i1++ 223 case header := <-chan1: 224 if chainEvents[i2].Hash != header.Hash() { 225 t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash()) 226 } 227 i2++ 228 } 229 } 230 231 sub0.Unsubscribe() 232 sub1.Unsubscribe() 233 }() 234 235 time.Sleep(1 * time.Second) 236 for _, e := range chainEvents { 237 backend.chainFeed.Send(e) 238 } 239 240 <-sub0.Err() 241 <-sub1.Err() 242 } 243 244 // TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the event mux. 245 func TestPendingTxFilter(t *testing.T) { 246 t.Parallel() 247 248 var ( 249 db = rawdb.NewMemoryDatabase() 250 backend, sys = newTestFilterSystem(t, db, Config{}) 251 api = NewFilterAPI(sys, false) 252 253 transactions = []*types.Transaction{ 254 types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 255 types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 256 types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 257 types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 258 types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 259 } 260 261 hashes []common.Hash 262 ) 263 264 fid0 := api.NewPendingTransactionFilter(nil) 265 266 time.Sleep(1 * time.Second) 267 backend.txFeed.Send(core.NewTxsEvent{Txs: transactions}) 268 269 timeout := time.Now().Add(1 * time.Second) 270 for { 271 results, err := api.GetFilterChanges(fid0) 272 if err != nil { 273 t.Fatalf("Unable to retrieve logs: %v", err) 274 } 275 276 h := results.([]common.Hash) 277 hashes = append(hashes, h...) 278 if len(hashes) >= len(transactions) { 279 break 280 } 281 // check timeout 282 if time.Now().After(timeout) { 283 break 284 } 285 286 time.Sleep(100 * time.Millisecond) 287 } 288 289 if len(hashes) != len(transactions) { 290 t.Errorf("invalid number of transactions, want %d transactions(s), got %d", len(transactions), len(hashes)) 291 return 292 } 293 for i := range hashes { 294 if hashes[i] != transactions[i].Hash() { 295 t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i]) 296 } 297 } 298 } 299 300 // TestPendingTxFilterFullTx tests whether pending tx filters retrieve all pending transactions that are posted to the event mux. 301 func TestPendingTxFilterFullTx(t *testing.T) { 302 t.Parallel() 303 304 var ( 305 db = rawdb.NewMemoryDatabase() 306 backend, sys = newTestFilterSystem(t, db, Config{}) 307 api = NewFilterAPI(sys, false) 308 309 transactions = []*types.Transaction{ 310 types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 311 types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 312 types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 313 types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 314 types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 315 } 316 317 txs []*ethapi.RPCTransaction 318 ) 319 320 fullTx := true 321 fid0 := api.NewPendingTransactionFilter(&fullTx) 322 323 time.Sleep(1 * time.Second) 324 backend.txFeed.Send(core.NewTxsEvent{Txs: transactions}) 325 326 timeout := time.Now().Add(1 * time.Second) 327 for { 328 results, err := api.GetFilterChanges(fid0) 329 if err != nil { 330 t.Fatalf("Unable to retrieve logs: %v", err) 331 } 332 333 tx := results.([]*ethapi.RPCTransaction) 334 txs = append(txs, tx...) 335 if len(txs) >= len(transactions) { 336 break 337 } 338 // check timeout 339 if time.Now().After(timeout) { 340 break 341 } 342 343 time.Sleep(100 * time.Millisecond) 344 } 345 346 if len(txs) != len(transactions) { 347 t.Errorf("invalid number of transactions, want %d transactions(s), got %d", len(transactions), len(txs)) 348 return 349 } 350 for i := range txs { 351 if txs[i].Hash != transactions[i].Hash() { 352 t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), txs[i].Hash) 353 } 354 } 355 } 356 357 // TestLogFilterCreation test whether a given filter criteria makes sense. 358 // If not it must return an error. 359 func TestLogFilterCreation(t *testing.T) { 360 var ( 361 db = rawdb.NewMemoryDatabase() 362 _, sys = newTestFilterSystem(t, db, Config{}) 363 api = NewFilterAPI(sys, false) 364 365 testCases = []struct { 366 crit FilterCriteria 367 success bool 368 }{ 369 // defaults 370 {FilterCriteria{}, true}, 371 // valid block number range 372 {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true}, 373 // "mined" block range to pending 374 {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true}, 375 // new mined and pending blocks 376 {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true}, 377 // from block "higher" than to block 378 {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false}, 379 // from block "higher" than to block 380 {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false}, 381 // from block "higher" than to block 382 {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false}, 383 // from block "higher" than to block 384 {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false}, 385 } 386 ) 387 388 for i, test := range testCases { 389 id, err := api.NewFilter(test.crit) 390 if err != nil && test.success { 391 t.Errorf("expected filter creation for case %d to success, got %v", i, err) 392 } 393 if err == nil { 394 api.UninstallFilter(id) 395 if !test.success { 396 t.Errorf("expected testcase %d to fail with an error", i) 397 } 398 } 399 } 400 } 401 402 // TestInvalidLogFilterCreation tests whether invalid filter log criteria results in an error 403 // when the filter is created. 404 func TestInvalidLogFilterCreation(t *testing.T) { 405 t.Parallel() 406 407 var ( 408 db = rawdb.NewMemoryDatabase() 409 _, sys = newTestFilterSystem(t, db, Config{}) 410 api = NewFilterAPI(sys, false) 411 ) 412 413 // different situations where log filter creation should fail. 414 // Reason: fromBlock > toBlock 415 testCases := []FilterCriteria{ 416 0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, 417 1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 418 2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 419 } 420 421 for i, test := range testCases { 422 if _, err := api.NewFilter(test); err == nil { 423 t.Errorf("Expected NewFilter for case #%d to fail", i) 424 } 425 } 426 } 427 428 func TestInvalidGetLogsRequest(t *testing.T) { 429 var ( 430 db = rawdb.NewMemoryDatabase() 431 _, sys = newTestFilterSystem(t, db, Config{}) 432 api = NewFilterAPI(sys, false) 433 blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 434 ) 435 436 // Reason: Cannot specify both BlockHash and FromBlock/ToBlock) 437 testCases := []FilterCriteria{ 438 0: {BlockHash: &blockHash, FromBlock: big.NewInt(100)}, 439 1: {BlockHash: &blockHash, ToBlock: big.NewInt(500)}, 440 2: {BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, 441 } 442 443 for i, test := range testCases { 444 if _, err := api.GetLogs(context.Background(), test); err == nil { 445 t.Errorf("Expected Logs for case #%d to fail", i) 446 } 447 } 448 } 449 450 // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed. 451 func TestLogFilter(t *testing.T) { 452 t.Parallel() 453 454 var ( 455 db = rawdb.NewMemoryDatabase() 456 backend, sys = newTestFilterSystem(t, db, Config{}) 457 api = NewFilterAPI(sys, false) 458 459 firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") 460 secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") 461 thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") 462 notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") 463 firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 464 secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") 465 notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999") 466 467 // posted twice, once as regular logs and once as pending logs. 468 allLogs = []*types.Log{ 469 {Address: firstAddr}, 470 {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}, 471 {Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}, 472 {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2}, 473 {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}, 474 } 475 476 expectedCase7 = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]} 477 expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]} 478 479 testCases = []struct { 480 crit FilterCriteria 481 expected []*types.Log 482 id rpc.ID 483 }{ 484 // match all 485 0: {FilterCriteria{}, allLogs, ""}, 486 // match none due to no matching addresses 487 1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""}, 488 // match logs based on addresses, ignore topics 489 2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""}, 490 // match none due to no matching topics (match with address) 491 3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""}, 492 // match logs based on addresses and topics 493 4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""}, 494 // match logs based on multiple addresses and "or" topics 495 5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""}, 496 // logs in the pending block 497 6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""}, 498 // mined logs with block num >= 2 or pending logs 499 7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""}, 500 // all "mined" logs with block num >= 2 501 8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""}, 502 // all "mined" logs 503 9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""}, 504 // all "mined" logs with 1>= block num <=2 and topic secondTopic 505 10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""}, 506 // all "mined" and pending logs with topic firstTopic 507 11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""}, 508 // match all logs due to wildcard topic 509 12: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], ""}, 510 } 511 ) 512 513 // create all filters 514 for i := range testCases { 515 testCases[i].id, _ = api.NewFilter(testCases[i].crit) 516 } 517 518 // raise events 519 time.Sleep(1 * time.Second) 520 if nsend := backend.logsFeed.Send(allLogs); nsend == 0 { 521 t.Fatal("Logs event not delivered") 522 } 523 if nsend := backend.pendingLogsFeed.Send(allLogs); nsend == 0 { 524 t.Fatal("Pending logs event not delivered") 525 } 526 527 for i, tt := range testCases { 528 var fetched []*types.Log 529 timeout := time.Now().Add(1 * time.Second) 530 for { // fetch all expected logs 531 results, err := api.GetFilterChanges(tt.id) 532 if err != nil { 533 t.Fatalf("Unable to fetch logs: %v", err) 534 } 535 536 fetched = append(fetched, results.([]*types.Log)...) 537 if len(fetched) >= len(tt.expected) { 538 break 539 } 540 // check timeout 541 if time.Now().After(timeout) { 542 break 543 } 544 545 time.Sleep(100 * time.Millisecond) 546 } 547 548 if len(fetched) != len(tt.expected) { 549 t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)) 550 return 551 } 552 553 for l := range fetched { 554 if fetched[l].Removed { 555 t.Errorf("expected log not to be removed for log %d in case %d", l, i) 556 } 557 if !reflect.DeepEqual(fetched[l], tt.expected[l]) { 558 t.Errorf("invalid log on index %d for case %d", l, i) 559 } 560 } 561 } 562 } 563 564 // TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event feed. 565 func TestPendingLogsSubscription(t *testing.T) { 566 t.Parallel() 567 568 var ( 569 db = rawdb.NewMemoryDatabase() 570 backend, sys = newTestFilterSystem(t, db, Config{}) 571 api = NewFilterAPI(sys, false) 572 573 firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") 574 secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") 575 thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") 576 notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") 577 firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 578 secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") 579 thirdTopic = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333") 580 fourthTopic = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444") 581 notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999") 582 583 allLogs = [][]*types.Log{ 584 {{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}, 585 {{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}, 586 {{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}, 587 {{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}, 588 {{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}, 589 { 590 {Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5}, 591 {Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5}, 592 {Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5}, 593 {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5}, 594 }, 595 } 596 597 pendingBlockNumber = big.NewInt(rpc.PendingBlockNumber.Int64()) 598 599 testCases = []struct { 600 crit ethereum.FilterQuery 601 expected []*types.Log 602 c chan []*types.Log 603 sub *Subscription 604 err chan error 605 }{ 606 // match all 607 { 608 ethereum.FilterQuery{FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 609 flattenLogs(allLogs), 610 nil, nil, nil, 611 }, 612 // match none due to no matching addresses 613 { 614 ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 615 nil, 616 nil, nil, nil, 617 }, 618 // match logs based on addresses, ignore topics 619 { 620 ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 621 append(flattenLogs(allLogs[:2]), allLogs[5][3]), 622 nil, nil, nil, 623 }, 624 // match none due to no matching topics (match with address) 625 { 626 ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 627 nil, 628 nil, nil, nil, 629 }, 630 // match logs based on addresses and topics 631 { 632 ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 633 append(flattenLogs(allLogs[3:5]), allLogs[5][0]), 634 nil, nil, nil, 635 }, 636 // match logs based on multiple addresses and "or" topics 637 { 638 ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 639 append(flattenLogs(allLogs[2:5]), allLogs[5][0]), 640 nil, nil, nil, 641 }, 642 // multiple pending logs, should match only 2 topics from the logs in block 5 643 { 644 ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 645 []*types.Log{allLogs[5][0], allLogs[5][2]}, 646 nil, nil, nil, 647 }, 648 // match none due to only matching new mined logs 649 { 650 ethereum.FilterQuery{}, 651 nil, 652 nil, nil, nil, 653 }, 654 // match none due to only matching mined logs within a specific block range 655 { 656 ethereum.FilterQuery{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, 657 nil, 658 nil, nil, nil, 659 }, 660 // match all due to matching mined and pending logs 661 { 662 ethereum.FilterQuery{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, 663 flattenLogs(allLogs), 664 nil, nil, nil, 665 }, 666 // match none due to matching logs from a specific block number to new mined blocks 667 { 668 ethereum.FilterQuery{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, 669 nil, 670 nil, nil, nil, 671 }, 672 } 673 ) 674 675 // create all subscriptions, this ensures all subscriptions are created before the events are posted. 676 // on slow machines this could otherwise lead to missing events when the subscription is created after 677 // (some) events are posted. 678 for i := range testCases { 679 testCases[i].c = make(chan []*types.Log) 680 testCases[i].err = make(chan error, 1) 681 682 var err error 683 testCases[i].sub, err = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c) 684 if err != nil { 685 t.Fatalf("SubscribeLogs %d failed: %v\n", i, err) 686 } 687 } 688 689 for n, test := range testCases { 690 i := n 691 tt := test 692 go func() { 693 defer tt.sub.Unsubscribe() 694 695 var fetched []*types.Log 696 697 timeout := time.After(1 * time.Second) 698 fetchLoop: 699 for { 700 select { 701 case logs := <-tt.c: 702 // Do not break early if we've fetched greater, or equal, 703 // to the number of logs expected. This ensures we do not 704 // deadlock the filter system because it will do a blocking 705 // send on this channel if another log arrives. 706 fetched = append(fetched, logs...) 707 case <-timeout: 708 break fetchLoop 709 } 710 } 711 712 if len(fetched) != len(tt.expected) { 713 tt.err <- fmt.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)) 714 return 715 } 716 717 for l := range fetched { 718 if fetched[l].Removed { 719 tt.err <- fmt.Errorf("expected log not to be removed for log %d in case %d", l, i) 720 return 721 } 722 if !reflect.DeepEqual(fetched[l], tt.expected[l]) { 723 tt.err <- fmt.Errorf("invalid log on index %d for case %d\n", l, i) 724 return 725 } 726 } 727 tt.err <- nil 728 }() 729 } 730 731 // raise events 732 for _, ev := range allLogs { 733 backend.pendingLogsFeed.Send(ev) 734 } 735 736 for i := range testCases { 737 err := <-testCases[i].err 738 if err != nil { 739 t.Fatalf("test %d failed: %v", i, err) 740 } 741 <-testCases[i].sub.Err() 742 } 743 } 744 745 func TestLightFilterLogs(t *testing.T) { 746 t.Parallel() 747 748 var ( 749 db = rawdb.NewMemoryDatabase() 750 backend, sys = newTestFilterSystem(t, db, Config{}) 751 api = NewFilterAPI(sys, true) 752 signer = types.HomesteadSigner{} 753 754 firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") 755 secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") 756 thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") 757 notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") 758 firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 759 secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") 760 761 // posted twice, once as regular logs and once as pending logs. 762 allLogs = []*types.Log{ 763 // Block 1 764 {Address: firstAddr, Topics: []common.Hash{}, Data: []byte{}, BlockNumber: 2, Index: 0}, 765 // Block 2 766 {Address: firstAddr, Topics: []common.Hash{firstTopic}, Data: []byte{}, BlockNumber: 3, Index: 0}, 767 {Address: secondAddr, Topics: []common.Hash{firstTopic}, Data: []byte{}, BlockNumber: 3, Index: 1}, 768 {Address: thirdAddress, Topics: []common.Hash{secondTopic}, Data: []byte{}, BlockNumber: 3, Index: 2}, 769 // Block 3 770 {Address: thirdAddress, Topics: []common.Hash{secondTopic}, Data: []byte{}, BlockNumber: 4, Index: 0}, 771 } 772 773 testCases = []struct { 774 crit FilterCriteria 775 expected []*types.Log 776 id rpc.ID 777 }{ 778 // match all 779 0: {FilterCriteria{}, allLogs, ""}, 780 // match none due to no matching addresses 781 1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""}, 782 // match logs based on addresses, ignore topics 783 2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""}, 784 // match logs based on addresses and topics 785 3: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""}, 786 // all logs with block num >= 3 787 4: {FilterCriteria{FromBlock: big.NewInt(3), ToBlock: big.NewInt(5)}, allLogs[1:], ""}, 788 // all logs 789 5: {FilterCriteria{FromBlock: big.NewInt(0), ToBlock: big.NewInt(5)}, allLogs, ""}, 790 // all logs with 1>= block num <=2 and topic secondTopic 791 6: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(3), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""}, 792 } 793 794 key, _ = crypto.GenerateKey() 795 addr = crypto.PubkeyToAddress(key.PublicKey) 796 genesis = &core.Genesis{Config: params.TestChainConfig, 797 Alloc: core.GenesisAlloc{ 798 addr: {Balance: big.NewInt(params.Ether)}, 799 }, 800 } 801 receipts = []*types.Receipt{{ 802 Logs: []*types.Log{allLogs[0]}, 803 }, { 804 Logs: []*types.Log{allLogs[1], allLogs[2], allLogs[3]}, 805 }, { 806 Logs: []*types.Log{allLogs[4]}, 807 }} 808 ) 809 810 _, blocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 4, func(i int, b *core.BlockGen) { 811 if i == 0 { 812 return 813 } 814 receipts[i-1].Bloom = types.CreateBloom(types.Receipts{receipts[i-1]}) 815 b.AddUncheckedReceipt(receipts[i-1]) 816 tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i - 1), To: &common.Address{}, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), signer, key) 817 b.AddTx(tx) 818 }) 819 for i, block := range blocks { 820 rawdb.WriteBlock(db, block) 821 rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) 822 rawdb.WriteHeadBlockHash(db, block.Hash()) 823 if i > 0 { 824 rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), []*types.Receipt{receipts[i-1]}) 825 } 826 } 827 // create all filters 828 for i := range testCases { 829 id, err := api.NewFilter(testCases[i].crit) 830 if err != nil { 831 t.Fatal(err) 832 } 833 testCases[i].id = id 834 } 835 836 // raise events 837 time.Sleep(1 * time.Second) 838 for _, block := range blocks { 839 backend.chainFeed.Send(core.ChainEvent{Block: block, Hash: common.Hash{}, Logs: allLogs}) 840 } 841 842 for i, tt := range testCases { 843 var fetched []*types.Log 844 timeout := time.Now().Add(1 * time.Second) 845 for { // fetch all expected logs 846 results, err := api.GetFilterChanges(tt.id) 847 if err != nil { 848 t.Fatalf("Unable to fetch logs: %v", err) 849 } 850 fetched = append(fetched, results.([]*types.Log)...) 851 if len(fetched) >= len(tt.expected) { 852 break 853 } 854 // check timeout 855 if time.Now().After(timeout) { 856 break 857 } 858 859 time.Sleep(100 * time.Millisecond) 860 } 861 862 if len(fetched) != len(tt.expected) { 863 t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)) 864 return 865 } 866 867 for l := range fetched { 868 if fetched[l].Removed { 869 t.Errorf("expected log not to be removed for log %d in case %d", l, i) 870 } 871 expected := *tt.expected[l] 872 blockNum := expected.BlockNumber - 1 873 expected.BlockHash = blocks[blockNum].Hash() 874 expected.TxHash = blocks[blockNum].Transactions()[0].Hash() 875 if !reflect.DeepEqual(fetched[l], &expected) { 876 t.Errorf("invalid log on index %d for case %d", l, i) 877 } 878 } 879 } 880 } 881 882 // TestPendingTxFilterDeadlock tests if the event loop hangs when pending 883 // txes arrive at the same time that one of multiple filters is timing out. 884 // Please refer to #22131 for more details. 885 func TestPendingTxFilterDeadlock(t *testing.T) { 886 t.Parallel() 887 timeout := 100 * time.Millisecond 888 889 var ( 890 db = rawdb.NewMemoryDatabase() 891 backend, sys = newTestFilterSystem(t, db, Config{Timeout: timeout}) 892 api = NewFilterAPI(sys, false) 893 done = make(chan struct{}) 894 ) 895 896 go func() { 897 // Bombard feed with txes until signal was received to stop 898 i := uint64(0) 899 for { 900 select { 901 case <-done: 902 return 903 default: 904 } 905 906 tx := types.NewTransaction(i, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil) 907 backend.txFeed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx}}) 908 i++ 909 } 910 }() 911 912 // Create a bunch of filters that will 913 // timeout either in 100ms or 200ms 914 fids := make([]rpc.ID, 20) 915 for i := 0; i < len(fids); i++ { 916 fid := api.NewPendingTransactionFilter(nil) 917 fids[i] = fid 918 // Wait for at least one tx to arrive in filter 919 for { 920 hashes, err := api.GetFilterChanges(fid) 921 if err != nil { 922 t.Fatalf("Filter should exist: %v\n", err) 923 } 924 if len(hashes.([]common.Hash)) > 0 { 925 break 926 } 927 runtime.Gosched() 928 } 929 } 930 931 // Wait until filters have timed out 932 time.Sleep(3 * timeout) 933 934 // If tx loop doesn't consume `done` after a second 935 // it's hanging. 936 select { 937 case done <- struct{}{}: 938 // Check that all filters have been uninstalled 939 for _, fid := range fids { 940 if _, err := api.GetFilterChanges(fid); err == nil { 941 t.Errorf("Filter %s should have been uninstalled\n", fid) 942 } 943 } 944 case <-time.After(1 * time.Second): 945 t.Error("Tx sending loop hangs") 946 } 947 } 948 949 func flattenLogs(pl [][]*types.Log) []*types.Log { 950 var logs []*types.Log 951 for _, l := range pl { 952 logs = append(logs, l...) 953 } 954 return logs 955 }