github.com/theQRL/go-zond@v0.1.1/zond/filters/filter_system_test.go (about) 1 // Copyright 2016 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package filters 18 19 import ( 20 "context" 21 "errors" 22 "fmt" 23 "math/big" 24 "math/rand" 25 "reflect" 26 "runtime" 27 "testing" 28 "time" 29 30 "github.com/theQRL/go-zond" 31 "github.com/theQRL/go-zond/common" 32 "github.com/theQRL/go-zond/consensus/ethash" 33 "github.com/theQRL/go-zond/core" 34 "github.com/theQRL/go-zond/core/bloombits" 35 "github.com/theQRL/go-zond/core/rawdb" 36 "github.com/theQRL/go-zond/core/types" 37 "github.com/theQRL/go-zond/crypto" 38 "github.com/theQRL/go-zond/event" 39 "github.com/theQRL/go-zond/internal/ethapi" 40 "github.com/theQRL/go-zond/params" 41 "github.com/theQRL/go-zond/rpc" 42 "github.com/theQRL/go-zond/zonddb" 43 ) 44 45 type testBackend struct { 46 db zonddb.Database 47 sections uint64 48 txFeed event.Feed 49 logsFeed event.Feed 50 rmLogsFeed event.Feed 51 pendingLogsFeed event.Feed 52 chainFeed event.Feed 53 pendingBlock *types.Block 54 pendingReceipts types.Receipts 55 } 56 57 func (b *testBackend) ChainConfig() *params.ChainConfig { 58 return params.TestChainConfig 59 } 60 61 func (b *testBackend) CurrentHeader() *types.Header { 62 hdr, _ := b.HeaderByNumber(context.TODO(), rpc.LatestBlockNumber) 63 return hdr 64 } 65 66 func (b *testBackend) ChainDb() zonddb.Database { 67 return b.db 68 } 69 70 func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) { 71 var ( 72 hash common.Hash 73 num uint64 74 ) 75 switch blockNr { 76 case rpc.LatestBlockNumber: 77 hash = rawdb.ReadHeadBlockHash(b.db) 78 number := rawdb.ReadHeaderNumber(b.db, hash) 79 if number == nil { 80 return nil, nil 81 } 82 num = *number 83 case rpc.FinalizedBlockNumber: 84 hash = rawdb.ReadFinalizedBlockHash(b.db) 85 number := rawdb.ReadHeaderNumber(b.db, hash) 86 if number == nil { 87 return nil, nil 88 } 89 num = *number 90 case rpc.SafeBlockNumber: 91 return nil, errors.New("safe block not found") 92 default: 93 num = uint64(blockNr) 94 hash = rawdb.ReadCanonicalHash(b.db, num) 95 } 96 return rawdb.ReadHeader(b.db, hash, num), nil 97 } 98 99 func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { 100 number := rawdb.ReadHeaderNumber(b.db, hash) 101 if number == nil { 102 return nil, nil 103 } 104 return rawdb.ReadHeader(b.db, hash, *number), nil 105 } 106 107 func (b *testBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { 108 if body := rawdb.ReadBody(b.db, hash, uint64(number)); body != nil { 109 return body, nil 110 } 111 return nil, errors.New("block body not found") 112 } 113 114 func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { 115 if number := rawdb.ReadHeaderNumber(b.db, hash); number != nil { 116 if header := rawdb.ReadHeader(b.db, hash, *number); header != nil { 117 return rawdb.ReadReceipts(b.db, hash, *number, header.Time, params.TestChainConfig), nil 118 } 119 } 120 return nil, nil 121 } 122 123 func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { 124 logs := rawdb.ReadLogs(b.db, hash, number) 125 return logs, nil 126 } 127 128 func (b *testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { 129 return b.pendingBlock, b.pendingReceipts 130 } 131 132 func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { 133 return b.txFeed.Subscribe(ch) 134 } 135 136 func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { 137 return b.rmLogsFeed.Subscribe(ch) 138 } 139 140 func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { 141 return b.logsFeed.Subscribe(ch) 142 } 143 144 func (b *testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { 145 return b.pendingLogsFeed.Subscribe(ch) 146 } 147 148 func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { 149 return b.chainFeed.Subscribe(ch) 150 } 151 152 func (b *testBackend) BloomStatus() (uint64, uint64) { 153 return params.BloomBitsBlocks, b.sections 154 } 155 156 func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) { 157 requests := make(chan chan *bloombits.Retrieval) 158 159 go session.Multiplex(16, 0, requests) 160 go func() { 161 for { 162 // Wait for a service request or a shutdown 163 select { 164 case <-ctx.Done(): 165 return 166 167 case request := <-requests: 168 task := <-request 169 170 task.Bitsets = make([][]byte, len(task.Sections)) 171 for i, section := range task.Sections { 172 if rand.Int()%4 != 0 { // Handle occasional missing deliveries 173 head := rawdb.ReadCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1) 174 task.Bitsets[i], _ = rawdb.ReadBloomBits(b.db, task.Bit, section, head) 175 } 176 } 177 request <- task 178 } 179 } 180 }() 181 } 182 183 func newTestFilterSystem(t testing.TB, db zonddb.Database, cfg Config) (*testBackend, *FilterSystem) { 184 backend := &testBackend{db: db} 185 sys := NewFilterSystem(backend, cfg) 186 return backend, sys 187 } 188 189 // TestBlockSubscription tests if a block subscription returns block hashes for posted chain events. 190 // It creates multiple subscriptions: 191 // - one at the start and should receive all posted chain events and a second (blockHashes) 192 // - one that is created after a cutoff moment and uninstalled after a second cutoff moment (blockHashes[cutoff1:cutoff2]) 193 // - one that is created after the second cutoff moment (blockHashes[cutoff2:]) 194 func TestBlockSubscription(t *testing.T) { 195 t.Parallel() 196 197 var ( 198 db = rawdb.NewMemoryDatabase() 199 backend, sys = newTestFilterSystem(t, db, Config{}) 200 api = NewFilterAPI(sys, false) 201 genesis = &core.Genesis{ 202 Config: params.TestChainConfig, 203 BaseFee: big.NewInt(params.InitialBaseFee), 204 } 205 _, chain, _ = core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 10, func(i int, gen *core.BlockGen) {}) 206 chainEvents = []core.ChainEvent{} 207 ) 208 209 for _, blk := range chain { 210 chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk}) 211 } 212 213 chan0 := make(chan *types.Header) 214 sub0 := api.events.SubscribeNewHeads(chan0) 215 chan1 := make(chan *types.Header) 216 sub1 := api.events.SubscribeNewHeads(chan1) 217 218 go func() { // simulate client 219 i1, i2 := 0, 0 220 for i1 != len(chainEvents) || i2 != len(chainEvents) { 221 select { 222 case header := <-chan0: 223 if chainEvents[i1].Hash != header.Hash() { 224 t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash()) 225 } 226 i1++ 227 case header := <-chan1: 228 if chainEvents[i2].Hash != header.Hash() { 229 t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash()) 230 } 231 i2++ 232 } 233 } 234 235 sub0.Unsubscribe() 236 sub1.Unsubscribe() 237 }() 238 239 time.Sleep(1 * time.Second) 240 for _, e := range chainEvents { 241 backend.chainFeed.Send(e) 242 } 243 244 <-sub0.Err() 245 <-sub1.Err() 246 } 247 248 // TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the event mux. 249 func TestPendingTxFilter(t *testing.T) { 250 t.Parallel() 251 252 var ( 253 db = rawdb.NewMemoryDatabase() 254 backend, sys = newTestFilterSystem(t, db, Config{}) 255 api = NewFilterAPI(sys, false) 256 257 transactions = []*types.Transaction{ 258 types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 259 types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 260 types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 261 types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 262 types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 263 } 264 265 hashes []common.Hash 266 ) 267 268 fid0 := api.NewPendingTransactionFilter(nil) 269 270 time.Sleep(1 * time.Second) 271 backend.txFeed.Send(core.NewTxsEvent{Txs: transactions}) 272 273 timeout := time.Now().Add(1 * time.Second) 274 for { 275 results, err := api.GetFilterChanges(fid0) 276 if err != nil { 277 t.Fatalf("Unable to retrieve logs: %v", err) 278 } 279 280 h := results.([]common.Hash) 281 hashes = append(hashes, h...) 282 if len(hashes) >= len(transactions) { 283 break 284 } 285 // check timeout 286 if time.Now().After(timeout) { 287 break 288 } 289 290 time.Sleep(100 * time.Millisecond) 291 } 292 293 if len(hashes) != len(transactions) { 294 t.Errorf("invalid number of transactions, want %d transactions(s), got %d", len(transactions), len(hashes)) 295 return 296 } 297 for i := range hashes { 298 if hashes[i] != transactions[i].Hash() { 299 t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i]) 300 } 301 } 302 } 303 304 // TestPendingTxFilterFullTx tests whether pending tx filters retrieve all pending transactions that are posted to the event mux. 305 func TestPendingTxFilterFullTx(t *testing.T) { 306 t.Parallel() 307 308 var ( 309 db = rawdb.NewMemoryDatabase() 310 backend, sys = newTestFilterSystem(t, db, Config{}) 311 api = NewFilterAPI(sys, false) 312 313 transactions = []*types.Transaction{ 314 types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 315 types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 316 types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 317 types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 318 types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), 319 } 320 321 txs []*ethapi.RPCTransaction 322 ) 323 324 fullTx := true 325 fid0 := api.NewPendingTransactionFilter(&fullTx) 326 327 time.Sleep(1 * time.Second) 328 backend.txFeed.Send(core.NewTxsEvent{Txs: transactions}) 329 330 timeout := time.Now().Add(1 * time.Second) 331 for { 332 results, err := api.GetFilterChanges(fid0) 333 if err != nil { 334 t.Fatalf("Unable to retrieve logs: %v", err) 335 } 336 337 tx := results.([]*ethapi.RPCTransaction) 338 txs = append(txs, tx...) 339 if len(txs) >= len(transactions) { 340 break 341 } 342 // check timeout 343 if time.Now().After(timeout) { 344 break 345 } 346 347 time.Sleep(100 * time.Millisecond) 348 } 349 350 if len(txs) != len(transactions) { 351 t.Errorf("invalid number of transactions, want %d transactions(s), got %d", len(transactions), len(txs)) 352 return 353 } 354 for i := range txs { 355 if txs[i].Hash != transactions[i].Hash() { 356 t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), txs[i].Hash) 357 } 358 } 359 } 360 361 // TestLogFilterCreation test whether a given filter criteria makes sense. 362 // If not it must return an error. 363 func TestLogFilterCreation(t *testing.T) { 364 var ( 365 db = rawdb.NewMemoryDatabase() 366 _, sys = newTestFilterSystem(t, db, Config{}) 367 api = NewFilterAPI(sys, false) 368 369 testCases = []struct { 370 crit FilterCriteria 371 success bool 372 }{ 373 // defaults 374 {FilterCriteria{}, true}, 375 // valid block number range 376 {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true}, 377 // "mined" block range to pending 378 {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true}, 379 // new mined and pending blocks 380 {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true}, 381 // from block "higher" than to block 382 {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false}, 383 // from block "higher" than to block 384 {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false}, 385 // from block "higher" than to block 386 {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false}, 387 // from block "higher" than to block 388 {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false}, 389 } 390 ) 391 392 for i, test := range testCases { 393 id, err := api.NewFilter(test.crit) 394 if err != nil && test.success { 395 t.Errorf("expected filter creation for case %d to success, got %v", i, err) 396 } 397 if err == nil { 398 api.UninstallFilter(id) 399 if !test.success { 400 t.Errorf("expected testcase %d to fail with an error", i) 401 } 402 } 403 } 404 } 405 406 // TestInvalidLogFilterCreation tests whether invalid filter log criteria results in an error 407 // when the filter is created. 408 func TestInvalidLogFilterCreation(t *testing.T) { 409 t.Parallel() 410 411 var ( 412 db = rawdb.NewMemoryDatabase() 413 _, sys = newTestFilterSystem(t, db, Config{}) 414 api = NewFilterAPI(sys, false) 415 ) 416 417 // different situations where log filter creation should fail. 418 // Reason: fromBlock > toBlock 419 testCases := []FilterCriteria{ 420 0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, 421 1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 422 2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, 423 } 424 425 for i, test := range testCases { 426 if _, err := api.NewFilter(test); err == nil { 427 t.Errorf("Expected NewFilter for case #%d to fail", i) 428 } 429 } 430 } 431 432 func TestInvalidGetLogsRequest(t *testing.T) { 433 var ( 434 db = rawdb.NewMemoryDatabase() 435 _, sys = newTestFilterSystem(t, db, Config{}) 436 api = NewFilterAPI(sys, false) 437 blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 438 ) 439 440 // Reason: Cannot specify both BlockHash and FromBlock/ToBlock) 441 testCases := []FilterCriteria{ 442 0: {BlockHash: &blockHash, FromBlock: big.NewInt(100)}, 443 1: {BlockHash: &blockHash, ToBlock: big.NewInt(500)}, 444 2: {BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, 445 } 446 447 for i, test := range testCases { 448 if _, err := api.GetLogs(context.Background(), test); err == nil { 449 t.Errorf("Expected Logs for case #%d to fail", i) 450 } 451 } 452 } 453 454 // TestLogFilter tests whether log filters match the correct logs that are posted to the event feed. 455 func TestLogFilter(t *testing.T) { 456 t.Parallel() 457 458 var ( 459 db = rawdb.NewMemoryDatabase() 460 backend, sys = newTestFilterSystem(t, db, Config{}) 461 api = NewFilterAPI(sys, false) 462 463 firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") 464 secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") 465 thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") 466 notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") 467 firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 468 secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") 469 notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999") 470 471 // posted twice, once as regular logs and once as pending logs. 472 allLogs = []*types.Log{ 473 {Address: firstAddr}, 474 {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}, 475 {Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}, 476 {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2}, 477 {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}, 478 } 479 480 expectedCase7 = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]} 481 expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]} 482 483 testCases = []struct { 484 crit FilterCriteria 485 expected []*types.Log 486 id rpc.ID 487 }{ 488 // match all 489 0: {FilterCriteria{}, allLogs, ""}, 490 // match none due to no matching addresses 491 1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""}, 492 // match logs based on addresses, ignore topics 493 2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""}, 494 // match none due to no matching topics (match with address) 495 3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""}, 496 // match logs based on addresses and topics 497 4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""}, 498 // match logs based on multiple addresses and "or" topics 499 5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""}, 500 // logs in the pending block 501 6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""}, 502 // mined logs with block num >= 2 or pending logs 503 7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""}, 504 // all "mined" logs with block num >= 2 505 8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""}, 506 // all "mined" logs 507 9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""}, 508 // all "mined" logs with 1>= block num <=2 and topic secondTopic 509 10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""}, 510 // all "mined" and pending logs with topic firstTopic 511 11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""}, 512 // match all logs due to wildcard topic 513 12: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], ""}, 514 } 515 ) 516 517 // create all filters 518 for i := range testCases { 519 testCases[i].id, _ = api.NewFilter(testCases[i].crit) 520 } 521 522 // raise events 523 time.Sleep(1 * time.Second) 524 if nsend := backend.logsFeed.Send(allLogs); nsend == 0 { 525 t.Fatal("Logs event not delivered") 526 } 527 if nsend := backend.pendingLogsFeed.Send(allLogs); nsend == 0 { 528 t.Fatal("Pending logs event not delivered") 529 } 530 531 for i, tt := range testCases { 532 var fetched []*types.Log 533 timeout := time.Now().Add(1 * time.Second) 534 for { // fetch all expected logs 535 results, err := api.GetFilterChanges(tt.id) 536 if err != nil { 537 t.Fatalf("Unable to fetch logs: %v", err) 538 } 539 540 fetched = append(fetched, results.([]*types.Log)...) 541 if len(fetched) >= len(tt.expected) { 542 break 543 } 544 // check timeout 545 if time.Now().After(timeout) { 546 break 547 } 548 549 time.Sleep(100 * time.Millisecond) 550 } 551 552 if len(fetched) != len(tt.expected) { 553 t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)) 554 return 555 } 556 557 for l := range fetched { 558 if fetched[l].Removed { 559 t.Errorf("expected log not to be removed for log %d in case %d", l, i) 560 } 561 if !reflect.DeepEqual(fetched[l], tt.expected[l]) { 562 t.Errorf("invalid log on index %d for case %d", l, i) 563 } 564 } 565 } 566 } 567 568 // TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event feed. 569 func TestPendingLogsSubscription(t *testing.T) { 570 t.Parallel() 571 572 var ( 573 db = rawdb.NewMemoryDatabase() 574 backend, sys = newTestFilterSystem(t, db, Config{}) 575 api = NewFilterAPI(sys, false) 576 577 firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") 578 secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") 579 thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") 580 notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") 581 firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 582 secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") 583 thirdTopic = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333") 584 fourthTopic = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444") 585 notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999") 586 587 allLogs = [][]*types.Log{ 588 {{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}, 589 {{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}, 590 {{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}, 591 {{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}, 592 {{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}, 593 { 594 {Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5}, 595 {Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5}, 596 {Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5}, 597 {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5}, 598 }, 599 } 600 601 pendingBlockNumber = big.NewInt(rpc.PendingBlockNumber.Int64()) 602 603 testCases = []struct { 604 crit zond.FilterQuery 605 expected []*types.Log 606 c chan []*types.Log 607 sub *Subscription 608 err chan error 609 }{ 610 // match all 611 { 612 zond.FilterQuery{FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 613 flattenLogs(allLogs), 614 nil, nil, nil, 615 }, 616 // match none due to no matching addresses 617 { 618 zond.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 619 nil, 620 nil, nil, nil, 621 }, 622 // match logs based on addresses, ignore topics 623 { 624 zond.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 625 append(flattenLogs(allLogs[:2]), allLogs[5][3]), 626 nil, nil, nil, 627 }, 628 // match none due to no matching topics (match with address) 629 { 630 zond.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 631 nil, 632 nil, nil, nil, 633 }, 634 // match logs based on addresses and topics 635 { 636 zond.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 637 append(flattenLogs(allLogs[3:5]), allLogs[5][0]), 638 nil, nil, nil, 639 }, 640 // match logs based on multiple addresses and "or" topics 641 { 642 zond.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 643 append(flattenLogs(allLogs[2:5]), allLogs[5][0]), 644 nil, nil, nil, 645 }, 646 // multiple pending logs, should match only 2 topics from the logs in block 5 647 { 648 zond.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, 649 []*types.Log{allLogs[5][0], allLogs[5][2]}, 650 nil, nil, nil, 651 }, 652 // match none due to only matching new mined logs 653 { 654 zond.FilterQuery{}, 655 nil, 656 nil, nil, nil, 657 }, 658 // match none due to only matching mined logs within a specific block range 659 { 660 zond.FilterQuery{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, 661 nil, 662 nil, nil, nil, 663 }, 664 // match all due to matching mined and pending logs 665 { 666 zond.FilterQuery{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, 667 flattenLogs(allLogs), 668 nil, nil, nil, 669 }, 670 // match none due to matching logs from a specific block number to new mined blocks 671 { 672 zond.FilterQuery{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, 673 nil, 674 nil, nil, nil, 675 }, 676 } 677 ) 678 679 // create all subscriptions, this ensures all subscriptions are created before the events are posted. 680 // on slow machines this could otherwise lead to missing events when the subscription is created after 681 // (some) events are posted. 682 for i := range testCases { 683 testCases[i].c = make(chan []*types.Log) 684 testCases[i].err = make(chan error, 1) 685 686 var err error 687 testCases[i].sub, err = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c) 688 if err != nil { 689 t.Fatalf("SubscribeLogs %d failed: %v\n", i, err) 690 } 691 } 692 693 for n, test := range testCases { 694 i := n 695 tt := test 696 go func() { 697 defer tt.sub.Unsubscribe() 698 699 var fetched []*types.Log 700 701 timeout := time.After(1 * time.Second) 702 fetchLoop: 703 for { 704 select { 705 case logs := <-tt.c: 706 // Do not break early if we've fetched greater, or equal, 707 // to the number of logs expected. This ensures we do not 708 // deadlock the filter system because it will do a blocking 709 // send on this channel if another log arrives. 710 fetched = append(fetched, logs...) 711 case <-timeout: 712 break fetchLoop 713 } 714 } 715 716 if len(fetched) != len(tt.expected) { 717 tt.err <- fmt.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)) 718 return 719 } 720 721 for l := range fetched { 722 if fetched[l].Removed { 723 tt.err <- fmt.Errorf("expected log not to be removed for log %d in case %d", l, i) 724 return 725 } 726 if !reflect.DeepEqual(fetched[l], tt.expected[l]) { 727 tt.err <- fmt.Errorf("invalid log on index %d for case %d\n", l, i) 728 return 729 } 730 } 731 tt.err <- nil 732 }() 733 } 734 735 // raise events 736 for _, ev := range allLogs { 737 backend.pendingLogsFeed.Send(ev) 738 } 739 740 for i := range testCases { 741 err := <-testCases[i].err 742 if err != nil { 743 t.Fatalf("test %d failed: %v", i, err) 744 } 745 <-testCases[i].sub.Err() 746 } 747 } 748 749 func TestLightFilterLogs(t *testing.T) { 750 t.Parallel() 751 752 var ( 753 db = rawdb.NewMemoryDatabase() 754 backend, sys = newTestFilterSystem(t, db, Config{}) 755 api = NewFilterAPI(sys, true) 756 signer = types.HomesteadSigner{} 757 758 firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") 759 secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") 760 thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") 761 notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") 762 firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") 763 secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") 764 765 // posted twice, once as regular logs and once as pending logs. 766 allLogs = []*types.Log{ 767 // Block 1 768 {Address: firstAddr, Topics: []common.Hash{}, Data: []byte{}, BlockNumber: 2, Index: 0}, 769 // Block 2 770 {Address: firstAddr, Topics: []common.Hash{firstTopic}, Data: []byte{}, BlockNumber: 3, Index: 0}, 771 {Address: secondAddr, Topics: []common.Hash{firstTopic}, Data: []byte{}, BlockNumber: 3, Index: 1}, 772 {Address: thirdAddress, Topics: []common.Hash{secondTopic}, Data: []byte{}, BlockNumber: 3, Index: 2}, 773 // Block 3 774 {Address: thirdAddress, Topics: []common.Hash{secondTopic}, Data: []byte{}, BlockNumber: 4, Index: 0}, 775 } 776 777 testCases = []struct { 778 crit FilterCriteria 779 expected []*types.Log 780 id rpc.ID 781 }{ 782 // match all 783 0: {FilterCriteria{}, allLogs, ""}, 784 // match none due to no matching addresses 785 1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""}, 786 // match logs based on addresses, ignore topics 787 2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""}, 788 // match logs based on addresses and topics 789 3: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""}, 790 // all logs with block num >= 3 791 4: {FilterCriteria{FromBlock: big.NewInt(3), ToBlock: big.NewInt(5)}, allLogs[1:], ""}, 792 // all logs 793 5: {FilterCriteria{FromBlock: big.NewInt(0), ToBlock: big.NewInt(5)}, allLogs, ""}, 794 // all logs with 1>= block num <=2 and topic secondTopic 795 6: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(3), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""}, 796 } 797 798 key, _ = crypto.GenerateKey() 799 addr = crypto.PubkeyToAddress(key.PublicKey) 800 genesis = &core.Genesis{Config: params.TestChainConfig, 801 Alloc: core.GenesisAlloc{ 802 addr: {Balance: big.NewInt(params.Ether)}, 803 }, 804 } 805 receipts = []*types.Receipt{{ 806 Logs: []*types.Log{allLogs[0]}, 807 }, { 808 Logs: []*types.Log{allLogs[1], allLogs[2], allLogs[3]}, 809 }, { 810 Logs: []*types.Log{allLogs[4]}, 811 }} 812 ) 813 814 _, blocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 4, func(i int, b *core.BlockGen) { 815 if i == 0 { 816 return 817 } 818 receipts[i-1].Bloom = types.CreateBloom(types.Receipts{receipts[i-1]}) 819 b.AddUncheckedReceipt(receipts[i-1]) 820 tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i - 1), To: &common.Address{}, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), signer, key) 821 b.AddTx(tx) 822 }) 823 for i, block := range blocks { 824 rawdb.WriteBlock(db, block) 825 rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) 826 rawdb.WriteHeadBlockHash(db, block.Hash()) 827 if i > 0 { 828 rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), []*types.Receipt{receipts[i-1]}) 829 } 830 } 831 // create all filters 832 for i := range testCases { 833 id, err := api.NewFilter(testCases[i].crit) 834 if err != nil { 835 t.Fatal(err) 836 } 837 testCases[i].id = id 838 } 839 840 // raise events 841 time.Sleep(1 * time.Second) 842 for _, block := range blocks { 843 backend.chainFeed.Send(core.ChainEvent{Block: block, Hash: common.Hash{}, Logs: allLogs}) 844 } 845 846 for i, tt := range testCases { 847 var fetched []*types.Log 848 timeout := time.Now().Add(1 * time.Second) 849 for { // fetch all expected logs 850 results, err := api.GetFilterChanges(tt.id) 851 if err != nil { 852 t.Fatalf("Unable to fetch logs: %v", err) 853 } 854 fetched = append(fetched, results.([]*types.Log)...) 855 if len(fetched) >= len(tt.expected) { 856 break 857 } 858 // check timeout 859 if time.Now().After(timeout) { 860 break 861 } 862 863 time.Sleep(100 * time.Millisecond) 864 } 865 866 if len(fetched) != len(tt.expected) { 867 t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)) 868 return 869 } 870 871 for l := range fetched { 872 if fetched[l].Removed { 873 t.Errorf("expected log not to be removed for log %d in case %d", l, i) 874 } 875 expected := *tt.expected[l] 876 blockNum := expected.BlockNumber - 1 877 expected.BlockHash = blocks[blockNum].Hash() 878 expected.TxHash = blocks[blockNum].Transactions()[0].Hash() 879 if !reflect.DeepEqual(fetched[l], &expected) { 880 t.Errorf("invalid log on index %d for case %d", l, i) 881 } 882 } 883 } 884 } 885 886 // TestPendingTxFilterDeadlock tests if the event loop hangs when pending 887 // txes arrive at the same time that one of multiple filters is timing out. 888 // Please refer to #22131 for more details. 889 func TestPendingTxFilterDeadlock(t *testing.T) { 890 t.Parallel() 891 timeout := 100 * time.Millisecond 892 893 var ( 894 db = rawdb.NewMemoryDatabase() 895 backend, sys = newTestFilterSystem(t, db, Config{Timeout: timeout}) 896 api = NewFilterAPI(sys, false) 897 done = make(chan struct{}) 898 ) 899 900 go func() { 901 // Bombard feed with txes until signal was received to stop 902 i := uint64(0) 903 for { 904 select { 905 case <-done: 906 return 907 default: 908 } 909 910 tx := types.NewTransaction(i, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil) 911 backend.txFeed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx}}) 912 i++ 913 } 914 }() 915 916 // Create a bunch of filters that will 917 // timeout either in 100ms or 200ms 918 fids := make([]rpc.ID, 20) 919 for i := 0; i < len(fids); i++ { 920 fid := api.NewPendingTransactionFilter(nil) 921 fids[i] = fid 922 // Wait for at least one tx to arrive in filter 923 for { 924 hashes, err := api.GetFilterChanges(fid) 925 if err != nil { 926 t.Fatalf("Filter should exist: %v\n", err) 927 } 928 if len(hashes.([]common.Hash)) > 0 { 929 break 930 } 931 runtime.Gosched() 932 } 933 } 934 935 // Wait until filters have timed out 936 time.Sleep(3 * timeout) 937 938 // If tx loop doesn't consume `done` after a second 939 // it's hanging. 940 select { 941 case done <- struct{}{}: 942 // Check that all filters have been uninstalled 943 for _, fid := range fids { 944 if _, err := api.GetFilterChanges(fid); err == nil { 945 t.Errorf("Filter %s should have been uninstalled\n", fid) 946 } 947 } 948 case <-time.After(1 * time.Second): 949 t.Error("Tx sending loop hangs") 950 } 951 } 952 953 func flattenLogs(pl [][]*types.Log) []*types.Log { 954 var logs []*types.Log 955 for _, l := range pl { 956 logs = append(logs, l...) 957 } 958 return logs 959 }