package ethreceipts

import (
	"context"
	"errors"
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	"github.com/0xsequence/ethkit/ethmonitor"
	"github.com/0xsequence/ethkit/ethrpc"
	"github.com/0xsequence/ethkit/ethtxn"
	"github.com/0xsequence/ethkit/go-ethereum"
	"github.com/0xsequence/ethkit/go-ethereum/common"
	"github.com/0xsequence/ethkit/go-ethereum/core/types"
	"github.com/0xsequence/ethkit/util"
	"github.com/goware/breaker"
	"github.com/goware/cachestore"
	"github.com/goware/cachestore/memlru"
	"github.com/goware/calc"
	"github.com/goware/channel"
	"github.com/goware/logger"
	"github.com/goware/superr"
	"golang.org/x/sync/errgroup"
)

// DefaultOptions are the options used by NewReceiptsListener when the caller
// does not supply an explicit Options value.
var DefaultOptions = Options{
	MaxConcurrentFetchReceiptWorkers: 100,
	MaxConcurrentFilterWorkers:       50,
	PastReceiptsCacheSize:            5_000,
	NumBlocksToFinality:              0, // value of <=0 here will select from ethrpc.Networks[chainID].NumBlocksToFinality
	FilterMaxWaitNumBlocks:           0, // value of 0 here means no limit, and will listen until manually unsubscribed
	Alerter:                          util.NoopAlerter(),
}

// Options configures a ReceiptsListener.
type Options struct {
	// MaxConcurrentFetchReceiptWorkers bounds the number of concurrent receipt
	// fetches against the rpc provider (it is the capacity of fetchSem).
	MaxConcurrentFetchReceiptWorkers int

	// MaxConcurrentFilterWorkers bounds the number of concurrent filter-matching
	// goroutines used while processing blocks (it is the capacity of filterSem).
	MaxConcurrentFilterWorkers int

	// PastReceiptsCacheSize is the size of the in-memory LRU cache of
	// previously fetched receipts.
	PastReceiptsCacheSize int

	// NumBlocksToFinality is the number of blocks behind the latest block after
	// which a receipt's block is considered final. A value of <=0 selects the
	// network default from ethrpc.Networks[chainID].NumBlocksToFinality (see
	// lazyInit), with an absolute minimum of 1.
	NumBlocksToFinality int

	// FilterMaxWaitNumBlocks is the maximum amount of blocks a filter will wait between getting
	// a receipt filter match, before the filter will unsubscribe itself and stop listening.
	// This value may be overriden by setting FilterCond#MaxListenNumBlocks on per-filter basis.
	//
	// NOTE:
	// * value of -1 will use NumBlocksToFinality*2
	// * value of 0 will set no limit, so filter will always listen [default]
	// * value of N will set the N number of blocks without results before unsubscribing between iterations
	FilterMaxWaitNumBlocks int

	// Cache backend ...
	// CacheBackend cachestore.Backend

	// Alerter config via github.com/goware/alerter
	Alerter util.Alerter
}

// ReceiptsListener watches blocks coming from an ethmonitor.Monitor and
// dispatches matching transaction receipts to subscribers' filters.
type ReceiptsListener struct {
	options  Options
	log      logger.Logger
	alert    util.Alerter
	provider ethrpc.Interface
	monitor  *ethmonitor.Monitor
	br       *breaker.Breaker

	// fetchSem is used to limit amount of concurrent fetch requests
	fetchSem chan struct{}

	// pastReceipts is a cache of past requested receipts
	pastReceipts cachestore.Store[*types.Receipt]

	// notFoundTxnHashes is a cache to flag txn hashes which are not found on the network
	// so that we can avoid having to ask to refetch. The monitor will pick up these txn hashes
	// for us if they end up turning up.
	notFoundTxnHashes cachestore.Store[uint64]

	// subscribers, their filter registration queue, and the filter worker
	// semaphore; subscribers is guarded by mu.
	subscribers       []*subscriber
	registerFiltersCh chan registerFilters
	filterSem         chan struct{}

	ctx     context.Context
	ctxStop context.CancelFunc
	running int32 // 1 while Run is active; accessed atomically
	mu      sync.RWMutex
}

var (
	ErrFilterMatch        = errors.New("ethreceipts: filter match fail")
	ErrFilterCond         = errors.New("ethreceipts: missing filter condition")
	ErrFilterExhausted    = errors.New("ethreceipts: filter exhausted after maxWait blocks")
	ErrSubscriptionClosed = errors.New("ethreceipts: subscription closed")
)

// NewReceiptsListener constructs a ReceiptsListener on top of the given rpc
// provider and block monitor. The monitor must be running WithLogs and with a
// BlockRetentionLimit of at least 50, since the listener relies on the
// monitor's retained blocks/logs to match receipts.
func NewReceiptsListener(log logger.Logger, provider ethrpc.Interface, monitor *ethmonitor.Monitor, options ...Options) (*ReceiptsListener, error) {
	opts := DefaultOptions
	if len(options) > 0 {
		opts = options[0]
	}

	if opts.Alerter == nil {
		opts.Alerter = util.NoopAlerter()
	}

	if !monitor.Options().WithLogs {
		return nil, fmt.Errorf("ethreceipts: ReceiptsListener needs a monitor with WithLogs enabled to function")
	}

	minBlockRetentionLimit := 50
	if monitor.Options().BlockRetentionLimit < minBlockRetentionLimit {
		return nil, fmt.Errorf("ethreceipts: monitor options BlockRetentionLimit must be at least %d", minBlockRetentionLimit)
	}

	// TODO: use opts.CacheBackend if set..
	// but, could be a lot for redis.. so, make sure to use Compose if we do it..
	pastReceipts, err := memlru.NewWithSize[*types.Receipt](opts.PastReceiptsCacheSize)
	if err != nil {
		return nil, err
	}

	// TODO: use opts.CacheBackend if set.. maybe combine with cachestore.Compose and memlru..?
	notFoundTxnHashes, err := memlru.NewWithSize[uint64](5000) //, cachestore.WithDefaultKeyExpiry(2*time.Minute))
	if err != nil {
		return nil, err
	}

	return &ReceiptsListener{
		options:           opts,
		log:               log,
		alert:             opts.Alerter,
		provider:          provider,
		monitor:           monitor,
		br:                breaker.New(log, 1*time.Second, 2, 4), // max 4 retries
		fetchSem:          make(chan struct{}, opts.MaxConcurrentFetchReceiptWorkers),
		pastReceipts:      pastReceipts,
		notFoundTxnHashes: notFoundTxnHashes,
		subscribers:       make([]*subscriber, 0),
		registerFiltersCh: make(chan registerFilters, 1000),
		filterSem:         make(chan struct{}, opts.MaxConcurrentFilterWorkers),
	}, nil
}

// lazyInit resolves NumBlocksToFinality from the network defaults when the
// option was left unset (<=0), clamping to a minimum of 1.
func (l *ReceiptsListener) lazyInit(ctx context.Context) error {
	if l.options.NumBlocksToFinality <= 0 {
		chainID, err := getChainID(ctx, l.provider)
		if err != nil {
			chainID = big.NewInt(1) // assume mainnet in case of unlikely error
		}
		network, ok := ethrpc.Networks[chainID.Uint64()]
		if ok {
			l.options.NumBlocksToFinality = network.NumBlocksToFinality
		}
	}

	if l.options.NumBlocksToFinality <= 0 {
		l.options.NumBlocksToFinality = 1 // absolute min is 1
	}

	return nil
}

// Run starts the listener loop and blocks until the context is canceled or
// the underlying monitor stops. Only one Run may be active at a time.
func (l *ReceiptsListener) Run(ctx context.Context) error {
	if l.IsRunning() {
		return fmt.Errorf("ethreceipts: already running")
	}

	l.ctx, l.ctxStop = context.WithCancel(ctx)

	atomic.StoreInt32(&l.running, 1)
	defer atomic.StoreInt32(&l.running, 0)

	if err := l.lazyInit(ctx); err != nil {
		return err
	}

	l.log.Info("ethreceipts: running")

	return l.listener()
}

// Stop signals the listener loop to shut down.
func (l *ReceiptsListener) Stop() {
	l.log.Info("ethreceipts: stop")
	l.ctxStop()
}

// IsRunning reports whether Run is currently active.
func (l *ReceiptsListener) IsRunning() bool {
	return atomic.LoadInt32(&l.running) == 1
}

// Subscribe registers a new subscriber with the given filter queries and
// returns its Subscription. The subscription remains active until its
// Unsubscribe method is called.
func (l *ReceiptsListener) Subscribe(filterQueries ...FilterQuery) Subscription {
	l.mu.Lock()
	defer l.mu.Unlock()

	subscriber := &subscriber{
		listener: l,
		ch: channel.NewUnboundedChan[Receipt](2, 5000, channel.Options{
			Logger:  l.log,
			Alerter: l.alert,
			Label:   "ethreceipts:subscriber",
		}),
		done: make(chan struct{}),
		finalizer: &finalizer{
			numBlocksToFinality: big.NewInt(int64(l.options.NumBlocksToFinality)),
			queue:               []finalTxn{},
			txns:                map[common.Hash]struct{}{},
		},
	}

	subscriber.unsubscribe = func() {
		close(subscriber.done)
		subscriber.ch.Close()
		subscriber.ch.Flush()

		// remove the subscriber from the listener's subscriber list
		l.mu.Lock()
		defer l.mu.Unlock()

		for i, sub := range l.subscribers {
			if sub == subscriber {
				l.subscribers = append(l.subscribers[:i], l.subscribers[i+1:]...)
				return
			}
		}
	}

	l.subscribers = append(l.subscribers, subscriber)

	// Subscribe to the filters
	subscriber.AddFilter(filterQueries...)

	return subscriber
}

// NumSubscribers returns the current number of active subscribers.
func (l *ReceiptsListener) NumSubscribers() int {
	l.mu.Lock()
	defer l.mu.Unlock()
	return len(l.subscribers)
}

// PurgeHistory clears the pastReceipts and notFoundTxnHashes caches.
func (l *ReceiptsListener) PurgeHistory() {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.pastReceipts.ClearAll(context.Background())
	l.notFoundTxnHashes.ClearAll(context.Background())
}

// WaitReceiptFinalityFunc blocks until the previously matched receipt reaches
// finality (or the context is canceled), returning the finalized receipt.
type WaitReceiptFinalityFunc func(ctx context.Context) (*Receipt, error)

// FetchTransactionReceipt is a convenience wrapper over
// FetchTransactionReceiptWithFilter for a single txn hash. optMaxBlockWait
// overrides the default maxWait of -1 (which means NumBlocksToFinality*2).
func (l *ReceiptsListener) FetchTransactionReceipt(ctx context.Context, txnHash common.Hash, optMaxBlockWait ...int) (*Receipt, WaitReceiptFinalityFunc, error) {
	maxWait := -1 // default use -1 maxWait, which is finality*2 value
	if len(optMaxBlockWait) > 0 {
		maxWait = optMaxBlockWait[0]
	}
	filter := FilterTxnHash(txnHash).MaxWait(maxWait)
	return l.FetchTransactionReceiptWithFilter(ctx, filter)
}

// FetchTransactionReceiptWithFilter waits for a single receipt matching the
// given filter. It returns the mined receipt, plus a WaitReceiptFinalityFunc
// that can be used to additionally wait for the receipt to reach finality.
func (l *ReceiptsListener) FetchTransactionReceiptWithFilter(ctx context.Context, filter FilterQuery) (*Receipt, WaitReceiptFinalityFunc, error) {
	// Fetch method searches for just a single filter match. If you'd like to keep the filter
	// open to listen to many similar receipts, use .Subscribe(filter) directly instead.
	query := filter.LimitOne(true).SearchCache(true).SearchOnChain(true).Finalize(true)

	filterer, ok := query.(Filterer)
	if !ok {
		return nil, nil, fmt.Errorf("ethreceipts: unable to cast Filterer from FilterQuery")
	}

	// capture the filter's maxWait and txnHash values for error reporting
	condMaxWait := 0
	if filterer.Options().MaxWait != nil {
		condMaxWait = *filterer.Options().MaxWait
	}
	condTxnHash := ""
	if filterer.Cond().TxnHash != nil {
		condTxnHash = (*filterer.Cond().TxnHash).String()
	}

	sub := l.Subscribe(query)

	exhausted := make(chan struct{})
	mined := make(chan Receipt, 2)
	finalized := make(chan Receipt, 1)
	// found flags that at least one receipt has matched. NOTE(review): it is
	// written and read by the same goroutine below, so the atomic store looks
	// to be belt-and-suspenders rather than required — confirm before removing.
	found := uint32(0)

	finalityFunc := func(ctx context.Context) (*Receipt, error) {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case receipt, ok := <-finalized:
			if !ok {
				// finalized was closed without a value: filter exhausted first
				return nil, superr.Wrap(ErrFilterExhausted, fmt.Errorf("txnHash=%s maxWait=%d", condTxnHash, condMaxWait))
			}
			return &receipt, nil
		}
	}

	// TODO/NOTE: perhaps in an extended node failure. could there be a scenario
	// where filterer.Exhausted is never hit? and this subscription never unsubscribes..?
	// don't think so, but we can double check.
	go func() {
		defer sub.Unsubscribe()
		defer close(mined)
		defer close(finalized)

		for {
			select {
			case <-ctx.Done():
				return

			case <-time.After(500 * time.Millisecond):
				// periodically poll for filter exhaustion
				select {
				case <-filterer.Exhausted():
					// exhausted, but, lets see if there has ever been a match
					// as we want to make sure we allow the finalizer to finish.
					// if there has never been a match, we can finish now.
					// if filterer.LastMatchBlockNum() == 0 {
					if found == 0 {
						close(exhausted)
						return
					}
				default:
					// not exhausted
				}

			case receipt, ok := <-sub.TransactionReceipt():
				if !ok {
					return
				}

				atomic.StoreUint32(&found, 1)

				if receipt.Final {
					// write to mined chan again in case the receipt has
					// immediately finalized, so we want to mine+finalize now.
					mined <- receipt

					// write to finalized chan and return -- were done
					finalized <- receipt
					return
				} else {
					if receipt.Reorged {
						// skip reporting reoreged receipts in this method
						continue
					}
					// write to mined chan and continue, as still waiting
					// on finalizer
					mined <- receipt
				}
			}
		}
	}()

	select {
	case <-ctx.Done():
		return nil, nil, ctx.Err()
	case <-sub.Done():
		return nil, nil, ErrSubscriptionClosed
	case <-exhausted:
		return nil, finalityFunc, superr.Wrap(ErrFilterExhausted, fmt.Errorf("txnHash=%s maxWait=%d", condTxnHash, condMaxWait))
	case receipt, ok := <-mined:
		if !ok {
			return nil, nil, ErrSubscriptionClosed
		}
		return &receipt, finalityFunc, nil
	}
}

// fetchTransactionReceipt from the rpc provider, up to some amount of concurrency. When forceFetch is passed,
// it indicates that we have high conviction that the receipt should be available, as the monitor has found
// this transaction hash.
379 func (l *ReceiptsListener) fetchTransactionReceipt(ctx context.Context, txnHash common.Hash, forceFetch bool) (*types.Receipt, error) { 380 l.fetchSem <- struct{}{} 381 382 resultCh := make(chan *types.Receipt) 383 errCh := make(chan error) 384 385 defer close(resultCh) 386 defer close(errCh) 387 388 go func() { 389 defer func() { 390 <-l.fetchSem 391 }() 392 393 txnHashHex := txnHash.String() 394 395 receipt, ok, _ := l.pastReceipts.Get(ctx, txnHashHex) 396 if ok { 397 resultCh <- receipt 398 return 399 } 400 401 latestBlockNum := l.monitor.LatestBlockNum().Uint64() 402 oldestBlockNum := l.monitor.OldestBlockNum().Uint64() 403 404 // Clear out notFound flag if the monitor has identified the transaction hash 405 if !forceFetch { 406 notFoundBlockNum, notFound, _ := l.notFoundTxnHashes.Get(ctx, txnHashHex) 407 if notFound && notFoundBlockNum >= oldestBlockNum { 408 l.mu.Lock() 409 txn, _ := l.monitor.GetTransaction(txnHash) 410 l.mu.Unlock() 411 if txn != nil { 412 l.log.Debugf("fetchTransactionReceipt(%s) previously not found receipt has now been found in our monitor retention cache", txnHashHex) 413 l.notFoundTxnHashes.Delete(ctx, txnHashHex) 414 notFound = false 415 } 416 } 417 if notFound { 418 errCh <- ethereum.NotFound 419 return 420 } 421 } 422 423 // Fetch the transaction receipt from the node, and use the breaker in case of node failures. 424 err := l.br.Do(ctx, func() error { 425 tctx, clearTimeout := context.WithTimeout(ctx, 4*time.Second) 426 defer clearTimeout() 427 428 receipt, err := l.provider.TransactionReceipt(tctx, txnHash) 429 430 if !forceFetch && errors.Is(err, ethereum.NotFound) { 431 // record the blockNum, maybe this receipt is just too new and nodes are telling 432 // us they can't find it yet, in which case we will rely on the monitor to 433 // clear this flag for us. 
434 l.log.Debugf("fetchTransactionReceipt(%s) receipt not found -- flagging in notFoundTxnHashes cache", txnHashHex) 435 l.notFoundTxnHashes.Set(ctx, txnHashHex, latestBlockNum) 436 errCh <- err 437 return nil 438 } else if forceFetch && receipt == nil { 439 // force fetch, lets retry a number of times as the node may end up finding the receipt. 440 // txn has been found in the monitor with event added, but still haven't retrived the receipt. 441 // this could be that we're too fast and node isn't returning the receipt yet. 442 return fmt.Errorf("forceFetch enabled, but failed to fetch receipt %s", txnHash) 443 } 444 if err != nil { 445 return superr.Wrap(fmt.Errorf("failed to fetch receipt %s", txnHash), err) 446 } 447 448 l.pastReceipts.Set(ctx, txnHashHex, receipt) 449 l.notFoundTxnHashes.Delete(ctx, txnHashHex) 450 451 resultCh <- receipt 452 return nil 453 }) 454 455 if err != nil { 456 errCh <- err 457 } 458 }() 459 460 select { 461 case <-ctx.Done(): 462 return nil, ctx.Err() 463 case receipt := <-resultCh: 464 return receipt, nil 465 case err := <-errCh: 466 return nil, err 467 } 468 } 469 470 func (l *ReceiptsListener) listener() error { 471 monitor := l.monitor.Subscribe("ethreceipts") 472 defer monitor.Unsubscribe() 473 474 latestBlockNum := l.latestBlockNum().Uint64() 475 l.log.Debugf("latestBlockNum %d", latestBlockNum) 476 477 g, ctx := errgroup.WithContext(l.ctx) 478 479 // Listen on filter registration to search cached and on-chain receipts 480 g.Go(func() error { 481 for { 482 select { 483 484 case <-ctx.Done(): 485 l.log.Debug("ethreceipts: parent signaled to cancel - receipt listener is quitting") 486 return nil 487 488 case <-monitor.Done(): 489 l.log.Info("ethreceipts: receipt listener is stopped because monitor signaled its stopping") 490 return nil 491 492 // subscriber registered a new filter, lets process past blocks against the new filters 493 case reg, ok := <-l.registerFiltersCh: 494 if !ok { 495 continue 496 } 497 if len(reg.filters) == 
0 { 498 continue 499 } 500 501 // check if filters asking to search cache / on-chain 502 filters := make([]Filterer, 0, len(reg.filters)) 503 for _, f := range reg.filters { 504 if f.Options().SearchCache || f.Options().SearchOnChain { 505 filters = append(filters, f) 506 } 507 } 508 if len(filters) == 0 { 509 continue 510 } 511 512 // fetch blocks data from the monitor cache. aka the up to some number 513 // of blocks which are retained by the monitor. the blocks are ordered 514 // from oldest to newest order. 515 l.mu.Lock() 516 blocks := l.monitor.Chain().Blocks() 517 l.mu.Unlock() 518 519 // Search our local blocks cache from monitor retention list 520 matchedList, err := l.processBlocks(blocks, []*subscriber{reg.subscriber}, [][]Filterer{filters}) 521 if err != nil { 522 l.log.Warnf("ethreceipts: failed to process blocks during new filter registration: %v", err) 523 } 524 525 // Finally, search on chain with filters which have had no results. Note, this strategy only 526 // works for txnHash conditions as other filters could have multiple matches. 
527 err = l.searchFilterOnChain(ctx, reg.subscriber, collectOk(filters, matchedList[0], false)) 528 if err != nil { 529 l.log.Warnf("ethreceipts: failed to search filter on-chain during new filter registration: %v", err) 530 } 531 } 532 } 533 }) 534 535 // Monitor new blocks for filter matches 536 g.Go(func() error { 537 for { 538 select { 539 540 case <-ctx.Done(): 541 l.log.Debug("ethreceipts: parent signaled to cancel - receipt listener is quitting") 542 return nil 543 544 case <-monitor.Done(): 545 l.log.Info("ethreceipts: receipt listener is stopped because monitor signaled its stopping") 546 return nil 547 548 // monitor newly mined blocks 549 case blocks := <-monitor.Blocks(): 550 if len(blocks) == 0 { 551 continue 552 } 553 554 latestBlockNum = l.latestBlockNum().Uint64() 555 556 // pass blocks across filters of subscribers 557 l.mu.Lock() 558 if len(l.subscribers) == 0 { 559 l.mu.Unlock() 560 continue 561 } 562 subscribers := make([]*subscriber, len(l.subscribers)) 563 copy(subscribers, l.subscribers) 564 filters := make([][]Filterer, len(l.subscribers)) 565 for i := 0; i < len(subscribers); i++ { 566 filters[i] = subscribers[i].Filters() 567 } 568 l.mu.Unlock() 569 570 reorg := false 571 for _, block := range blocks { 572 if block.Event == ethmonitor.Added { 573 // eagerly clear notFoundTxnHashes, just in case 574 for _, txn := range block.Transactions() { 575 l.notFoundTxnHashes.Delete(ctx, txn.Hash().Hex()) 576 } 577 } else if block.Event == ethmonitor.Removed { 578 // delete past receipts of removed blocks 579 reorg = true 580 for _, txn := range block.Transactions() { 581 txnHashHex := txn.Hash().Hex() 582 l.pastReceipts.Delete(ctx, txnHashHex) 583 l.notFoundTxnHashes.Delete(ctx, txnHashHex) 584 } 585 } 586 } 587 588 // mark all filterers of lastMatchBlockNum to 0 in case of reorg 589 if reorg { 590 for _, list := range filters { 591 for _, filterer := range list { 592 if f, _ := filterer.(*filter); f != nil { 593 f.startBlockNum = latestBlockNum 594 
f.lastMatchBlockNum = 0 595 } 596 } 597 } 598 } 599 600 // Match blocks against subscribers[i] X filters[i][..] 601 matchedList, err := l.processBlocks(blocks, subscribers, filters) 602 if err != nil { 603 l.log.Warnf("ethreceipts: failed to process blocks: %v", err) 604 } 605 606 // MaxWait exhaust check 607 for x, list := range matchedList { 608 for y, matched := range list { 609 filterer := filters[x][y] 610 if matched || filterer.StartBlockNum() == 0 { 611 if f, _ := filterer.(*filter); f != nil { 612 if f.startBlockNum == 0 { 613 f.startBlockNum = latestBlockNum 614 } 615 if matched { 616 f.lastMatchBlockNum = latestBlockNum 617 } 618 } 619 } else { 620 // NOTE: even if a filter is exhausted, the finalizer will still run 621 // for those transactions which were previously mined and marked by the finalizer. 622 // Therefore, the code below will not impact the functionality of the finalizer. 623 maxWait := l.getMaxWaitBlocks(filterer.Options().MaxWait) 624 blockNum := calc.Max(filterer.StartBlockNum(), filterer.LastMatchBlockNum()) 625 626 if maxWait != 0 && (latestBlockNum-blockNum) >= maxWait { 627 f, _ := filterer.(*filter) 628 if f == nil { 629 panic("ethreceipts: unexpected") 630 } 631 632 if (f.Options().LimitOne && f.LastMatchBlockNum() == 0) || !f.Options().LimitOne { 633 l.log.Debugf("filter exhausted! last block matched:%d maxWait:%d filterID:%d", filterer.LastMatchBlockNum(), maxWait, filterer.FilterID()) 634 635 subscriber := subscribers[x] 636 subscriber.RemoveFilter(filterer) 637 638 select { 639 case <-f.Exhausted(): 640 default: 641 close(f.exhausted) 642 } 643 } 644 } 645 } 646 } 647 } 648 } 649 } 650 }) 651 652 return g.Wait() 653 } 654 655 // processBlocks attempts to match blocks against subscriber[i] X filterers[i].. list of filters. There is 656 // a corresponding list of filters[i] for each subscriber[i]. 
func (l *ReceiptsListener) processBlocks(blocks ethmonitor.Blocks, subscribers []*subscriber, filterers [][]Filterer) ([][]bool, error) {
	// oks is the 'ok' match of the filterers [][]Filterer results
	oks := make([][]bool, len(filterers))
	for i, f := range filterers {
		oks[i] = make([]bool, len(f))
	}

	if len(subscribers) == 0 || len(filterers) == 0 {
		return oks, nil
	}

	// check each block against each subscriber X filter
	for _, block := range blocks {
		// report if the txn was removed
		reorged := block.Event == ethmonitor.Removed

		receipts := make([]Receipt, len(block.Transactions()))
		logs := groupLogsByTransaction(block.Logs)

		for i, txn := range block.Transactions() {
			txnLog, ok := logs[txn.Hash().Hex()]
			if !ok {
				txnLog = []*types.Log{}
			}

			receipts[i] = Receipt{
				Reorged:     reorged,
				Final:       l.isBlockFinal(block.Number()),
				logs:        txnLog,
				transaction: txn,
			}
			txnMsg, err := ethtxn.AsMessage(txn)
			if err != nil {
				// NOTE: this should never happen, but lets log in case it does. In the
				// future, we should just not use go-ethereum for these types.
				l.log.Warnf("unexpected failure of txn (%s index %d) on block %d (total txns=%d) AsMessage(..): %s",
					txn.Hash(), i, block.NumberU64(), len(block.Transactions()), err,
				)
			} else {
				receipts[i].message = &txnMsg
			}
		}

		// match the receipts against the filters, bounded by filterSem;
		// each goroutine writes a distinct oks[i], so no two workers race
		// on the same slot, and wg.Wait below synchronizes before reads.
		var wg sync.WaitGroup
		for i, sub := range subscribers {
			wg.Add(1)
			l.filterSem <- struct{}{}
			go func(i int, sub *subscriber) {
				defer func() {
					<-l.filterSem
					wg.Done()
				}()

				// filter matcher
				matched, err := sub.matchFilters(l.ctx, filterers[i], receipts)
				if err != nil {
					l.log.Warnf("error while processing filters: %s", err)
				}
				// NOTE(review): oks[i] is overwritten on every block iteration, so
				// the returned matrix reflects only the last processed block's
				// matches, not a union across blocks — confirm callers (listener's
				// on-chain fallback and MaxWait exhaust check) intend this.
				oks[i] = matched

				// check subscriber to finalize any receipts
				err = sub.finalizeReceipts(block.Number())
				if err != nil {
					l.log.Errorf("finalizeReceipts failed: %v", err)
				}
			}(i, sub)
		}
		wg.Wait()
	}

	return oks, nil
}

// searchFilterOnChain fetches receipts directly from the rpc provider for
// filters that have a TxnHash condition and SearchOnChain enabled, then
// broadcasts any found receipt through the subscriber's filter matcher.
func (l *ReceiptsListener) searchFilterOnChain(ctx context.Context, subscriber *subscriber, filterers []Filterer) error {
	for _, filterer := range filterers {
		if !filterer.Options().SearchOnChain {
			// skip filters which do not ask to search on chain
			continue
		}

		txnHashCond := filterer.Cond().TxnHash
		if txnHashCond == nil {
			// skip filters which are not searching for txnHashes directly
			continue
		}

		r, err := l.fetchTransactionReceipt(ctx, *txnHashCond, false)
		if !errors.Is(err, ethereum.NotFound) && err != nil {
			l.log.Errorf("searchFilterOnChain fetchTransactionReceipt failed: %v", err)
		}
		if r == nil {
			// unable to find the receipt on-chain, lets continue
			continue
		}

		if f, ok := filterer.(*filter); ok {
			f.lastMatchBlockNum = r.BlockNumber.Uint64()
		}

		receipt := Receipt{
			receipt: r,
			// NOTE: we do not include the transaction at this point, as we don't have it.
			// transaction: txn,
			Final: l.isBlockFinal(r.BlockNumber),
		}

		// will always find the receipt, as it will be in our case previously found above.
		// this is called so we can broadcast the match to the filterer's subscriber.
		_, err = subscriber.matchFilters(ctx, []Filterer{filterer}, []Receipt{receipt})
		if err != nil {
			l.log.Errorf("searchFilterOnChain matchFilters failed: %v", err)
		}
	}

	return nil
}

// getMaxWaitBlocks resolves a filter's MaxWait option into a block count:
// nil uses the listener-wide FilterMaxWaitNumBlocks option, a negative value
// means NumBlocksToFinality*2, otherwise the value itself is used.
func (l *ReceiptsListener) getMaxWaitBlocks(maxWait *int) uint64 {
	if maxWait == nil {
		return uint64(l.options.FilterMaxWaitNumBlocks)
	} else if *maxWait < 0 {
		return uint64(l.options.NumBlocksToFinality * 2)
	} else {
		return uint64(*maxWait)
	}
}

// isBlockFinal reports whether blockNum is at least NumBlocksToFinality
// blocks behind the latest known block.
func (l *ReceiptsListener) isBlockFinal(blockNum *big.Int) bool {
	latestBlockNum := l.latestBlockNum()
	if latestBlockNum == nil || blockNum == nil {
		return false
	}
	diff := big.NewInt(0).Sub(latestBlockNum, blockNum)
	return diff.Cmp(big.NewInt(int64(l.options.NumBlocksToFinality))) >= 0
}

// latestBlockNum returns the monitor's latest block number, falling back to
// querying the provider (via the breaker) when the monitor has no blocks yet.
// Returns 0 on failure rather than an error.
func (l *ReceiptsListener) latestBlockNum() *big.Int {
	latestBlockNum := l.monitor.LatestBlockNum()
	if latestBlockNum == nil || latestBlockNum.Cmp(big.NewInt(0)) == 0 {
		err := l.br.Do(l.ctx, func() error {
			// NOTE(review): the breaker runs with l.ctx but the provider call
			// uses context.Background() — confirm whether this call should be
			// bound to l.ctx as well.
			block, err := l.provider.BlockByNumber(context.Background(), nil)
			if err != nil {
				return err
			}
			latestBlockNum = block.Number()
			return nil
		})
		if err != nil || latestBlockNum == nil {
			return big.NewInt(0)
		}
		return latestBlockNum
	}
	return latestBlockNum
}

// getChainID queries the provider for the chain id, retrying via a breaker
// (1s backoff, up to 3 retries) with a 4s timeout per attempt.
func getChainID(ctx context.Context, provider ethrpc.Interface) (*big.Int, error) {
	var chainID *big.Int
	err := breaker.Do(ctx, func() error {
		ctx, cancel := context.WithTimeout(ctx, 4*time.Second)
		defer cancel()

		id, err := provider.ChainID(ctx)
		if err != nil {
			return err
		}
		chainID = id
		return nil
	}, nil, 1*time.Second, 2, 3)

	if err != nil {
		return nil, err
	}

	return chainID, nil
}

// collectOk returns the elements of in whose corresponding oks flag equals
// okCond (e.g. okCond=false collects the non-matched entries).
func collectOk[T any](in []T, oks []bool, okCond bool) []T {
	var out []T
	for i, v := range in {
		if oks[i] == okCond {
			out = append(out, v)
		}
	}
	return out
}

// func txnLogs(blockLogs []types.Log, txnHash ethkit.Hash) []*types.Log {
// 	txnLogs := []*types.Log{}
// 	for i, log := range blockLogs {
// 		if log.TxHash == txnHash {
// 			log := log // copy
// 			txnLogs = append(txnLogs, &log)
// 			if i+1 >= len(blockLogs) || blockLogs[i+1].TxHash != txnHash {
// 				break
// 			}
// 		}
// 	}
// 	return txnLogs
// }

// groupLogsByTransaction groups a block's logs by their transaction hash
// (hex-encoded), preserving the order of logs within each transaction.
func groupLogsByTransaction(logs []types.Log) map[string][]*types.Log {
	var out = make(map[string][]*types.Log)
	for _, log := range logs {
		log := log // copy so &log below is stable per-iteration (pre-Go 1.22 loop semantics)

		logTxHash := log.TxHash.Hex()
		outLogs, ok := out[logTxHash]
		if !ok {
			outLogs = []*types.Log{}
		}

		outLogs = append(outLogs, &log)
		out[logTxHash] = outLogs
	}
	return out
}

// blockLogsCount returns the larger of numTxns and the highest log
// TxIndex+1, i.e. an upper bound on the number of transaction slots
// implied by the block's logs.
func blockLogsCount(numTxns int, logs []types.Log) uint {
	var max uint = uint(numTxns)
	for _, log := range logs {
		if log.TxIndex+1 > max {
			max = log.TxIndex + 1
		}
	}
	return max
}