// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package filters implements an ethereum filtering system for blocks,
// transactions and log events.
package filters

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/calmw/ethereum"
	"github.com/calmw/ethereum/common"
	"github.com/calmw/ethereum/common/lru"
	"github.com/calmw/ethereum/core"
	"github.com/calmw/ethereum/core/bloombits"
	"github.com/calmw/ethereum/core/rawdb"
	"github.com/calmw/ethereum/core/types"
	"github.com/calmw/ethereum/ethdb"
	"github.com/calmw/ethereum/event"
	"github.com/calmw/ethereum/log"
	"github.com/calmw/ethereum/params"
	"github.com/calmw/ethereum/rpc"
)

// Config represents the configuration of the filter system.
type Config struct {
	LogCacheSize int           // maximum number of cached blocks (default: 32)
	Timeout      time.Duration // how long filters stay active (default: 5min)
}

func (cfg Config) withDefaults() Config {
	if cfg.Timeout == 0 {
		cfg.Timeout = 5 * time.Minute
	}
	if cfg.LogCacheSize == 0 {
		cfg.LogCacheSize = 32
	}
	return cfg
}

type Backend interface {
	ChainDb() ethdb.Database
	HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)
	HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error)
	GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error)
	GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
	GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error)
	PendingBlockAndReceipts() (*types.Block, types.Receipts)

	CurrentHeader() *types.Header
	ChainConfig() *params.ChainConfig
	SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
	SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
	SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
	SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription
	SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription

	BloomStatus() (uint64, uint64)
	ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)
}

// FilterSystem holds resources shared by all filters.
type FilterSystem struct {
	backend   Backend
	logsCache *lru.Cache[common.Hash, *logCacheElem]
	cfg       *Config
}

// NewFilterSystem creates a filter system.
func NewFilterSystem(backend Backend, config Config) *FilterSystem {
	config = config.withDefaults()
	return &FilterSystem{
		backend:   backend,
		logsCache: lru.NewCache[common.Hash, *logCacheElem](config.LogCacheSize),
		cfg:       &config,
	}
}
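// Illustrative sketch (not part of the original source): from a caller outside
// this package, a node backend satisfying the Backend interface could wire the
// filter system and event system together roughly like this. The "backend"
// variable is hypothetical; zero Config fields fall back to the defaults above.
//
//	sys := filters.NewFilterSystem(backend, filters.Config{
//		LogCacheSize: 64,               // cache logs of up to 64 blocks
//		Timeout:      10 * time.Minute, // inactive filters expire after 10 minutes
//	})
//	es := filters.NewEventSystem(sys, false) // false = full-node (non-light) mode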
type logCacheElem struct {
	logs []*types.Log
	body atomic.Value
}

// cachedLogElem loads block logs from the backend and caches the result.
func (sys *FilterSystem) cachedLogElem(ctx context.Context, blockHash common.Hash, number uint64) (*logCacheElem, error) {
	cached, ok := sys.logsCache.Get(blockHash)
	if ok {
		return cached, nil
	}

	logs, err := sys.backend.GetLogs(ctx, blockHash, number)
	if err != nil {
		return nil, err
	}
	if logs == nil {
		return nil, fmt.Errorf("failed to get logs for block #%d (0x%s)", number, blockHash.TerminalString())
	}
	// Database logs are un-derived.
	// Fill in whatever we can (txHash is inaccessible at this point).
	flattened := make([]*types.Log, 0)
	var logIdx uint
	for i, txLogs := range logs {
		for _, log := range txLogs {
			log.BlockHash = blockHash
			log.BlockNumber = number
			log.TxIndex = uint(i)
			log.Index = logIdx
			logIdx++
			flattened = append(flattened, log)
		}
	}
	elem := &logCacheElem{logs: flattened}
	sys.logsCache.Add(blockHash, elem)
	return elem, nil
}

func (sys *FilterSystem) cachedGetBody(ctx context.Context, elem *logCacheElem, hash common.Hash, number uint64) (*types.Body, error) {
	if body := elem.body.Load(); body != nil {
		return body.(*types.Body), nil
	}
	body, err := sys.backend.GetBody(ctx, hash, rpc.BlockNumber(number))
	if err != nil {
		return nil, err
	}
	elem.body.Store(body)
	return body, nil
}
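// Illustrative sketch (not part of the original source): a caller inside this
// package could resolve a block's cached, partially-derived logs and fetch the
// corresponding body on demand. The ctx and header variables are hypothetical.
//
//	elem, err := sys.cachedLogElem(ctx, header.Hash(), header.Number.Uint64())
//	if err != nil {
//		return nil, err
//	}
//	// The cached element is shared between callers; copy the logs before mutating them.
//	logs := append([]*types.Log{}, elem.logs...)
//	body, err := sys.cachedGetBody(ctx, elem, header.Hash(), header.Number.Uint64())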
// Type determines the kind of filter and is used to put the filter in to
// the correct bucket when added.
type Type byte

const (
	// UnknownSubscription indicates an unknown subscription type
	UnknownSubscription Type = iota
	// LogsSubscription queries for new or removed (chain reorg) logs
	LogsSubscription
	// PendingLogsSubscription queries for logs in pending blocks
	PendingLogsSubscription
	// MinedAndPendingLogsSubscription queries for logs in mined and pending blocks.
	MinedAndPendingLogsSubscription
	// PendingTransactionsSubscription queries for pending transactions entering
	// the pending state
	PendingTransactionsSubscription
	// BlocksSubscription queries hashes for blocks that are imported
	BlocksSubscription
	// LastIndexSubscription keeps track of the last index
	LastIndexSubscription
)

const (
	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096
	// rmLogsChanSize is the size of channel listening to RemovedLogsEvent.
	rmLogsChanSize = 10
	// logsChanSize is the size of channel listening to LogsEvent.
	logsChanSize = 10
	// chainEvChanSize is the size of channel listening to ChainEvent.
	chainEvChanSize = 10
)

type subscription struct {
	id        rpc.ID
	typ       Type
	created   time.Time
	logsCrit  ethereum.FilterQuery
	logs      chan []*types.Log
	txs       chan []*types.Transaction
	headers   chan *types.Header
	installed chan struct{} // closed when the filter is installed
	err       chan error    // closed when the filter is uninstalled
}

// EventSystem creates subscriptions, processes events and broadcasts them to the
// subscriptions that match the subscription criteria.
type EventSystem struct {
	backend   Backend
	sys       *FilterSystem
	lightMode bool
	lastHead  *types.Header

	// Subscriptions
	txsSub         event.Subscription // Subscription for new transaction event
	logsSub        event.Subscription // Subscription for new log event
	rmLogsSub      event.Subscription // Subscription for removed log event
	pendingLogsSub event.Subscription // Subscription for pending log event
	chainSub       event.Subscription // Subscription for new chain event

	// Channels
	install       chan *subscription         // install filter for event notification
	uninstall     chan *subscription         // remove filter for event notification
	txsCh         chan core.NewTxsEvent      // Channel to receive new transactions event
	logsCh        chan []*types.Log          // Channel to receive new log event
	pendingLogsCh chan []*types.Log          // Channel to receive pending log event
	rmLogsCh      chan core.RemovedLogsEvent // Channel to receive removed log event
	chainCh       chan core.ChainEvent       // Channel to receive new chain event
}

// NewEventSystem creates a new manager that listens for events on the given mux,
// parses and filters them. It uses the all map to retrieve filter changes. The
// work loop holds its own index that is used to forward events to filters.
//
// The returned manager has a loop that needs to be stopped with the Stop function
// or by stopping the given mux.
func NewEventSystem(sys *FilterSystem, lightMode bool) *EventSystem {
	m := &EventSystem{
		sys:           sys,
		backend:       sys.backend,
		lightMode:     lightMode,
		install:       make(chan *subscription),
		uninstall:     make(chan *subscription),
		txsCh:         make(chan core.NewTxsEvent, txChanSize),
		logsCh:        make(chan []*types.Log, logsChanSize),
		rmLogsCh:      make(chan core.RemovedLogsEvent, rmLogsChanSize),
		pendingLogsCh: make(chan []*types.Log, logsChanSize),
		chainCh:       make(chan core.ChainEvent, chainEvChanSize),
	}

	// Subscribe events
	m.txsSub = m.backend.SubscribeNewTxsEvent(m.txsCh)
	m.logsSub = m.backend.SubscribeLogsEvent(m.logsCh)
	m.rmLogsSub = m.backend.SubscribeRemovedLogsEvent(m.rmLogsCh)
	m.chainSub = m.backend.SubscribeChainEvent(m.chainCh)
	m.pendingLogsSub = m.backend.SubscribePendingLogsEvent(m.pendingLogsCh)

	// Make sure none of the subscriptions are empty
	if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil || m.pendingLogsSub == nil {
		log.Crit("Subscribe for event system failed")
	}

	go m.eventLoop()
	return m
}

// Subscription is created when the client registers itself for a particular event.
type Subscription struct {
	ID        rpc.ID
	f         *subscription
	es        *EventSystem
	unsubOnce sync.Once
}

// Err returns a channel that is closed when unsubscribed.
func (sub *Subscription) Err() <-chan error {
	return sub.f.err
}
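// Illustrative sketch (not part of the original source): a typical consumer
// subscribes, reads from its own channel, and watches Err() for teardown. The
// es, addrs and handle identifiers are hypothetical; es would be the value
// returned by NewEventSystem.
//
//	logsCh := make(chan []*types.Log)
//	sub, err := es.SubscribeLogs(ethereum.FilterQuery{Addresses: addrs}, logsCh)
//	if err != nil {
//		return err
//	}
//	defer sub.Unsubscribe()
//	for {
//		select {
//		case logs := <-logsCh:
//			handle(logs) // hypothetical handler
//		case <-sub.Err():
//			return nil
//		}
//	}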
// Unsubscribe uninstalls the subscription from the event broadcast loop.
func (sub *Subscription) Unsubscribe() {
	sub.unsubOnce.Do(func() {
	uninstallLoop:
		for {
			// write uninstall request and consume logs/hashes. This prevents
			// the eventLoop broadcast method from deadlocking when writing to the
			// filter event channel while the subscription loop is waiting for
			// this method to return (and thus not reading these events).
			select {
			case sub.es.uninstall <- sub.f:
				break uninstallLoop
			case <-sub.f.logs:
			case <-sub.f.txs:
			case <-sub.f.headers:
			}
		}

		// wait for filter to be uninstalled in work loop before returning
		// this ensures that the manager won't use the event channel which
		// will probably be closed by the client asap after this method returns.
		<-sub.Err()
	})
}

// subscribe installs the subscription in the event broadcast loop.
func (es *EventSystem) subscribe(sub *subscription) *Subscription {
	es.install <- sub
	<-sub.installed
	return &Subscription{ID: sub.id, f: sub, es: es}
}

// SubscribeLogs creates a subscription that will write all logs matching the
// given criteria to the given logs channel. Default value for the from and to
// block is "latest". If the fromBlock > toBlock an error is returned.
func (es *EventSystem) SubscribeLogs(crit ethereum.FilterQuery, logs chan []*types.Log) (*Subscription, error) {
	var from, to rpc.BlockNumber
	if crit.FromBlock == nil {
		from = rpc.LatestBlockNumber
	} else {
		from = rpc.BlockNumber(crit.FromBlock.Int64())
	}
	if crit.ToBlock == nil {
		to = rpc.LatestBlockNumber
	} else {
		to = rpc.BlockNumber(crit.ToBlock.Int64())
	}

	// only interested in pending logs
	if from == rpc.PendingBlockNumber && to == rpc.PendingBlockNumber {
		return es.subscribePendingLogs(crit, logs), nil
	}
	// only interested in new mined logs
	if from == rpc.LatestBlockNumber && to == rpc.LatestBlockNumber {
		return es.subscribeLogs(crit, logs), nil
	}
	// only interested in mined logs within a specific block range
	if from >= 0 && to >= 0 && to >= from {
		return es.subscribeLogs(crit, logs), nil
	}
	// interested in mined logs from a specific block number, new logs and pending logs
	if from >= rpc.LatestBlockNumber && to == rpc.PendingBlockNumber {
		return es.subscribeMinedPendingLogs(crit, logs), nil
	}
	// interested in logs from a specific block number to new mined blocks
	if from >= 0 && to == rpc.LatestBlockNumber {
		return es.subscribeLogs(crit, logs), nil
	}
	return nil, fmt.Errorf("invalid from and to block combination: from > to")
}

// subscribeMinedPendingLogs creates a subscription that returns mined and
// pending logs that match the given criteria.
func (es *EventSystem) subscribeMinedPendingLogs(crit ethereum.FilterQuery, logs chan []*types.Log) *Subscription {
	sub := &subscription{
		id:        rpc.NewID(),
		typ:       MinedAndPendingLogsSubscription,
		logsCrit:  crit,
		created:   time.Now(),
		logs:      logs,
		txs:       make(chan []*types.Transaction),
		headers:   make(chan *types.Header),
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}
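// Illustrative sketch (not part of the original source): how a couple of
// from/to combinations are routed by SubscribeLogs above. The logsCh channel
// is hypothetical, the block numbers are arbitrary, and math/big is assumed
// to be imported by the caller.
//
//	// pending-only logs (from == to == pending, routed to subscribePendingLogs)
//	sub, err := es.SubscribeLogs(ethereum.FilterQuery{
//		FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()),
//		ToBlock:   big.NewInt(rpc.PendingBlockNumber.Int64()),
//	}, logsCh)
//
//	// a fixed historical range (routed to subscribeLogs)
//	sub, err = es.SubscribeLogs(ethereum.FilterQuery{
//		FromBlock: big.NewInt(1_000_000),
//		ToBlock:   big.NewInt(1_000_100),
//	}, logsCh)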
// subscribeLogs creates a subscription that will write all logs matching the
// given criteria to the given logs channel.
func (es *EventSystem) subscribeLogs(crit ethereum.FilterQuery, logs chan []*types.Log) *Subscription {
	sub := &subscription{
		id:        rpc.NewID(),
		typ:       LogsSubscription,
		logsCrit:  crit,
		created:   time.Now(),
		logs:      logs,
		txs:       make(chan []*types.Transaction),
		headers:   make(chan *types.Header),
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}

// subscribePendingLogs creates a subscription that writes contract event logs for
// transactions that enter the transaction pool.
func (es *EventSystem) subscribePendingLogs(crit ethereum.FilterQuery, logs chan []*types.Log) *Subscription {
	sub := &subscription{
		id:        rpc.NewID(),
		typ:       PendingLogsSubscription,
		logsCrit:  crit,
		created:   time.Now(),
		logs:      logs,
		txs:       make(chan []*types.Transaction),
		headers:   make(chan *types.Header),
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}

// SubscribeNewHeads creates a subscription that writes the header of a block that is
// imported in the chain.
func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscription {
	sub := &subscription{
		id:        rpc.NewID(),
		typ:       BlocksSubscription,
		created:   time.Now(),
		logs:      make(chan []*types.Log),
		txs:       make(chan []*types.Transaction),
		headers:   headers,
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}

// SubscribePendingTxs creates a subscription that writes transactions that
// enter the transaction pool.
func (es *EventSystem) SubscribePendingTxs(txs chan []*types.Transaction) *Subscription {
	sub := &subscription{
		id:        rpc.NewID(),
		typ:       PendingTransactionsSubscription,
		created:   time.Now(),
		logs:      make(chan []*types.Log),
		txs:       txs,
		headers:   make(chan *types.Header),
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}

type filterIndex map[Type]map[rpc.ID]*subscription

func (es *EventSystem) handleLogs(filters filterIndex, ev []*types.Log) {
	if len(ev) == 0 {
		return
	}
	for _, f := range filters[LogsSubscription] {
		matchedLogs := filterLogs(ev, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
		if len(matchedLogs) > 0 {
			f.logs <- matchedLogs
		}
	}
}

func (es *EventSystem) handlePendingLogs(filters filterIndex, ev []*types.Log) {
	if len(ev) == 0 {
		return
	}
	for _, f := range filters[PendingLogsSubscription] {
		matchedLogs := filterLogs(ev, nil, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
		if len(matchedLogs) > 0 {
			f.logs <- matchedLogs
		}
	}
}

func (es *EventSystem) handleRemovedLogs(filters filterIndex, ev core.RemovedLogsEvent) {
	for _, f := range filters[LogsSubscription] {
		matchedLogs := filterLogs(ev.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
		if len(matchedLogs) > 0 {
			f.logs <- matchedLogs
		}
	}
}
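// Illustrative sketch (not part of the original source): the handlers above
// delegate matching to filterLogs, which is defined elsewhere in this package.
// In a FilterQuery, Addresses is an OR-set, while Topics is position-wise: the
// outer slice indexes the topic position and the inner slice lists accepted
// alternatives for that position (nil means wildcard). The hash and address
// values below are hypothetical.
//
//	crit := ethereum.FilterQuery{
//		Addresses: []common.Address{tokenA, tokenB}, // match logs from either contract
//		Topics: [][]common.Hash{
//			{transferSig},        // topic[0] must equal the event signature
//			nil,                  // topic[1] is a wildcard
//			{recipientTopicHash}, // topic[2] must match
//		},
//	}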
func (es *EventSystem) handleTxsEvent(filters filterIndex, ev core.NewTxsEvent) {
	for _, f := range filters[PendingTransactionsSubscription] {
		f.txs <- ev.Txs
	}
}

func (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent) {
	for _, f := range filters[BlocksSubscription] {
		f.headers <- ev.Block.Header()
	}
	if es.lightMode && len(filters[LogsSubscription]) > 0 {
		es.lightFilterNewHead(ev.Block.Header(), func(header *types.Header, remove bool) {
			for _, f := range filters[LogsSubscription] {
				if f.logsCrit.FromBlock != nil && header.Number.Cmp(f.logsCrit.FromBlock) < 0 {
					continue
				}
				if f.logsCrit.ToBlock != nil && header.Number.Cmp(f.logsCrit.ToBlock) > 0 {
					continue
				}
				if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 {
					f.logs <- matchedLogs
				}
			}
		})
	}
}

func (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func(*types.Header, bool)) {
	oldh := es.lastHead
	es.lastHead = newHeader
	if oldh == nil {
		return
	}
	newh := newHeader
	// find common ancestor, create list of rolled back and new block hashes
	var oldHeaders, newHeaders []*types.Header
	for oldh.Hash() != newh.Hash() {
		if oldh.Number.Uint64() >= newh.Number.Uint64() {
			oldHeaders = append(oldHeaders, oldh)
			oldh = rawdb.ReadHeader(es.backend.ChainDb(), oldh.ParentHash, oldh.Number.Uint64()-1)
		}
		if oldh.Number.Uint64() < newh.Number.Uint64() {
			newHeaders = append(newHeaders, newh)
			newh = rawdb.ReadHeader(es.backend.ChainDb(), newh.ParentHash, newh.Number.Uint64()-1)
			if newh == nil {
				// happens when CHT syncing, nothing to do
				newh = oldh
			}
		}
	}
	// roll back old blocks
	for _, h := range oldHeaders {
		callBack(h, true)
	}
	// check new blocks (array is in reverse order)
	for i := len(newHeaders) - 1; i >= 0; i-- {
		callBack(newHeaders[i], false)
	}
}

// filter logs of a single header in light client mode
func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*types.Log {
	if !bloomFilter(header.Bloom, addresses, topics) {
		return nil
	}
	// Get the logs of the block
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	cached, err := es.sys.cachedLogElem(ctx, header.Hash(), header.Number.Uint64())
	if err != nil {
		return nil
	}
	unfiltered := append([]*types.Log{}, cached.logs...)
	for i, log := range unfiltered {
		// Don't modify in-cache elements
		logcopy := *log
		logcopy.Removed = remove
		// Swap copy in-place
		unfiltered[i] = &logcopy
	}
	logs := filterLogs(unfiltered, nil, nil, addresses, topics)
	// Txhash is already resolved
	if len(logs) > 0 && logs[0].TxHash != (common.Hash{}) {
		return logs
	}
	// Resolve txhash
	body, err := es.sys.cachedGetBody(ctx, cached, header.Hash(), header.Number.Uint64())
	if err != nil {
		return nil
	}
	for _, log := range logs {
		// logs are already copied, safe to modify
		log.TxHash = body.Transactions[log.TxIndex].Hash()
	}
	return logs
}
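// Illustrative walk-through (not part of the original source) of the reorg
// handling in lightFilterNewHead above, using hypothetical block names.
// Suppose the last seen head was B2 on the old branch and the new head is B2'
// on a sibling branch sharing ancestor B1:
//
//	old: B1 <- B2        new: B1 <- B2'
//
// The loop walks both cursors back until they meet at B1, collecting B2 into
// oldHeaders and B2' into newHeaders. The callback is then invoked with
// remove=true for B2 (its logs are re-emitted with Removed set) and with
// remove=false for B2' (its logs are emitted as new), oldest block first.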
// eventLoop (un)installs filters and processes mux events.
func (es *EventSystem) eventLoop() {
	// Ensure all subscriptions get cleaned up
	defer func() {
		es.txsSub.Unsubscribe()
		es.logsSub.Unsubscribe()
		es.rmLogsSub.Unsubscribe()
		es.pendingLogsSub.Unsubscribe()
		es.chainSub.Unsubscribe()
	}()

	index := make(filterIndex)
	for i := UnknownSubscription; i < LastIndexSubscription; i++ {
		index[i] = make(map[rpc.ID]*subscription)
	}

	for {
		select {
		case ev := <-es.txsCh:
			es.handleTxsEvent(index, ev)
		case ev := <-es.logsCh:
			es.handleLogs(index, ev)
		case ev := <-es.rmLogsCh:
			es.handleRemovedLogs(index, ev)
		case ev := <-es.pendingLogsCh:
			es.handlePendingLogs(index, ev)
		case ev := <-es.chainCh:
			es.handleChainEvent(index, ev)

		case f := <-es.install:
			if f.typ == MinedAndPendingLogsSubscription {
				// this type is registered in both the logs and pending logs buckets
				index[LogsSubscription][f.id] = f
				index[PendingLogsSubscription][f.id] = f
			} else {
				index[f.typ][f.id] = f
			}
			close(f.installed)

		case f := <-es.uninstall:
			if f.typ == MinedAndPendingLogsSubscription {
				// this type is registered in both the logs and pending logs buckets
				delete(index[LogsSubscription], f.id)
				delete(index[PendingLogsSubscription], f.id)
			} else {
				delete(index[f.typ], f.id)
			}
			close(f.err)

		// System stopped
		case <-es.txsSub.Err():
			return
		case <-es.logsSub.Err():
			return
		case <-es.rmLogsSub.Err():
			return
		case <-es.chainSub.Err():
			return
		}
	}
}