github.com/sberex/go-sberex@v1.8.2-0.20181113200658-ed96ac38f7d7/eth/filters/filter_system.go

// This file is part of the go-sberex library. The go-sberex library is
// free software: you can redistribute it and/or modify it under the terms
// of the GNU Lesser General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// The go-sberex library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
// General Public License <http://www.gnu.org/licenses/> for more details.

// Package filters implements a Sberex filtering system for block,
// transaction and log events.
package filters

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	sberex "github.com/Sberex/go-sberex"
	"github.com/Sberex/go-sberex/common"
	"github.com/Sberex/go-sberex/core"
	"github.com/Sberex/go-sberex/core/types"
	"github.com/Sberex/go-sberex/event"
	"github.com/Sberex/go-sberex/rpc"
)

// Type determines the kind of filter and is used to put the filter into
// the correct bucket when added.
type Type byte

const (
	// UnknownSubscription indicates an unknown subscription type
	UnknownSubscription Type = iota
	// LogsSubscription queries for new or removed (chain reorg) logs
	LogsSubscription
	// PendingLogsSubscription queries for logs in pending blocks
	PendingLogsSubscription
	// MinedAndPendingLogsSubscription queries for logs in mined and pending blocks.
	MinedAndPendingLogsSubscription
	// PendingTransactionsSubscription queries tx hashes for pending
	// transactions entering the pending state
	PendingTransactionsSubscription
	// BlocksSubscription queries hashes for blocks that are imported
	BlocksSubscription
	// LastIndexSubscription keeps track of the last index
	LastIndexSubscription
)

const (
	// txChanSize is the size of channel listening to TxPreEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096
	// rmLogsChanSize is the size of channel listening to RemovedLogsEvent.
	rmLogsChanSize = 10
	// logsChanSize is the size of channel listening to LogsEvent.
	logsChanSize = 10
	// chainEvChanSize is the size of channel listening to ChainEvent.
	chainEvChanSize = 10
)

var (
	ErrInvalidSubscriptionID = errors.New("invalid id")
)

type subscription struct {
	id        rpc.ID
	typ       Type
	created   time.Time
	logsCrit  sberex.FilterQuery
	logs      chan []*types.Log
	hashes    chan common.Hash
	headers   chan *types.Header
	installed chan struct{} // closed when the filter is installed
	err       chan error    // closed when the filter is uninstalled
}

// EventSystem creates subscriptions, processes events and broadcasts them to the
// subscriptions which match the subscription criteria.
type EventSystem struct {
	mux       *event.TypeMux
	backend   Backend
	lightMode bool
	lastHead  *types.Header
	install   chan *subscription // install filter for event notification
	uninstall chan *subscription // remove filter for event notification
}

// NewEventSystem creates a new manager that listens for events on the given mux,
// parses and filters them. It uses the all map to retrieve filter changes. The
// work loop holds its own index that is used to forward events to filters.
//
// The returned manager has a loop that needs to be stopped with the Stop function
// or by stopping the given mux.
func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventSystem {
	m := &EventSystem{
		mux:       mux,
		backend:   backend,
		lightMode: lightMode,
		install:   make(chan *subscription),
		uninstall: make(chan *subscription),
	}

	go m.eventLoop()

	return m
}

// Subscription is created when the client registers itself for a particular event.
type Subscription struct {
	ID        rpc.ID
	f         *subscription
	es        *EventSystem
	unsubOnce sync.Once
}

// Err returns a channel that is closed when unsubscribed.
func (sub *Subscription) Err() <-chan error {
	return sub.f.err
}

// Unsubscribe uninstalls the subscription from the event broadcast loop.
func (sub *Subscription) Unsubscribe() {
	sub.unsubOnce.Do(func() {
	uninstallLoop:
		for {
			// write uninstall request and consume logs/hashes. This prevents
			// the eventLoop broadcast method from deadlocking when writing to the
			// filter event channel while the subscription loop is waiting for
			// this method to return (and thus not reading these events).
			select {
			case sub.es.uninstall <- sub.f:
				break uninstallLoop
			case <-sub.f.logs:
			case <-sub.f.hashes:
			case <-sub.f.headers:
			}
		}

		// wait for filter to be uninstalled in work loop before returning
		// this ensures that the manager won't use the event channel which
		// will probably be closed by the client asap after this method returns.
		<-sub.Err()
	})
}

// subscribe installs the subscription in the event broadcast loop.
func (es *EventSystem) subscribe(sub *subscription) *Subscription {
	es.install <- sub
	<-sub.installed
	return &Subscription{ID: sub.id, f: sub, es: es}
}

// SubscribeLogs creates a subscription that will write all logs matching the
// given criteria to the given logs channel. Default value for the from and to
// block is "latest". If the fromBlock > toBlock an error is returned.
func (es *EventSystem) SubscribeLogs(crit sberex.FilterQuery, logs chan []*types.Log) (*Subscription, error) {
	var from, to rpc.BlockNumber
	if crit.FromBlock == nil {
		from = rpc.LatestBlockNumber
	} else {
		from = rpc.BlockNumber(crit.FromBlock.Int64())
	}
	if crit.ToBlock == nil {
		to = rpc.LatestBlockNumber
	} else {
		to = rpc.BlockNumber(crit.ToBlock.Int64())
	}

	// only interested in pending logs
	if from == rpc.PendingBlockNumber && to == rpc.PendingBlockNumber {
		return es.subscribePendingLogs(crit, logs), nil
	}
	// only interested in new mined logs
	if from == rpc.LatestBlockNumber && to == rpc.LatestBlockNumber {
		return es.subscribeLogs(crit, logs), nil
	}
	// only interested in mined logs within a specific block range
	if from >= 0 && to >= 0 && to >= from {
		return es.subscribeLogs(crit, logs), nil
	}
	// interested in mined logs from a specific block number, new logs and pending logs
	if from >= rpc.LatestBlockNumber && to == rpc.PendingBlockNumber {
		return es.subscribeMinedPendingLogs(crit, logs), nil
	}
	// interested in logs from a specific block number to new mined blocks
	if from >= 0 && to == rpc.LatestBlockNumber {
		return es.subscribeLogs(crit, logs), nil
	}
	return nil, fmt.Errorf("invalid from and to block combination: from > to")
}
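
// subscribeLogsExample is an illustrative sketch, not part of the original
// file, showing how a client of this package might call SubscribeLogs. The
// contract address and the printed fields are assumptions made for the
// example only; leaving FromBlock and ToBlock nil defaults both to "latest".
func subscribeLogsExample(es *EventSystem) {
	crit := sberex.FilterQuery{
		Addresses: []common.Address{common.HexToAddress("0x0000000000000000000000000000000000000001")},
	}
	logsCh := make(chan []*types.Log)
	sub, err := es.SubscribeLogs(crit, logsCh)
	if err != nil {
		return // e.g. invalid from/to block combination
	}
	defer sub.Unsubscribe()

	for {
		select {
		case logs := <-logsCh:
			for _, log := range logs {
				fmt.Println("matched log in tx", log.TxHash.Hex())
			}
		case <-sub.Err():
			return
		}
	}
}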

// subscribeMinedPendingLogs creates a subscription that returns mined and
// pending logs that match the given criteria.
func (es *EventSystem) subscribeMinedPendingLogs(crit sberex.FilterQuery, logs chan []*types.Log) *Subscription {
	sub := &subscription{
		id:        rpc.NewID(),
		typ:       MinedAndPendingLogsSubscription,
		logsCrit:  crit,
		created:   time.Now(),
		logs:      logs,
		hashes:    make(chan common.Hash),
		headers:   make(chan *types.Header),
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}

// subscribeLogs creates a subscription that will write all logs matching the
// given criteria to the given logs channel.
func (es *EventSystem) subscribeLogs(crit sberex.FilterQuery, logs chan []*types.Log) *Subscription {
	sub := &subscription{
		id:        rpc.NewID(),
		typ:       LogsSubscription,
		logsCrit:  crit,
		created:   time.Now(),
		logs:      logs,
		hashes:    make(chan common.Hash),
		headers:   make(chan *types.Header),
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}

// subscribePendingLogs creates a subscription that writes logs of pending
// transactions matching the given criteria to the given logs channel.
func (es *EventSystem) subscribePendingLogs(crit sberex.FilterQuery, logs chan []*types.Log) *Subscription {
	sub := &subscription{
		id:        rpc.NewID(),
		typ:       PendingLogsSubscription,
		logsCrit:  crit,
		created:   time.Now(),
		logs:      logs,
		hashes:    make(chan common.Hash),
		headers:   make(chan *types.Header),
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}

// SubscribeNewHeads creates a subscription that writes the header of a block that is
// imported in the chain.
func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscription {
	sub := &subscription{
		id:        rpc.NewID(),
		typ:       BlocksSubscription,
		created:   time.Now(),
		logs:      make(chan []*types.Log),
		hashes:    make(chan common.Hash),
		headers:   headers,
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}

// SubscribePendingTxEvents creates a subscription that writes transaction hashes for
// transactions that enter the transaction pool.
func (es *EventSystem) SubscribePendingTxEvents(hashes chan common.Hash) *Subscription {
	sub := &subscription{
		id:        rpc.NewID(),
		typ:       PendingTransactionsSubscription,
		created:   time.Now(),
		logs:      make(chan []*types.Log),
		hashes:    hashes,
		headers:   make(chan *types.Header),
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}
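
// subscribeHeadsAndTxsExample is an illustrative sketch, not part of the
// original file, showing how SubscribeNewHeads and SubscribePendingTxEvents
// might be combined by a caller; the printed fields are assumptions made for
// the example only.
func subscribeHeadsAndTxsExample(es *EventSystem) {
	headers := make(chan *types.Header)
	headSub := es.SubscribeNewHeads(headers)
	defer headSub.Unsubscribe()

	txHashes := make(chan common.Hash)
	txSub := es.SubscribePendingTxEvents(txHashes)
	defer txSub.Unsubscribe()

	for {
		select {
		case header := <-headers:
			fmt.Println("imported block", header.Number)
		case hash := <-txHashes:
			fmt.Println("pending transaction", hash.Hex())
		case <-headSub.Err():
			return
		case <-txSub.Err():
			return
		}
	}
}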

type filterIndex map[Type]map[rpc.ID]*subscription

// broadcast event to filters that match criteria.
func (es *EventSystem) broadcast(filters filterIndex, ev interface{}) {
	if ev == nil {
		return
	}

	switch e := ev.(type) {
	case []*types.Log:
		if len(e) > 0 {
			for _, f := range filters[LogsSubscription] {
				if matchedLogs := filterLogs(e, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 {
					f.logs <- matchedLogs
				}
			}
		}
	case core.RemovedLogsEvent:
		for _, f := range filters[LogsSubscription] {
			if matchedLogs := filterLogs(e.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 {
				f.logs <- matchedLogs
			}
		}
	case *event.TypeMuxEvent:
		switch muxe := e.Data.(type) {
		case core.PendingLogsEvent:
			for _, f := range filters[PendingLogsSubscription] {
				if e.Time.After(f.created) {
					if matchedLogs := filterLogs(muxe.Logs, nil, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 {
						f.logs <- matchedLogs
					}
				}
			}
		}
	case core.TxPreEvent:
		for _, f := range filters[PendingTransactionsSubscription] {
			f.hashes <- e.Tx.Hash()
		}
	case core.ChainEvent:
		for _, f := range filters[BlocksSubscription] {
			f.headers <- e.Block.Header()
		}
		if es.lightMode && len(filters[LogsSubscription]) > 0 {
			es.lightFilterNewHead(e.Block.Header(), func(header *types.Header, remove bool) {
				for _, f := range filters[LogsSubscription] {
					if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 {
						f.logs <- matchedLogs
					}
				}
			})
		}
	}
}

func (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func(*types.Header, bool)) {
	oldh := es.lastHead
	es.lastHead = newHeader
	if oldh == nil {
		return
	}
	newh := newHeader
	// find common ancestor, create list of rolled back and new block hashes
	var oldHeaders, newHeaders []*types.Header
	for oldh.Hash() != newh.Hash() {
		if oldh.Number.Uint64() >= newh.Number.Uint64() {
			oldHeaders = append(oldHeaders, oldh)
			oldh = core.GetHeader(es.backend.ChainDb(), oldh.ParentHash, oldh.Number.Uint64()-1)
		}
		if oldh.Number.Uint64() < newh.Number.Uint64() {
			newHeaders = append(newHeaders, newh)
			newh = core.GetHeader(es.backend.ChainDb(), newh.ParentHash, newh.Number.Uint64()-1)
			if newh == nil {
				// happens when CHT syncing, nothing to do
				newh = oldh
			}
		}
	}
	// roll back old blocks
	for _, h := range oldHeaders {
		callBack(h, true)
	}
	// check new blocks (array is in reverse order)
	for i := len(newHeaders) - 1; i >= 0; i-- {
		callBack(newHeaders[i], false)
	}
}

// filter logs of a single header in light client mode
func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*types.Log {
	if bloomFilter(header.Bloom, addresses, topics) {
		// Get the logs of the block
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
		defer cancel()
		logsList, err := es.backend.GetLogs(ctx, header.Hash())
		if err != nil {
			return nil
		}
		var unfiltered []*types.Log
		for _, logs := range logsList {
			for _, log := range logs {
				logcopy := *log
				logcopy.Removed = remove
				unfiltered = append(unfiltered, &logcopy)
			}
		}
		logs := filterLogs(unfiltered, nil, nil, addresses, topics)
		if len(logs) > 0 && logs[0].TxHash == (common.Hash{}) {
			// We have matching but non-derived logs
			receipts, err := es.backend.GetReceipts(ctx, header.Hash())
			if err != nil {
				return nil
			}
			unfiltered = unfiltered[:0]
			for _, receipt := range receipts {
				for _, log := range receipt.Logs {
					logcopy := *log
					logcopy.Removed = remove
					unfiltered = append(unfiltered, &logcopy)
				}
			}
			logs = filterLogs(unfiltered, nil, nil, addresses, topics)
		}
		return logs
	}
	return nil
}

// eventLoop (un)installs filters and processes mux events.
func (es *EventSystem) eventLoop() {
	var (
		index = make(filterIndex)
		sub   = es.mux.Subscribe(core.PendingLogsEvent{})
		// Subscribe TxPreEvent from txpool
		txCh  = make(chan core.TxPreEvent, txChanSize)
		txSub = es.backend.SubscribeTxPreEvent(txCh)
		// Subscribe RemovedLogsEvent
		rmLogsCh  = make(chan core.RemovedLogsEvent, rmLogsChanSize)
		rmLogsSub = es.backend.SubscribeRemovedLogsEvent(rmLogsCh)
		// Subscribe []*types.Log
		logsCh  = make(chan []*types.Log, logsChanSize)
		logsSub = es.backend.SubscribeLogsEvent(logsCh)
		// Subscribe ChainEvent
		chainEvCh  = make(chan core.ChainEvent, chainEvChanSize)
		chainEvSub = es.backend.SubscribeChainEvent(chainEvCh)
	)

	// Unsubscribe all events
	defer sub.Unsubscribe()
	defer txSub.Unsubscribe()
	defer rmLogsSub.Unsubscribe()
	defer logsSub.Unsubscribe()
	defer chainEvSub.Unsubscribe()

	for i := UnknownSubscription; i < LastIndexSubscription; i++ {
		index[i] = make(map[rpc.ID]*subscription)
	}

	for {
		select {
		case ev, active := <-sub.Chan():
			if !active { // system stopped
				return
			}
			es.broadcast(index, ev)

		// Handle subscribed events
		case ev := <-txCh:
			es.broadcast(index, ev)
		case ev := <-rmLogsCh:
			es.broadcast(index, ev)
		case ev := <-logsCh:
			es.broadcast(index, ev)
		case ev := <-chainEvCh:
			es.broadcast(index, ev)

		case f := <-es.install:
			if f.typ == MinedAndPendingLogsSubscription {
				// this type covers both logs and pending logs subscriptions
				index[LogsSubscription][f.id] = f
				index[PendingLogsSubscription][f.id] = f
			} else {
				index[f.typ][f.id] = f
			}
			close(f.installed)
		case f := <-es.uninstall:
			if f.typ == MinedAndPendingLogsSubscription {
				// this type covers both logs and pending logs subscriptions
				delete(index[LogsSubscription], f.id)
				delete(index[PendingLogsSubscription], f.id)
			} else {
				delete(index[f.typ], f.id)
			}
			close(f.err)

		// System stopped
		case <-txSub.Err():
			return
		case <-rmLogsSub.Err():
			return
		case <-logsSub.Err():
			return
		case <-chainEvSub.Err():
			return
		}
	}
}