github.com/dominant-strategies/go-quai@v0.28.2/eth/filters/filter_system.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package filters implements a Quai filtering system for block,
    18  // transaction and log events.
    19  package filters
    20  
    21  import (
    22  	"context"
    23  	"fmt"
    24  	"sync"
    25  	"time"
    26  
    27  	quai "github.com/dominant-strategies/go-quai"
    28  	"github.com/dominant-strategies/go-quai/common"
    29  	"github.com/dominant-strategies/go-quai/core"
    30  	"github.com/dominant-strategies/go-quai/core/rawdb"
    31  	"github.com/dominant-strategies/go-quai/core/types"
    32  	"github.com/dominant-strategies/go-quai/event"
    33  	"github.com/dominant-strategies/go-quai/log"
    34  	"github.com/dominant-strategies/go-quai/rpc"
    35  )
    36  
    37  // Type determines the kind of filter and is used to put the filter into
    38  // the correct bucket when added.
    39  type Type byte
    40  
    41  const (
    42  	// UnknownSubscription indicates an unknown subscription type
    43  	UnknownSubscription Type = iota
    44  	// LogsSubscription queries for new or removed (chain reorg) logs
    45  	LogsSubscription
    46  	// PendingLogsSubscription queries for logs in pending blocks
    47  	PendingLogsSubscription
    48  	// MinedAndPendingLogsSubscription queries for logs in mined and pending blocks.
    49  	MinedAndPendingLogsSubscription
    50  	// PendingTransactionsSubscription queries tx hashes for pending
    51  	// transactions entering the pending state
    52  	PendingTransactionsSubscription
    53  	// BlocksSubscription queries hashes for blocks that are imported
    54  	BlocksSubscription
    55  	// LastIndexSubscription keeps track of the last index
    56  	LastIndexSubscription
    57  )
    58  
    59  const (
    60  	// txChanSize is the size of the channel listening to NewTxsEvent.
    61  	// The number is referenced from the size of the tx pool.
    62  	txChanSize = 4096
    63  	// rmLogsChanSize is the size of the channel listening to RemovedLogsEvent.
    64  	rmLogsChanSize = 10
    65  	// logsChanSize is the size of the channel listening to LogsEvent.
    66  	logsChanSize = 10
    67  	// chainEvChanSize is the size of the channel listening to ChainEvent.
    68  	chainEvChanSize = 10
    69  )
    70  
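        // subscription holds the state of a single installed filter: the criteria to
        // match against, the channels that matching events are delivered on, and the
        // lifecycle channels used by the event loop for (un)installation.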
    71  type subscription struct {
    72  	id        rpc.ID
    73  	typ       Type
    74  	created   time.Time
    75  	logsCrit  quai.FilterQuery
    76  	logs      chan []*types.Log
    77  	hashes    chan []common.Hash
    78  	headers   chan *types.Header
    79  	header    chan *types.Header
    80  	installed chan struct{} // closed when the filter is installed
    81  	err       chan error    // closed when the filter is uninstalled
    82  }
    83  
    84  // EventSystem creates subscriptions, processes events and broadcasts them to the
    85  // subscriptions that match the subscription criteria.
    86  type EventSystem struct {
    87  	backend   Backend
    88  	lightMode bool
    89  	lastHead  *types.Header
    90  
    91  	// Subscriptions
    92  	txsSub         event.Subscription // Subscription for new transaction event
    93  	logsSub        event.Subscription // Subscription for new log event
    94  	rmLogsSub      event.Subscription // Subscription for removed log event
    95  	pendingLogsSub event.Subscription // Subscription for pending log event
    96  	chainSub       event.Subscription // Subscription for new chain event
    97  
    98  	// Channels
    99  	install       chan *subscription         // install filter for event notification
   100  	uninstall     chan *subscription         // remove filter for event notification
   101  	txsCh         chan core.NewTxsEvent      // Channel to receive new transactions event
   102  	logsCh        chan []*types.Log          // Channel to receive new log event
   103  	pendingLogsCh chan []*types.Log          // Channel to receive new log event
   104  	rmLogsCh      chan core.RemovedLogsEvent // Channel to receive removed log event
   105  	chainCh       chan core.ChainEvent       // Channel to receive new chain event
   106  }
   107  
   108  // NewEventSystem creates a new manager that listens for events on the given
   109  // backend, parses and filters them. The work loop holds its own index that is
   110  // used to forward events to filters.
   111  //
   112  // The returned manager runs an event loop that exits once the backend's event
   113  // subscriptions are torn down.
   114  func NewEventSystem(backend Backend, lightMode bool) *EventSystem {
   115  	m := &EventSystem{
   116  		backend:       backend,
   117  		lightMode:     lightMode,
   118  		install:       make(chan *subscription),
   119  		uninstall:     make(chan *subscription),
   120  		txsCh:         make(chan core.NewTxsEvent, txChanSize),
   121  		logsCh:        make(chan []*types.Log, logsChanSize),
   122  		rmLogsCh:      make(chan core.RemovedLogsEvent, rmLogsChanSize),
   123  		pendingLogsCh: make(chan []*types.Log, logsChanSize),
   124  		chainCh:       make(chan core.ChainEvent, chainEvChanSize),
   125  	}
   126  
   127  	nodeCtx := common.NodeLocation.Context()
   128  	// Subscribe events
   129  	if nodeCtx == common.ZONE_CTX && backend.ProcessingState() {
   130  		m.txsSub = m.backend.SubscribeNewTxsEvent(m.txsCh)
   131  		m.logsSub = m.backend.SubscribeLogsEvent(m.logsCh)
   132  		m.rmLogsSub = m.backend.SubscribeRemovedLogsEvent(m.rmLogsCh)
   133  		m.pendingLogsSub = m.backend.SubscribePendingLogsEvent(m.pendingLogsCh)
   134  	}
   135  	m.chainSub = m.backend.SubscribeChainEvent(m.chainCh)
   136  
   137  	// Make sure none of the subscriptions are empty
   138  	if nodeCtx == common.ZONE_CTX && backend.ProcessingState() {
   139  		if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil || m.pendingLogsSub == nil {
   140  			log.Fatal("Subscribe for event system failed")
   141  		}
   142  	} else {
   143  		if m.chainSub == nil {
   144  			log.Fatal("Subscribe for event system failed")
   145  		}
   146  	}
   147  
   148  	go m.eventLoop()
   149  	return m
   150  }
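
        // newHeadsExampleSketch is an illustrative sketch, not part of the original
        // file: it shows how a caller might wire up the event system and watch for
        // imported blocks. The backend value is an assumption supplied by the caller.
        func newHeadsExampleSketch(backend Backend) {
        	es := NewEventSystem(backend, false /* lightMode */)
        	headers := make(chan *types.Header, chainEvChanSize)
        	sub := es.SubscribeNewHeads(headers)
        	defer sub.Unsubscribe()
        	for {
        		select {
        		case header := <-headers:
        			// Each value received is the header of a newly imported block.
        			fmt.Println("imported block", header.Hash())
        		case <-sub.Err():
        			// The error channel is closed once the filter is uninstalled.
        			return
        		}
        	}
        }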
   151  
   152  // Subscription is created when the client registers itself for a particular event.
   153  type Subscription struct {
   154  	ID        rpc.ID
   155  	f         *subscription
   156  	es        *EventSystem
   157  	unsubOnce sync.Once
   158  }
   159  
   160  // Err returns a channel that is closed when unsubscribed.
   161  func (sub *Subscription) Err() <-chan error {
   162  	return sub.f.err
   163  }
   164  
   165  // Unsubscribe uninstalls the subscription from the event broadcast loop.
   166  func (sub *Subscription) Unsubscribe() {
   167  	sub.unsubOnce.Do(func() {
   168  	uninstallLoop:
   169  		for {
   170  			// Write the uninstall request and consume logs/hashes. This prevents
   171  			// the eventLoop broadcast method from deadlocking when it writes to the
   172  			// filter's event channel while the subscription loop is waiting for
   173  			// this method to return (and thus not reading these events).
   174  			select {
   175  			case sub.es.uninstall <- sub.f:
   176  				break uninstallLoop
   177  			case <-sub.f.logs:
   178  			case <-sub.f.hashes:
   179  			case <-sub.f.headers:
   180  			}
   181  		}
   182  
   183  		// wait for filter to be uninstalled in work loop before returning
   184  		// this ensures that the manager won't use the event channel which
   185  		// will probably be closed by the client asap after this method returns.
   186  		<-sub.Err()
   187  	})
   188  }
   189  
   190  // subscribe installs the subscription in the event broadcast loop.
   191  func (es *EventSystem) subscribe(sub *subscription) *Subscription {
   192  	es.install <- sub
   193  	<-sub.installed
   194  	return &Subscription{ID: sub.id, f: sub, es: es}
   195  }
   196  
   197  // SubscribeLogs creates a subscription that will write all logs matching the
   198  // given criteria to the given logs channel. The default value for both the from
   199  // and to block is "latest". If fromBlock > toBlock an error is returned.
   200  func (es *EventSystem) SubscribeLogs(crit quai.FilterQuery, logs chan []*types.Log) (*Subscription, error) {
   201  	var from, to rpc.BlockNumber
   202  	if crit.FromBlock == nil {
   203  		from = rpc.LatestBlockNumber
   204  	} else {
   205  		from = rpc.BlockNumber(crit.FromBlock.Int64())
   206  	}
   207  	if crit.ToBlock == nil {
   208  		to = rpc.LatestBlockNumber
   209  	} else {
   210  		to = rpc.BlockNumber(crit.ToBlock.Int64())
   211  	}
   212  
   213  	// only interested in pending logs
   214  	if from == rpc.PendingBlockNumber && to == rpc.PendingBlockNumber {
   215  		return es.subscribePendingLogs(crit, logs), nil
   216  	}
   217  	// only interested in new mined logs
   218  	if from == rpc.LatestBlockNumber && to == rpc.LatestBlockNumber {
   219  		return es.subscribeLogs(crit, logs), nil
   220  	}
   221  	// only interested in mined logs within a specific block range
   222  	if from >= 0 && to >= 0 && to >= from {
   223  		return es.subscribeLogs(crit, logs), nil
   224  	}
   225  	// interested in mined logs from a specific block number, new logs and pending logs
   226  	if from >= rpc.LatestBlockNumber && to == rpc.PendingBlockNumber {
   227  		return es.subscribeMinedPendingLogs(crit, logs), nil
   228  	}
   229  	// interested in logs from a specific block number to new mined blocks
   230  	if from >= 0 && to == rpc.LatestBlockNumber {
   231  		return es.subscribeLogs(crit, logs), nil
   232  	}
   233  	return nil, fmt.Errorf("invalid from and to block combination: from > to")
   234  }
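
        // subscribeLogsExampleSketch is an illustrative sketch, not part of the original
        // file: it subscribes to newly mined logs for a single contract address and
        // drains the channel until the filter is uninstalled. The es and contract
        // values are assumptions supplied by the caller.
        func subscribeLogsExampleSketch(es *EventSystem, contract common.Address) error {
        	logsCh := make(chan []*types.Log, logsChanSize)
        	// Leaving FromBlock and ToBlock nil defaults both to "latest".
        	crit := quai.FilterQuery{Addresses: []common.Address{contract}}
        	sub, err := es.SubscribeLogs(crit, logsCh)
        	if err != nil {
        		return err
        	}
        	defer sub.Unsubscribe()
        	for {
        		select {
        		case logs := <-logsCh:
        			for _, l := range logs {
        				fmt.Println("matched log", l.TxHash, l.Index)
        			}
        		case <-sub.Err():
        			return nil
        		}
        	}
        }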
   235  
   236  // subscribeMinedPendingLogs creates a subscription that returns mined and
   237  // pending logs that match the given criteria.
   238  func (es *EventSystem) subscribeMinedPendingLogs(crit quai.FilterQuery, logs chan []*types.Log) *Subscription {
   239  	sub := &subscription{
   240  		id:        rpc.NewID(),
   241  		typ:       MinedAndPendingLogsSubscription,
   242  		logsCrit:  crit,
   243  		created:   time.Now(),
   244  		logs:      logs,
   245  		hashes:    make(chan []common.Hash),
   246  		headers:   make(chan *types.Header),
   247  		installed: make(chan struct{}),
   248  		err:       make(chan error),
   249  	}
   250  	return es.subscribe(sub)
   251  }
   252  
   253  // subscribeLogs creates a subscription that will write all logs matching the
   254  // given criteria to the given logs channel.
   255  func (es *EventSystem) subscribeLogs(crit quai.FilterQuery, logs chan []*types.Log) *Subscription {
   256  	sub := &subscription{
   257  		id:        rpc.NewID(),
   258  		typ:       LogsSubscription,
   259  		logsCrit:  crit,
   260  		created:   time.Now(),
   261  		logs:      logs,
   262  		hashes:    make(chan []common.Hash),
   263  		headers:   make(chan *types.Header),
   264  		installed: make(chan struct{}),
   265  		err:       make(chan error),
   266  	}
   267  	return es.subscribe(sub)
   268  }
   269  
   270  // subscribePendingLogs creates a subscription that writes contract event logs for
   271  // transactions that enter the transaction pool.
   272  func (es *EventSystem) subscribePendingLogs(crit quai.FilterQuery, logs chan []*types.Log) *Subscription {
   273  	sub := &subscription{
   274  		id:        rpc.NewID(),
   275  		typ:       PendingLogsSubscription,
   276  		logsCrit:  crit,
   277  		created:   time.Now(),
   278  		logs:      logs,
   279  		hashes:    make(chan []common.Hash),
   280  		headers:   make(chan *types.Header),
   281  		installed: make(chan struct{}),
   282  		err:       make(chan error),
   283  	}
   284  	return es.subscribe(sub)
   285  }
   286  
   287  // SubscribeNewHeads creates a subscription that writes the header of a block that is
   288  // imported into the chain.
   289  func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscription {
   290  	sub := &subscription{
   291  		id:        rpc.NewID(),
   292  		typ:       BlocksSubscription,
   293  		created:   time.Now(),
   294  		logs:      make(chan []*types.Log),
   295  		hashes:    make(chan []common.Hash),
   296  		headers:   headers,
   297  		installed: make(chan struct{}),
   298  		err:       make(chan error),
   299  	}
   300  	return es.subscribe(sub)
   301  }
   302  
   303  // SubscribePendingTxs creates a subscription that writes transaction hashes for
   304  // transactions that enter the transaction pool.
   305  func (es *EventSystem) SubscribePendingTxs(hashes chan []common.Hash) *Subscription {
   306  	sub := &subscription{
   307  		id:        rpc.NewID(),
   308  		typ:       PendingTransactionsSubscription,
   309  		created:   time.Now(),
   310  		logs:      make(chan []*types.Log),
   311  		hashes:    hashes,
   312  		headers:   make(chan *types.Header),
   313  		installed: make(chan struct{}),
   314  		err:       make(chan error),
   315  	}
   316  	return es.subscribe(sub)
   317  }
   318  
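        // filterIndex buckets the installed subscriptions by Type so the event loops
        // can look up the relevant filters for each incoming event.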
   319  type filterIndex map[Type]map[rpc.ID]*subscription
   320  
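        // handleLogs forwards newly mined logs that match a subscription's criteria to
        // that subscription's logs channel.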
   321  func (es *EventSystem) handleLogs(filters filterIndex, ev []*types.Log) {
   322  	if len(ev) == 0 {
   323  		return
   324  	}
   325  	for _, f := range filters[LogsSubscription] {
   326  		matchedLogs := filterLogs(ev, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
   327  		if len(matchedLogs) > 0 {
   328  			f.logs <- matchedLogs
   329  		}
   330  	}
   331  }
   332  
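        // handlePendingLogs forwards pending-block logs that match a subscription's
        // criteria to that subscription's logs channel.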
   333  func (es *EventSystem) handlePendingLogs(filters filterIndex, ev []*types.Log) {
   334  	if len(ev) == 0 {
   335  		return
   336  	}
   337  	for _, f := range filters[PendingLogsSubscription] {
   338  		matchedLogs := filterLogs(ev, nil, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
   339  		if len(matchedLogs) > 0 {
   340  			f.logs <- matchedLogs
   341  		}
   342  	}
   343  }
   344  
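        // handleRemovedLogs forwards logs removed by a chain reorganisation to the log
        // subscriptions whose criteria they match.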
   345  func (es *EventSystem) handleRemovedLogs(filters filterIndex, ev core.RemovedLogsEvent) {
   346  	for _, f := range filters[LogsSubscription] {
   347  		matchedLogs := filterLogs(ev.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
   348  		if len(matchedLogs) > 0 {
   349  			f.logs <- matchedLogs
   350  		}
   351  	}
   352  }
   353  
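        // handleTxsEvent forwards the hashes of newly pending transactions to every
        // pending-transaction subscription.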
   354  func (es *EventSystem) handleTxsEvent(filters filterIndex, ev core.NewTxsEvent) {
   355  	hashes := make([]common.Hash, 0, len(ev.Txs))
   356  	for _, tx := range ev.Txs {
   357  		hashes = append(hashes, tx.Hash())
   358  	}
   359  	for _, f := range filters[PendingTransactionsSubscription] {
   360  		f.hashes <- hashes
   361  	}
   362  }
   363  
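        // handleChainEvent forwards the header of a newly imported block to every block
        // subscription and, in light mode, re-filters logs for the affected headers.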
   364  func (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent) {
   365  	for _, f := range filters[BlocksSubscription] {
   366  		f.headers <- ev.Block.Header()
   367  	}
   368  	if es.lightMode && len(filters[LogsSubscription]) > 0 {
   369  		es.lightFilterNewHead(ev.Block.Header(), func(header *types.Header, remove bool) {
   370  			for _, f := range filters[LogsSubscription] {
   371  				if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 {
   372  					f.logs <- matchedLogs
   373  				}
   374  			}
   375  		})
   376  	}
   377  }
   378  
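        // lightFilterNewHead walks the old and new heads back to their common ancestor
        // and invokes callBack for every rolled-back header (remove=true) and for every
        // newly added header (remove=false).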
   379  func (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func(*types.Header, bool)) {
   380  	oldh := es.lastHead
   381  	es.lastHead = newHeader
   382  	if oldh == nil {
   383  		return
   384  	}
   385  	newh := newHeader
   386  	// find common ancestor, create list of rolled back and new block hashes
   387  	var oldHeaders, newHeaders []*types.Header
   388  	for oldh.Hash() != newh.Hash() {
   389  		if oldh.Number().Uint64() >= newh.Number().Uint64() {
   390  			oldHeaders = append(oldHeaders, oldh)
   391  			oldh = rawdb.ReadHeader(es.backend.ChainDb(), oldh.ParentHash(), oldh.Number().Uint64()-1)
   392  		}
   393  		if oldh.Number().Uint64() < newh.Number().Uint64() {
   394  			newHeaders = append(newHeaders, newh)
   395  			newh = rawdb.ReadHeader(es.backend.ChainDb(), newh.ParentHash(), newh.Number().Uint64()-1)
   396  			if newh == nil {
   397  				// happens during CHT syncing, nothing to do
   398  				newh = oldh
   399  			}
   400  		}
   401  	}
   402  	// roll back old blocks
   403  	for _, h := range oldHeaders {
   404  		callBack(h, true)
   405  	}
   406  	// check new blocks (array is in reverse order)
   407  	for i := len(newHeaders) - 1; i >= 0; i-- {
   408  		callBack(newHeaders[i], false)
   409  	}
   410  }
   411  
   412  // lightFilterLogs filters the logs of a single header in light client mode.
   413  func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*types.Log {
   414  	bloom, err := es.backend.GetBloom(header.Hash())
   415  	if err != nil {
   416  		return nil
   417  	}
   418  	if bloomFilter(*bloom, addresses, topics) {
   419  		// Get the logs of the block
   420  		ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
   421  		defer cancel()
   422  		logsList, err := es.backend.GetLogs(ctx, header.Hash())
   423  		if err != nil {
   424  			return nil
   425  		}
   426  		var unfiltered []*types.Log
   427  		for _, logs := range logsList {
   428  			for _, log := range logs {
   429  				logcopy := *log
   430  				logcopy.Removed = remove
   431  				unfiltered = append(unfiltered, &logcopy)
   432  			}
   433  		}
   434  		logs := filterLogs(unfiltered, nil, nil, addresses, topics)
   435  		if len(logs) > 0 && logs[0].TxHash == (common.Hash{}) {
   436  			// We have matching but non-derived logs
   437  			receipts, err := es.backend.GetReceipts(ctx, header.Hash())
   438  			if err != nil {
   439  				return nil
   440  			}
   441  			unfiltered = unfiltered[:0]
   442  			for _, receipt := range receipts {
   443  				for _, log := range receipt.Logs {
   444  					logcopy := *log
   445  					logcopy.Removed = remove
   446  					unfiltered = append(unfiltered, &logcopy)
   447  				}
   448  			}
   449  			logs = filterLogs(unfiltered, nil, nil, addresses, topics)
   450  		}
   451  		return logs
   452  	}
   453  	return nil
   454  }
   455  
   456  // eventLoop (un)installs filters and processes backend events.
   457  func (es *EventSystem) eventLoop() {
   458  	nodeCtx := common.NodeLocation.Context()
   459  	// Ensure all subscriptions get cleaned up
   460  	defer func() {
   461  		if nodeCtx == common.ZONE_CTX && es.backend.ProcessingState() {
   462  			es.txsSub.Unsubscribe()
   463  			es.logsSub.Unsubscribe()
   464  			es.rmLogsSub.Unsubscribe()
   465  			es.pendingLogsSub.Unsubscribe()
   466  		}
   467  		es.chainSub.Unsubscribe()
   468  	}()
   469  
   470  	index := make(filterIndex)
   471  	for i := UnknownSubscription; i < LastIndexSubscription; i++ {
   472  		index[i] = make(map[rpc.ID]*subscription)
   473  	}
   474  
   475  	if nodeCtx == common.ZONE_CTX && es.backend.ProcessingState() {
   476  		go es.handleZoneEventLoop(index)
   477  	}
   478  
   479  	for {
   480  		select {
   481  		case ev := <-es.chainCh:
   482  			es.handleChainEvent(index, ev)
   483  
   484  		case f := <-es.install:
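        			// MinedAndPendingLogs subscriptions are not indexed here; they are split
        			// into the logs and pending logs buckets by handleZoneEventLoop.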
   485  			if f.typ != MinedAndPendingLogsSubscription {
   486  				index[f.typ][f.id] = f
   487  			}
   488  			close(f.installed)
   489  
   490  		case f := <-es.uninstall:
   491  			if f.typ != MinedAndPendingLogsSubscription {
   492  				delete(index[f.typ], f.id)
   493  			}
   494  			close(f.err)
   495  
   496  		case <-es.chainSub.Err():
   497  			return
   498  		}
   499  	}
   500  }
   501  
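        // handleZoneEventLoop processes the zone-specific events (new transactions,
        // logs, removed logs and pending logs) and services (un)install requests,
        // splitting MinedAndPendingLogs subscriptions into the logs and pending logs
        // buckets. It runs alongside eventLoop in zone contexts that process state.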
   502  func (es *EventSystem) handleZoneEventLoop(index filterIndex) {
   503  	for {
   504  		select {
   505  		case ev := <-es.txsCh:
   506  			es.handleTxsEvent(index, ev)
   507  		case ev := <-es.logsCh:
   508  			es.handleLogs(index, ev)
   509  		case ev := <-es.rmLogsCh:
   510  			es.handleRemovedLogs(index, ev)
   511  		case ev := <-es.pendingLogsCh:
   512  			es.handlePendingLogs(index, ev)
   513  		case f := <-es.install:
   514  			if f.typ == MinedAndPendingLogsSubscription {
   515  				// this subscription type is indexed in both the logs and pending logs buckets
   516  				index[LogsSubscription][f.id] = f
   517  				index[PendingLogsSubscription][f.id] = f
   518  			} else {
   519  				index[f.typ][f.id] = f
   520  			}
   521  			close(f.installed)
   522  
   523  		case f := <-es.uninstall:
   524  			if f.typ == MinedAndPendingLogsSubscription {
   525  				// remove this subscription type from both the logs and pending logs buckets
   526  				delete(index[LogsSubscription], f.id)
   527  				delete(index[PendingLogsSubscription], f.id)
   528  			} else {
   529  				delete(index[f.typ], f.id)
   530  			}
   531  			close(f.err)
   532  		// System stopped
   533  		case <-es.txsSub.Err():
   534  			return
   535  		case <-es.logsSub.Err():
   536  			return
   537  		case <-es.rmLogsSub.Err():
   538  			return
   539  		}
   540  	}
   541  }