github.com/beyonderyue/gochain@v2.2.26+incompatible/eth/filters/filter_system.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package filters implements an ethereum filtering system for blocks,
// transactions and log events.
package filters

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/gochain-io/gochain"
	"github.com/gochain-io/gochain/common"
	"github.com/gochain-io/gochain/core"
	"github.com/gochain-io/gochain/core/rawdb"
	"github.com/gochain-io/gochain/core/types"
	"github.com/gochain-io/gochain/event"
	"github.com/gochain-io/gochain/rpc"
)

// Type determines the kind of filter and is used to put the filter into
// the correct bucket when added.
type Type byte

const (
	// UnknownSubscription indicates an unknown subscription type
	UnknownSubscription Type = iota
	// LogsSubscription queries for new or removed (chain reorg) logs
	LogsSubscription
	// PendingLogsSubscription queries for logs in pending blocks
	PendingLogsSubscription
	// MinedAndPendingLogsSubscription queries for logs in mined and pending blocks.
	MinedAndPendingLogsSubscription
	// PendingTransactionsSubscription queries tx hashes for pending
	// transactions entering the pending state
	PendingTransactionsSubscription
	// BlocksSubscription queries hashes for blocks that are imported
	BlocksSubscription
	// LastIndexSubscription keeps track of the last index
	LastIndexSubscription
)

const (

	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 16384
	// rmLogsChanSize is the size of channel listening to RemovedLogsEvent.
	rmLogsChanSize = 32
	// logsChanSize is the size of channel listening to LogsEvent.
	logsChanSize = 32
	// chainEvChanSize is the size of channel listening to ChainEvent.
	chainEvChanSize = 32
)

var (
	ErrInvalidSubscriptionID = errors.New("invalid id")
)

type subscription struct {
	id        rpc.ID
	typ       Type
	created   time.Time
	logsCrit  gochain.FilterQuery
	logs      chan []*types.Log
	hashes    chan []common.Hash
	headers   chan *types.Header
	installed chan struct{} // closed when the filter is installed
	err       chan error    // closed when the filter is uninstalled
}

// EventSystem creates subscriptions, processes events and broadcasts them to the
// subscriptions which match the subscription criteria.
type EventSystem struct {
	mux       *event.TypeMux
	backend   Backend
	lightMode bool
	lastHead  *types.Header

	// Subscriptions
	pendingLogSub *event.TypeMuxSubscription // Subscription for pending log event

	// Channels
	install   chan *subscription         // install filter for event notification
	uninstall chan *subscription         // remove filter for event notification
	txsCh     chan core.NewTxsEvent      // Channel to receive new transactions event
	logsCh    chan []*types.Log          // Channel to receive new log event
	rmLogsCh  chan core.RemovedLogsEvent // Channel to receive removed log event
	chainCh   chan core.ChainEvent       // Channel to receive new chain event
}

// NewEventSystem creates a new manager that listens for events on the given mux,
// parses and filters them. The work loop holds its own index that is used to
// forward events to the installed filters.
//
// The returned manager has a loop that runs until the given mux is stopped or
// one of the backend event channels is closed.
func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventSystem {
	m := &EventSystem{
		mux:       mux,
		backend:   backend,
		lightMode: lightMode,
		install:   make(chan *subscription),
		uninstall: make(chan *subscription),
		txsCh:     make(chan core.NewTxsEvent, txChanSize),
		logsCh:    make(chan []*types.Log, logsChanSize),
		rmLogsCh:  make(chan core.RemovedLogsEvent, rmLogsChanSize),
		chainCh:   make(chan core.ChainEvent, chainEvChanSize),
	}

	// Subscribe events
	const name = "eth/filters.EventSystem"
	m.backend.SubscribeNewTxsEvent(m.txsCh, name)
	m.backend.SubscribeLogsEvent(m.logsCh, name)
	m.backend.SubscribeRemovedLogsEvent(m.rmLogsCh, name)
	m.backend.SubscribeChainEvent(m.chainCh, name)
	// TODO(rjl493456442): use feed to subscribe pending log event
	m.pendingLogSub = m.mux.Subscribe(core.PendingLogsEvent{})

	go m.eventLoop()
	return m
}
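
// An illustrative sketch (an assumption, not part of the original file) of how
// an embedding node might wire this up; mux and backend stand for values the
// node already owns:
//
//	es := NewEventSystem(mux, backend, false /* lightMode */)
//	headers := make(chan *types.Header)
//	sub := es.SubscribeNewHeads(headers)
//	defer sub.Unsubscribe()
//	// read imported block headers from the headers channel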

// Subscription is created when the client registers itself for a particular event.
type Subscription struct {
	ID        rpc.ID
	f         *subscription
	es        *EventSystem
	unsubOnce sync.Once
}

// Err returns a channel that is closed when unsubscribed.
func (sub *Subscription) Err() <-chan error {
	return sub.f.err
}

// Unsubscribe uninstalls the subscription from the event broadcast loop.
func (sub *Subscription) Unsubscribe() {
	sub.unsubOnce.Do(func() {
	uninstallLoop:
		for {
			// Write the uninstall request and consume logs/hashes. This prevents
			// the eventLoop broadcast method from deadlocking when it writes to a
			// filter event channel while the subscription loop is waiting for
			// this method to return (and thus not reading these events).
			select {
			case sub.es.uninstall <- sub.f:
				break uninstallLoop
			case <-sub.f.logs:
			case <-sub.f.hashes:
			case <-sub.f.headers:
			}
		}

		// Wait for the filter to be uninstalled in the work loop before returning.
		// This ensures that the manager won't use the event channel, which will
		// probably be closed by the client asap after this method returns.
		<-sub.Err()
	})
}
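
// An illustrative sketch (an assumption, not part of the original file) of the
// shutdown order the drain loop above makes safe: a client may stop reading its
// channel, call Unsubscribe, and only then close the channel.
//
//	logs := make(chan []*types.Log)
//	sub, _ := es.SubscribeLogs(gochain.FilterQuery{}, logs) // error handling elided
//	// ... stop reading from logs ...
//	sub.Unsubscribe() // drains pending deliveries and waits for the uninstall
//	close(logs)       // safe: the event loop no longer writes to this channel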

// subscribe installs the subscription in the event broadcast loop.
func (es *EventSystem) subscribe(sub *subscription) *Subscription {
	es.install <- sub
	<-sub.installed
	return &Subscription{ID: sub.id, f: sub, es: es}
}

// SubscribeLogs creates a subscription that will write all logs matching the
// given criteria to the given logs channel. The default value for the from and
// to block is "latest". If fromBlock > toBlock an error is returned.
func (es *EventSystem) SubscribeLogs(crit gochain.FilterQuery, logs chan []*types.Log) (*Subscription, error) {
	var from, to rpc.BlockNumber
	if crit.FromBlock == nil {
		from = rpc.LatestBlockNumber
	} else {
		from = rpc.BlockNumber(crit.FromBlock.Int64())
	}
	if crit.ToBlock == nil {
		to = rpc.LatestBlockNumber
	} else {
		to = rpc.BlockNumber(crit.ToBlock.Int64())
	}

	// only interested in pending logs
	if from == rpc.PendingBlockNumber && to == rpc.PendingBlockNumber {
		return es.subscribePendingLogs(crit, logs), nil
	}
	// only interested in new mined logs
	if from == rpc.LatestBlockNumber && to == rpc.LatestBlockNumber {
		return es.subscribeLogs(crit, logs), nil
	}
	// only interested in mined logs within a specific block range
	if from >= 0 && to >= 0 && to >= from {
		return es.subscribeLogs(crit, logs), nil
	}
	// interested in mined logs from a specific block number, new logs and pending logs
	if from >= rpc.LatestBlockNumber && to == rpc.PendingBlockNumber {
		return es.subscribeMinedPendingLogs(crit, logs), nil
	}
	// interested in logs from a specific block number to new mined blocks
	if from >= 0 && to == rpc.LatestBlockNumber {
		return es.subscribeLogs(crit, logs), nil
	}
	return nil, fmt.Errorf("invalid from and to block combination: from > to")
}
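
// An illustrative sketch (an assumption, not part of the original file) of the
// range handling above; contractAddr and the block number are made up for the
// example, and FromBlock/ToBlock are *big.Int values with nil meaning "latest":
//
//	logs := make(chan []*types.Log)
//	// mined logs from block 100 up to the latest block, then new blocks as they arrive
//	sub, err := es.SubscribeLogs(gochain.FilterQuery{
//		FromBlock: big.NewInt(100),
//		Addresses: []common.Address{contractAddr},
//	}, logs)
//
// Setting both bounds to the pending block number instead selects the
// pending-logs subscription branch.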

// subscribeMinedPendingLogs creates a subscription that returns mined and
// pending logs that match the given criteria.
func (es *EventSystem) subscribeMinedPendingLogs(crit gochain.FilterQuery, logs chan []*types.Log) *Subscription {
	sub := &subscription{
		id:        rpc.NewID(),
		typ:       MinedAndPendingLogsSubscription,
		logsCrit:  crit,
		created:   time.Now(),
		logs:      logs,
		hashes:    make(chan []common.Hash),
		headers:   make(chan *types.Header),
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}

// subscribeLogs creates a subscription that will write all logs matching the
// given criteria to the given logs channel.
func (es *EventSystem) subscribeLogs(crit gochain.FilterQuery, logs chan []*types.Log) *Subscription {
	sub := &subscription{
		id:        rpc.NewID(),
		typ:       LogsSubscription,
		logsCrit:  crit,
		created:   time.Now(),
		logs:      logs,
		hashes:    make(chan []common.Hash),
		headers:   make(chan *types.Header),
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}

// subscribePendingLogs creates a subscription that writes pending logs matching
// the given criteria to the given logs channel.
func (es *EventSystem) subscribePendingLogs(crit gochain.FilterQuery, logs chan []*types.Log) *Subscription {
	sub := &subscription{
		id:        rpc.NewID(),
		typ:       PendingLogsSubscription,
		logsCrit:  crit,
		created:   time.Now(),
		logs:      logs,
		hashes:    make(chan []common.Hash),
		headers:   make(chan *types.Header),
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}

// SubscribeNewHeads creates a subscription that writes the header of a block that is
// imported in the chain.
func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscription {
	sub := &subscription{
		id:        rpc.NewID(),
		typ:       BlocksSubscription,
		created:   time.Now(),
		logs:      make(chan []*types.Log),
		hashes:    make(chan []common.Hash),
		headers:   headers,
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}

// SubscribePendingTxs creates a subscription that writes transaction hashes for
// transactions that enter the transaction pool.
func (es *EventSystem) SubscribePendingTxs(hashes chan []common.Hash) *Subscription {
	sub := &subscription{
		id:        rpc.NewID(),
		typ:       PendingTransactionsSubscription,
		created:   time.Now(),
		logs:      make(chan []*types.Log),
		hashes:    hashes,
		headers:   make(chan *types.Header),
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}
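
// An illustrative sketch (an assumption, not part of the original file) of a
// consumer loop for this subscription; quit is a channel supplied by the caller:
//
//	hashes := make(chan []common.Hash)
//	sub := es.SubscribePendingTxs(hashes)
//	defer sub.Unsubscribe()
//	for {
//		select {
//		case batch := <-hashes:
//			_ = batch // forward the pending transaction hashes to the client
//		case <-sub.Err():
//			return // closed once the subscription is uninstalled
//		case <-quit:
//			return
//		}
//	}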

type filterIndex map[Type]map[rpc.ID]*subscription

func (es *EventSystem) broadcastLogs(filters filterIndex, ev []*types.Log) {
	if ev == nil {
		return
	}

	if len(ev) > 0 {
		for _, f := range filters[LogsSubscription] {
			if matchedLogs := filterLogs(ev, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 {
				f.logs <- matchedLogs
			}
		}
	}
}

func (es *EventSystem) broadcastRemovedLogs(filters filterIndex, ev core.RemovedLogsEvent) {
	for _, f := range filters[LogsSubscription] {
		if matchedLogs := filterLogs(ev.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 {
			f.logs <- matchedLogs
		}
	}
}

func (es *EventSystem) broadcastTypeMux(filters filterIndex, ev *event.TypeMuxEvent) {
	if ev == nil {
		return
	}
	switch muxe := ev.Data.(type) {
	case core.PendingLogsEvent:
		for _, f := range filters[PendingLogsSubscription] {
			if ev.Time.After(f.created) {
				if matchedLogs := filterLogs(muxe.Logs, nil, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 {
					f.logs <- matchedLogs
				}
			}
		}
	}
}

func (es *EventSystem) broadcastNewTxs(filters filterIndex, ev core.NewTxsEvent) {
	hashes := make([]common.Hash, 0, len(ev.Txs))
	for _, tx := range ev.Txs {
		hashes = append(hashes, tx.Hash())
	}
	for _, f := range filters[PendingTransactionsSubscription] {
		f.hashes <- hashes
	}
}

func (es *EventSystem) broadcastChain(filters filterIndex, ev core.ChainEvent) {
	for _, f := range filters[BlocksSubscription] {
		f.headers <- ev.Block.Header()
	}
	if es.lightMode && len(filters[LogsSubscription]) > 0 {
		es.lightFilterNewHead(ev.Block.Header(), func(header *types.Header, remove bool) {
			for _, f := range filters[LogsSubscription] {
				if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 {
					f.logs <- matchedLogs
				}
			}
		})
	}
}

func (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func(*types.Header, bool)) {
	oldh := es.lastHead
	es.lastHead = newHeader
	if oldh == nil {
		return
	}
	newh := newHeader
	// find the common ancestor, creating lists of rolled-back and new block headers
	var oldHeaders, newHeaders []*types.Header
	for oldh.Hash() != newh.Hash() {
		if oldh.Number.Uint64() >= newh.Number.Uint64() {
			oldHeaders = append(oldHeaders, oldh)
			oldh = rawdb.ReadHeader(es.backend.ChainDb().HeaderTable(), oldh.ParentHash, oldh.Number.Uint64()-1)
		}
		if oldh.Number.Uint64() < newh.Number.Uint64() {
			newHeaders = append(newHeaders, newh)
			newh = rawdb.ReadHeader(es.backend.ChainDb().HeaderTable(), newh.ParentHash, newh.Number.Uint64()-1)
			if newh == nil {
				// happens when CHT syncing, nothing to do
				newh = oldh
			}
		}
	}
	// roll back old blocks
	for _, h := range oldHeaders {
		callBack(h, true)
	}
	// check new blocks (array is in reverse order)
	for i := len(newHeaders) - 1; i >= 0; i-- {
		callBack(newHeaders[i], false)
	}
}
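
// A worked example of the walk above (illustrative, not from the original file):
// with the old head at block #100 on fork A and the new head at #101 on fork B,
// both descending from common block #98, the loop collects
// oldHeaders = [A100, A99], which are rolled back via callBack(h, true), and
// newHeaders = [B101, B100, B99], which are replayed oldest-first via
// callBack(h, false).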

// lightFilterLogs filters the logs of a single header in light client mode.
func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*types.Log {
	if bloomFilter(header.Bloom, addresses, topics) {
		// Get the logs of the block
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
		defer cancel()
		logsList, err := es.backend.GetLogs(ctx, header.Hash())
		if err != nil {
			return nil
		}
		var unfiltered []*types.Log
		for _, logs := range logsList {
			for _, log := range logs {
				logcopy := *log
				logcopy.Removed = remove
				unfiltered = append(unfiltered, &logcopy)
			}
		}
		logs := filterLogs(unfiltered, nil, nil, addresses, topics)
		if len(logs) > 0 && logs[0].TxHash == (common.Hash{}) {
			// We have matching but non-derived logs
			receipts, err := es.backend.GetReceipts(ctx, header.Hash())
			if err != nil {
				return nil
			}
			unfiltered = unfiltered[:0]
			for _, receipt := range receipts {
				for _, log := range receipt.Logs {
					logcopy := *log
					logcopy.Removed = remove
					unfiltered = append(unfiltered, &logcopy)
				}
			}
			logs = filterLogs(unfiltered, nil, nil, addresses, topics)
		}
		return logs
	}
	return nil
}

// eventLoop (un)installs filters and processes mux events.
func (es *EventSystem) eventLoop() {
	// Ensure all subscriptions get cleaned up
	defer func() {
		es.pendingLogSub.Unsubscribe()
		es.backend.UnsubscribeNewTxsEvent(es.txsCh)
		es.backend.UnsubscribeLogsEvent(es.logsCh)
		es.backend.UnsubscribeRemovedLogsEvent(es.rmLogsCh)
		es.backend.UnsubscribeChainEvent(es.chainCh)
	}()

	index := make(filterIndex)
	for i := UnknownSubscription; i < LastIndexSubscription; i++ {
		index[i] = make(map[rpc.ID]*subscription)
	}

	for {
		select {
		// Handle subscribed events
		case ev, ok := <-es.txsCh:
			if !ok {
				return
			}
			es.broadcastNewTxs(index, ev)
		case ev, ok := <-es.logsCh:
			if !ok {
				return
			}
			es.broadcastLogs(index, ev)
		case ev, ok := <-es.rmLogsCh:
			if !ok {
				return
			}
			es.broadcastRemovedLogs(index, ev)
		case ev, ok := <-es.chainCh:
			if !ok {
				return
			}
			es.broadcastChain(index, ev)
		case ev, active := <-es.pendingLogSub.Chan():
			if !active { // system stopped
				return
			}
			es.broadcastTypeMux(index, ev)

		case f := <-es.install:
			if f.typ == MinedAndPendingLogsSubscription {
				// this type is installed as both a logs and a pending logs subscription
				index[LogsSubscription][f.id] = f
				index[PendingLogsSubscription][f.id] = f
			} else {
				index[f.typ][f.id] = f
			}
			close(f.installed)

		case f := <-es.uninstall:
			if f.typ == MinedAndPendingLogsSubscription {
				// this type was installed as both a logs and a pending logs subscription
				delete(index[LogsSubscription], f.id)
				delete(index[PendingLogsSubscription], f.id)
			} else {
				delete(index[f.typ], f.id)
			}
			close(f.err)
		}
	}
}