github.com/amazechain/amc@v0.1.3/internal/api/filters/filter_system.go

package filters

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/amazechain/amc/common"
	"github.com/amazechain/amc/common/block"
	"github.com/amazechain/amc/common/types"
	"github.com/amazechain/amc/log"
	event "github.com/amazechain/amc/modules/event/v2"
	"github.com/amazechain/amc/modules/rawdb"
	"github.com/amazechain/amc/modules/rpc/jsonrpc"
	"github.com/ledgerwatch/erigon-lib/kv"
)

// Type determines the kind of filter and is used to put the filter into
// the correct bucket when added.
type Type byte

const (
	// UnknownSubscription indicates an unknown subscription type
	UnknownSubscription Type = iota
	// LogsSubscription queries for new or removed (chain reorg) logs
	LogsSubscription
	// PendingLogsSubscription queries for logs in pending blocks
	PendingLogsSubscription
	// MinedAndPendingLogsSubscription queries for logs in mined and pending blocks.
	MinedAndPendingLogsSubscription
	// PendingTransactionsSubscription queries tx hashes for pending
	// transactions entering the pending state
	PendingTransactionsSubscription
	// BlocksSubscription queries hashes for blocks that are imported
	BlocksSubscription
	// LastIndexSubscription keeps track of the last index
	LastIndexSubscription
)

const (
	// txChanSize is the size of channel listening to NewTxsEvent.
	// The number is referenced from the size of tx pool.
	txChanSize = 4096
	// rmLogsChanSize is the size of channel listening to RemovedLogsEvent.
	rmLogsChanSize = 10
	// logsChanSize is the size of channel listening to LogsEvent.
	logsChanSize = 10
	// chainEvChanSize is the size of channel listening to ChainEvent.
	chainEvChanSize = 10
)
    51  
    52  type subscription struct {
    53  	id        jsonrpc.ID
    54  	typ       Type
    55  	created   time.Time
    56  	logsCrit  FilterCriteria
    57  	logs      chan []*block.Log
    58  	hashes    chan []types.Hash
    59  	headers   chan block.IHeader
    60  	installed chan struct{} // closed when the filter is installed
    61  	err       chan error    // closed when the filter is uninstalled
    62  }
    63  
    64  // EventSystem creates subscriptions, processes events and broadcasts them to the
    65  // subscription which match the subscription criteria.
    66  type EventSystem struct {
    67  	api       Api
    68  	lightMode bool
    69  	lastHead  block.IHeader
    70  
    71  	// Subscriptions
    72  	txsSub         event.Subscription // Subscription for new transaction event
    73  	logsSub        event.Subscription // Subscription for new log event
    74  	rmLogsSub      event.Subscription // Subscription for removed log event
    75  	pendingLogsSub event.Subscription // Subscription for pending log event
    76  	chainSub       event.Subscription // Subscription for new chain event
    77  
    78  	// Channels
    79  	install       chan *subscription              // install filter for event notification
    80  	uninstall     chan *subscription              // remove filter for event notification
    81  	txsCh         chan common.NewTxsEvent         // Channel to receive new transactions event
    82  	logsCh        chan common.NewLogsEvent        // Channel to receive new log event
    83  	pendingLogsCh chan common.NewPendingLogsEvent // Channel to receive new log event
    84  	rmLogsCh      chan common.RemovedLogsEvent    // Channel to receive removed log event
    85  	chainCh       chan common.ChainHighestBlock   // Channel to receive new chain event
    86  }

// NewEventSystem creates a new manager that listens for events on the global
// event feeds, parses and filters them, and forwards them to the installed
// filters. The work loop holds its own index that is used to forward events
// to filters.
//
// The returned manager runs an event loop that exits when any of the
// underlying event subscriptions is closed.
func NewEventSystem(api Api) *EventSystem {

	m := &EventSystem{
		api:           api,
		lightMode:     false,
		install:       make(chan *subscription),
		uninstall:     make(chan *subscription),
		txsCh:         make(chan common.NewTxsEvent),
		logsCh:        make(chan common.NewLogsEvent),
		rmLogsCh:      make(chan common.RemovedLogsEvent),
		pendingLogsCh: make(chan common.NewPendingLogsEvent),
		chainCh:       make(chan common.ChainHighestBlock),
	}

	// Subscribe events
	m.txsSub = event.GlobalEvent.Subscribe(m.txsCh)
	m.logsSub = event.GlobalEvent.Subscribe(m.logsCh)
	m.rmLogsSub = event.GlobalEvent.Subscribe(m.rmLogsCh)
	m.chainSub = event.GlobalEvent.Subscribe(m.chainCh)
	m.pendingLogsSub = event.GlobalEvent.Subscribe(m.pendingLogsCh)

	// Make sure none of the subscriptions are nil
	if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil || m.pendingLogsSub == nil {
		log.Error("Subscribe for event system failed")
	}

	go m.eventLoop()
	return m
}
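
// Illustrative only: a minimal sketch of wiring up the event system, assuming
// a value `api` satisfying this package's Api interface is already available
// (the name `api` is hypothetical here). NewEventSystem subscribes to the
// global event feeds and starts its event loop in a goroutine, so no further
// setup is needed before creating subscriptions:
//
//	es := NewEventSystem(api)
//	headers := make(chan block.IHeader)
//	sub := es.SubscribeNewHeads(headers)
//	defer sub.Unsubscribe()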

// Subscription is created when the client registers itself for a particular event.
type Subscription struct {
	ID        jsonrpc.ID
	f         *subscription
	es        *EventSystem
	unsubOnce sync.Once
}

// Err returns a channel that is closed when unsubscribed.
func (sub *Subscription) Err() <-chan error {
	return sub.f.err
}

// Unsubscribe uninstalls the subscription from the event broadcast loop.
func (sub *Subscription) Unsubscribe() {
	sub.unsubOnce.Do(func() {
	uninstallLoop:
		for {
			// write uninstall request and consume logs/hashes. This prevents
			// the eventLoop broadcast method from deadlocking when writing to
			// the filter event channel while the subscription loop is waiting
			// for this method to return (and thus not reading these events).
			select {
			case sub.es.uninstall <- sub.f:
				break uninstallLoop
			case <-sub.f.logs:
			case <-sub.f.hashes:
			case <-sub.f.headers:
			}
		}

		// wait for filter to be uninstalled in work loop before returning
		// this ensures that the manager won't use the event channel which
		// will probably be closed by the client asap after this method returns.
		<-sub.Err()
	})
}
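
// A minimal consumer sketch (illustrative only) showing the intended
// Err/Unsubscribe contract: read from the data channel until Err is closed,
// and rely on the drain logic above so Unsubscribe never blocks the event
// loop. The names `sub` and `hashes` are assumed to come from a prior
// SubscribePendingTxs call.
//
//	go func() {
//		defer sub.Unsubscribe()
//		for {
//			select {
//			case hs := <-hashes:
//				_ = hs // process the pending tx hashes
//			case <-sub.Err():
//				return // filter was uninstalled
//			}
//		}
//	}()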

// subscribe installs the subscription in the event broadcast loop.
func (es *EventSystem) subscribe(sub *subscription) *Subscription {
	es.install <- sub
	<-sub.installed
	return &Subscription{ID: sub.id, f: sub, es: es}
}

// SubscribeLogs creates a subscription that will write all logs matching the
// given criteria to the given logs channel. The default value for the from and
// to block is "latest". If fromBlock > toBlock, an error is returned.
func (es *EventSystem) SubscribeLogs(crit FilterCriteria, logs chan []*block.Log) (*Subscription, error) {
	var from, to jsonrpc.BlockNumber
	if crit.FromBlock == nil {
		from = jsonrpc.LatestBlockNumber
	} else {
		from = jsonrpc.BlockNumber(crit.FromBlock.Int64())
	}
	if crit.ToBlock == nil {
		to = jsonrpc.LatestBlockNumber
	} else {
		to = jsonrpc.BlockNumber(crit.ToBlock.Int64())
	}

	// only interested in pending logs
	if from == jsonrpc.PendingBlockNumber && to == jsonrpc.PendingBlockNumber {
		return es.subscribePendingLogs(crit, logs), nil
	}
	// only interested in new mined logs
	if from == jsonrpc.LatestBlockNumber && to == jsonrpc.LatestBlockNumber {
		return es.subscribeLogs(crit, logs), nil
	}
	// only interested in mined logs within a specific block range
	if from >= 0 && to >= 0 && to >= from {
		return es.subscribeLogs(crit, logs), nil
	}
	// interested in mined logs from a specific block number, new logs and pending logs
	if from >= jsonrpc.LatestBlockNumber && to == jsonrpc.PendingBlockNumber {
		return es.subscribeMinedPendingLogs(crit, logs), nil
	}
	// interested in logs from a specific block number to new mined blocks
	if from >= 0 && to == jsonrpc.LatestBlockNumber {
		return es.subscribeLogs(crit, logs), nil
	}
	return nil, fmt.Errorf("invalid from and to block combination: from > to")
}
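
// Illustrative only: subscribing to mined logs in a fixed block range and
// draining the channel, assuming `es` was created with NewEventSystem as
// sketched earlier. FromBlock/ToBlock are assumed to be *big.Int values, as
// suggested by the Int64 calls above; the range values are placeholders.
//
//	logs := make(chan []*block.Log)
//	sub, err := es.SubscribeLogs(FilterCriteria{
//		FromBlock: big.NewInt(100),
//		ToBlock:   big.NewInt(200),
//	}, logs)
//	if err != nil {
//		return err
//	}
//	defer sub.Unsubscribe()
//	for {
//		select {
//		case matched := <-logs:
//			_ = matched // logs that passed filterLogs for this criteria
//		case <-sub.Err():
//			return nil
//		}
//	}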

// subscribeMinedPendingLogs creates a subscription that returns mined and
// pending logs that match the given criteria.
func (es *EventSystem) subscribeMinedPendingLogs(crit FilterCriteria, logs chan []*block.Log) *Subscription {
	sub := &subscription{
		id:        jsonrpc.NewID(),
		typ:       MinedAndPendingLogsSubscription,
		logsCrit:  crit,
		created:   time.Now(),
		logs:      logs,
		hashes:    make(chan []types.Hash),
		headers:   make(chan block.IHeader),
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}

// subscribeLogs creates a subscription that will write all logs matching the
// given criteria to the given logs channel.
func (es *EventSystem) subscribeLogs(crit FilterCriteria, logs chan []*block.Log) *Subscription {
	sub := &subscription{
		id:        jsonrpc.NewID(),
		typ:       LogsSubscription,
		logsCrit:  crit,
		created:   time.Now(),
		logs:      logs,
		hashes:    make(chan []types.Hash),
		headers:   make(chan block.IHeader),
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}

// subscribePendingLogs creates a subscription that writes contract event logs for
// transactions that enter the transaction pool.
func (es *EventSystem) subscribePendingLogs(crit FilterCriteria, logs chan []*block.Log) *Subscription {
	sub := &subscription{
		id:        jsonrpc.NewID(),
		typ:       PendingLogsSubscription,
		logsCrit:  crit,
		created:   time.Now(),
		logs:      logs,
		hashes:    make(chan []types.Hash),
		headers:   make(chan block.IHeader),
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}

// SubscribeNewHeads creates a subscription that writes the header of a block that is
// imported in the chain.
func (es *EventSystem) SubscribeNewHeads(headers chan block.IHeader) *Subscription {
	sub := &subscription{
		id:        jsonrpc.NewID(),
		typ:       BlocksSubscription,
		created:   time.Now(),
		logs:      make(chan []*block.Log),
		hashes:    make(chan []types.Hash),
		headers:   headers,
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}
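
// Illustrative only: new-head subscriptions deliver the header of every block
// reported on the chain event channel (see handleChainEvent below). In real
// code the loop should also watch sub.Err(), as in the earlier sketches.
//
//	headers := make(chan block.IHeader)
//	sub := es.SubscribeNewHeads(headers)
//	defer sub.Unsubscribe()
//	for h := range headers {
//		_ = h.Number64() // e.g. track the latest height
//	}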

// SubscribePendingTxs creates a subscription that writes transaction hashes for
// transactions that enter the transaction pool.
func (es *EventSystem) SubscribePendingTxs(hashes chan []types.Hash) *Subscription {
	sub := &subscription{
		id:        jsonrpc.NewID(),
		typ:       PendingTransactionsSubscription,
		created:   time.Now(),
		logs:      make(chan []*block.Log),
		hashes:    hashes,
		headers:   make(chan block.IHeader),
		installed: make(chan struct{}),
		err:       make(chan error),
	}
	return es.subscribe(sub)
}
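
// Illustrative only: pending-transaction subscriptions receive batches of
// hashes, one batch per NewTxsEvent (see handleTxsEvent below), with `es`
// assumed to come from NewEventSystem.
//
//	hashes := make(chan []types.Hash)
//	sub := es.SubscribePendingTxs(hashes)
//	defer sub.Unsubscribe()
//	select {
//	case batch := <-hashes:
//		_ = batch // hashes of transactions that just entered the pool
//	case <-sub.Err():
//		// subscription was uninstalled
//	}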

type filterIndex map[Type]map[jsonrpc.ID]*subscription

func (es *EventSystem) handleLogs(filters filterIndex, ev common.NewLogsEvent) {
	if len(ev.Logs) == 0 {
		return
	}
	for _, f := range filters[LogsSubscription] {
		matchedLogs := filterLogs(ev.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
		if len(matchedLogs) > 0 {
			f.logs <- matchedLogs
		}
	}
}

func (es *EventSystem) handlePendingLogs(filters filterIndex, ev common.NewPendingLogsEvent) {
	if len(ev.Logs) == 0 {
		return
	}
	for _, f := range filters[PendingLogsSubscription] {
		matchedLogs := filterLogs(ev.Logs, nil, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
		if len(matchedLogs) > 0 {
			f.logs <- matchedLogs
		}
	}
}

func (es *EventSystem) handleRemovedLogs(filters filterIndex, ev common.RemovedLogsEvent) {
	for _, f := range filters[LogsSubscription] {
		matchedLogs := filterLogs(ev.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
		if len(matchedLogs) > 0 {
			f.logs <- matchedLogs
		}
	}
}

func (es *EventSystem) handleTxsEvent(filters filterIndex, ev common.NewTxsEvent) {
	hashes := make([]types.Hash, 0, len(ev.Txs))
	for _, tx := range ev.Txs {
		hash := tx.Hash()
		hashes = append(hashes, hash)
	}
	for _, f := range filters[PendingTransactionsSubscription] {
		f.hashes <- hashes
	}
}

func (es *EventSystem) handleChainEvent(filters filterIndex, ev common.ChainHighestBlock) {
	for _, f := range filters[BlocksSubscription] {
		f.headers <- ev.Block.Header()
	}
	if es.lightMode && len(filters[LogsSubscription]) > 0 {
		es.lightFilterNewHead(ev.Block.Header(), func(header block.IHeader, remove bool) {
			for _, f := range filters[LogsSubscription] {
				if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 {
					f.logs <- matchedLogs
				}
			}
		})
	}
}

func (es *EventSystem) lightFilterNewHead(newHeader block.IHeader, callBack func(block.IHeader, bool)) {
	oldh := es.lastHead
	es.lastHead = newHeader
	if oldh == nil {
		return
	}
	newh := newHeader
	// find common ancestor, create list of rolled back and new block hashes
	var oldHeaders, newHeaders []block.IHeader
	for oldh.Hash() != newh.Hash() {
		if oldh.Number64().Uint64() >= newh.Number64().Uint64() {
			oldHeaders = append(oldHeaders, oldh)
			pHash := oldh.(*block.Header).ParentHash
			es.api.Database().View(context.Background(), func(tx kv.Tx) error {
				var err error
				oldh, err = rawdb.ReadHeaderByHash(tx, pHash)
				return err
			})
		}
		if oldh.Number64().Uint64() < newh.Number64().Uint64() {
			newHeaders = append(newHeaders, newh)
			pHash := newh.(*block.Header).ParentHash
			es.api.Database().View(context.Background(), func(tx kv.Tx) error {
				newh, _ = rawdb.ReadHeaderByHash(tx, pHash)
				if newh == nil {
					// happens when CHT syncing, nothing to do
					newh = oldh
				}
				return nil
			})
		}
	}
	// roll back old blocks
	for _, h := range oldHeaders {
		callBack(h, true)
	}
	// check new blocks (array is in reverse order)
	for i := len(newHeaders) - 1; i >= 0; i-- {
		callBack(newHeaders[i], false)
	}
}

// filter logs of a single header in light client mode
func (es *EventSystem) lightFilterLogs(header block.IHeader, addresses []types.Address, topics [][]types.Hash, remove bool) []*block.Log {
	// TODO: use header.Bloom
	bloom, _ := types.NewBloom(100)
	if bloomFilter(bloom, addresses, topics) {
		// Get the logs of the block
		// NOTE: the derived context is discarded, so the 5s timeout currently
		// has no effect on the GetLogs call below.
		_, cancel := context.WithTimeout(context.Background(), time.Second*5)
		defer cancel()
		logsList, err := es.api.BlockChain().GetLogs(header.Hash())
		if err != nil {
			return nil
		}
		var unfiltered []*block.Log
		for _, logs := range logsList {
			for _, log := range logs {
				logcopy := *log
				logcopy.Removed = remove
				unfiltered = append(unfiltered, &logcopy)
			}
		}
		logs := filterLogs(unfiltered, nil, nil, addresses, topics)
		if len(logs) > 0 && logs[0].TxHash == (types.Hash{}) {
			// We have matching but non-derived logs
			receipts, err := es.api.BlockChain().GetReceipts(header.Hash())
			if err != nil {
				return nil
			}
			unfiltered = unfiltered[:0]
			for _, receipt := range receipts {
				for _, log := range receipt.Logs {
					logcopy := *log
					logcopy.Removed = remove
					unfiltered = append(unfiltered, &logcopy)
				}
			}
			logs = filterLogs(unfiltered, nil, nil, addresses, topics)
		}
		return logs
	}
	return nil
}

// eventLoop (un)installs filters and processes incoming events.
func (es *EventSystem) eventLoop() {
	// Ensure all subscriptions get cleaned up
	defer func() {
		es.txsSub.Unsubscribe()
		es.logsSub.Unsubscribe()
		es.rmLogsSub.Unsubscribe()
		es.pendingLogsSub.Unsubscribe()
		es.chainSub.Unsubscribe()
	}()

	index := make(filterIndex)
	for i := UnknownSubscription; i < LastIndexSubscription; i++ {
		index[i] = make(map[jsonrpc.ID]*subscription)
	}

	for {
		select {
		case ev := <-es.txsCh:
			es.handleTxsEvent(index, ev)
		case ev := <-es.logsCh:
			es.handleLogs(index, ev)
		case ev := <-es.rmLogsCh:
			es.handleRemovedLogs(index, ev)
		case ev := <-es.pendingLogsCh:
			es.handlePendingLogs(index, ev)
		case ev := <-es.chainCh:
			if ev.Inserted {
				es.handleChainEvent(index, ev)
			}

		case f := <-es.install:
			if f.typ == MinedAndPendingLogsSubscription {
				// this type covers both logs and pending logs subscriptions
				index[LogsSubscription][f.id] = f
				index[PendingLogsSubscription][f.id] = f
			} else {
				index[f.typ][f.id] = f
			}
			close(f.installed)

		case f := <-es.uninstall:
			if f.typ == MinedAndPendingLogsSubscription {
				// this type covers both logs and pending logs subscriptions
				delete(index[LogsSubscription], f.id)
				delete(index[PendingLogsSubscription], f.id)
			} else {
				delete(index[f.typ], f.id)
			}
			close(f.err)

		// System stopped
		case <-es.txsSub.Err():
			return
		case <-es.logsSub.Err():
			return
		case <-es.rmLogsSub.Err():
			return
		case <-es.chainSub.Err():
			return
		}
	}
}