github.com/ethereum/go-ethereum@v1.14.3/eth/filters/filter.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filters

import (
	"context"
	"errors"
	"math/big"
	"slices"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/bloombits"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc"
)

// Filter can be used to retrieve and filter logs.
type Filter struct {
	sys *FilterSystem

	addresses []common.Address
	topics    [][]common.Hash

	block      *common.Hash // Block hash if filtering a single block
	begin, end int64        // Range interval if filtering multiple blocks

	matcher *bloombits.Matcher
}

// NewRangeFilter creates a new filter which uses a bloom filter on blocks to
// figure out whether a particular block is interesting or not.
func (sys *FilterSystem) NewRangeFilter(begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter {
	// Flatten the address and topic filter clauses into a single bloombits filter
	// system. Since the bloombits are not positional, nil topics are permitted,
	// which get flattened into a nil byte slice.
	var filters [][][]byte
	if len(addresses) > 0 {
		filter := make([][]byte, len(addresses))
		for i, address := range addresses {
			filter[i] = address.Bytes()
		}
		filters = append(filters, filter)
	}
	for _, topicList := range topics {
		filter := make([][]byte, len(topicList))
		for i, topic := range topicList {
			filter[i] = topic.Bytes()
		}
		filters = append(filters, filter)
	}
	size, _ := sys.backend.BloomStatus()

	// Create a generic filter and convert it into a range filter
	filter := newFilter(sys, addresses, topics)

	filter.matcher = bloombits.NewMatcher(size, filters)
	filter.begin = begin
	filter.end = end

	return filter
}

// NewBlockFilter creates a new filter which directly inspects the contents of
// a block to figure out whether it is interesting or not.
func (sys *FilterSystem) NewBlockFilter(block common.Hash, addresses []common.Address, topics [][]common.Hash) *Filter {
	// Create a generic filter and convert it into a block filter
	filter := newFilter(sys, addresses, topics)
	filter.block = &block
	return filter
}

// newFilter creates a generic filter that can either filter based on a block hash,
// or based on range queries. The search criteria need to be explicitly set.
func newFilter(sys *FilterSystem, addresses []common.Address, topics [][]common.Hash) *Filter {
	return &Filter{
		sys:       sys,
		addresses: addresses,
		topics:    topics,
	}
}
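
// Illustrative usage sketch, not part of the upstream file: how a caller that
// already holds a *FilterSystem might combine the constructors above with Logs.
// The contract address and helper name are placeholders; the topic is the
// ERC-20 Transfer event signature hash, used here only as an example.
func exampleFilterUsage(ctx context.Context, sys *FilterSystem, blockHash common.Hash) ([]*types.Log, error) {
	var (
		token    = common.HexToAddress("0x0000000000000000000000000000000000000001") // placeholder contract
		transfer = common.HexToHash("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef")
	)
	// Range filter: scan from the earliest block to the latest block for logs
	// emitted by the placeholder contract with the Transfer topic in position 0.
	rf := sys.NewRangeFilter(rpc.EarliestBlockNumber.Int64(), rpc.LatestBlockNumber.Int64(),
		[]common.Address{token}, [][]common.Hash{{transfer}})
	logs, err := rf.Logs(ctx)
	if err != nil {
		return nil, err
	}
	// Block filter: inspect a single block, identified by hash, with the same criteria.
	bf := sys.NewBlockFilter(blockHash, []common.Address{token}, [][]common.Hash{{transfer}})
	blockLogs, err := bf.Logs(ctx)
	if err != nil {
		return nil, err
	}
	return append(logs, blockLogs...), nil
}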

// Logs searches the blockchain for matching log entries, returning all matching
// logs in the filter's range and advancing the start of the filter as blocks
// are processed.
func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
	// If we're doing singleton block filtering, execute and return
	if f.block != nil {
		header, err := f.sys.backend.HeaderByHash(ctx, *f.block)
		if err != nil {
			return nil, err
		}
		if header == nil {
			return nil, errors.New("unknown block")
		}
		return f.blockLogs(ctx, header)
	}

	// Disallow pending logs.
	if f.begin == rpc.PendingBlockNumber.Int64() || f.end == rpc.PendingBlockNumber.Int64() {
		return nil, errPendingLogsUnsupported
	}

	resolveSpecial := func(number int64) (int64, error) {
		var hdr *types.Header
		switch number {
		case rpc.LatestBlockNumber.Int64(), rpc.PendingBlockNumber.Int64():
			// Both pending and latest resolve to the latest header here, since
			// pending log queries have already been rejected above.
			hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
			if hdr == nil {
				return 0, errors.New("latest header not found")
			}
		case rpc.FinalizedBlockNumber.Int64():
			hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.FinalizedBlockNumber)
			if hdr == nil {
				return 0, errors.New("finalized header not found")
			}
		case rpc.SafeBlockNumber.Int64():
			hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.SafeBlockNumber)
			if hdr == nil {
				return 0, errors.New("safe header not found")
			}
		default:
			return number, nil
		}
		return hdr.Number.Int64(), nil
	}

	var err error
	// Range queries need to resolve the special begin/end block numbers.
	if f.begin, err = resolveSpecial(f.begin); err != nil {
		return nil, err
	}
	if f.end, err = resolveSpecial(f.end); err != nil {
		return nil, err
	}

	logChan, errChan := f.rangeLogsAsync(ctx)
	var logs []*types.Log
	for {
		select {
		case log := <-logChan:
			logs = append(logs, log)
		case err := <-errChan:
			return logs, err
		}
	}
}

// rangeLogsAsync retrieves block-range logs that match the filter criteria asynchronously.
// It creates and returns two channels: one for delivering log data, and one for reporting errors.
func (f *Filter) rangeLogsAsync(ctx context.Context) (chan *types.Log, chan error) {
	var (
		logChan = make(chan *types.Log)
		errChan = make(chan error)
	)

	go func() {
		defer func() {
			close(errChan)
			close(logChan)
		}()

		// Gather all indexed logs, and finish with non-indexed ones
		var (
			end            = uint64(f.end)
			size, sections = f.sys.backend.BloomStatus()
			err            error
		)
		if indexed := sections * size; indexed > uint64(f.begin) {
			if indexed > end {
				indexed = end + 1
			}
			if err = f.indexedLogs(ctx, indexed-1, logChan); err != nil {
				errChan <- err
				return
			}
		}

		if err := f.unindexedLogs(ctx, end, logChan); err != nil {
			errChan <- err
			return
		}

		errChan <- nil
	}()

	return logChan, errChan
}

// indexedLogs returns the logs matching the filter criteria based on the
// bloom-bits index, available locally or via the network.
func (f *Filter) indexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error {
	// Create a matcher session and request servicing from the backend
	matches := make(chan uint64, 64)

	session, err := f.matcher.Start(ctx, uint64(f.begin), end, matches)
	if err != nil {
		return err
	}
	defer session.Close()

	f.sys.backend.ServiceFilter(ctx, session)

	for {
		select {
		case number, ok := <-matches:
			// Abort if all matches have been fulfilled
			if !ok {
				err := session.Error()
				if err == nil {
					f.begin = int64(end) + 1
				}
				return err
			}
			f.begin = int64(number) + 1

			// Retrieve the suggested block and pull any truly matching logs
			header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(number))
			if header == nil || err != nil {
				return err
			}
			found, err := f.checkMatches(ctx, header)
			if err != nil {
				return err
			}
			for _, log := range found {
				logChan <- log
			}

		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

// unindexedLogs returns the logs matching the filter criteria based on raw block
// iteration and bloom matching.
func (f *Filter) unindexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error {
	for ; f.begin <= int64(end); f.begin++ {
		header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin))
		if header == nil || err != nil {
			return err
		}
		found, err := f.blockLogs(ctx, header)
		if err != nil {
			return err
		}
		for _, log := range found {
			select {
			case logChan <- log:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	}
	return nil
}

// blockLogs returns the logs matching the filter criteria within a single block.
func (f *Filter) blockLogs(ctx context.Context, header *types.Header) ([]*types.Log, error) {
	if bloomFilter(header.Bloom, f.addresses, f.topics) {
		return f.checkMatches(ctx, header)
	}
	return nil, nil
}

// checkMatches checks if the receipts belonging to the given header contain any log events that
// match the filter criteria. This function is called when the bloom filter signals a potential match.
func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*types.Log, error) {
	hash := header.Hash()
	// Logs in the cache are partially filled with context data
	// such as tx index, block hash, etc.
	// Notably the tx hash is NOT filled in, because deriving it needs
	// access to the block body data.
	cached, err := f.sys.cachedLogElem(ctx, hash, header.Number.Uint64())
	if err != nil {
		return nil, err
	}
	logs := filterLogs(cached.logs, nil, nil, f.addresses, f.topics)
	if len(logs) == 0 {
		return nil, nil
	}
	// Most backends will deliver un-derived logs, but check nevertheless.
	if len(logs) > 0 && logs[0].TxHash != (common.Hash{}) {
		return logs, nil
	}

	body, err := f.sys.cachedGetBody(ctx, cached, hash, header.Number.Uint64())
	if err != nil {
		return nil, err
	}
	for i, log := range logs {
		// Copy the log so we don't modify cached elements
		logcopy := *log
		logcopy.TxHash = body.Transactions[logcopy.TxIndex].Hash()
		logs[i] = &logcopy
	}
	return logs, nil
}

// filterLogs creates a slice of logs matching the given criteria.
func filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []common.Address, topics [][]common.Hash) []*types.Log {
	var check = func(log *types.Log) bool {
		if fromBlock != nil && fromBlock.Int64() >= 0 && fromBlock.Uint64() > log.BlockNumber {
			return false
		}
		if toBlock != nil && toBlock.Int64() >= 0 && toBlock.Uint64() < log.BlockNumber {
			return false
		}
		if len(addresses) > 0 && !slices.Contains(addresses, log.Address) {
			return false
		}
		// If the filter has more topics than the log, it cannot match; skip.
		if len(topics) > len(log.Topics) {
			return false
		}
		for i, sub := range topics {
			if len(sub) == 0 {
				continue // empty rule set == wildcard
			}
			if !slices.Contains(sub, log.Topics[i]) {
				return false
			}
		}
		return true
	}
	var ret []*types.Log
	for _, log := range logs {
		if check(log) {
			ret = append(ret, log)
		}
	}
	return ret
}
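
// Illustrative sketch, not part of the upstream file: the topic filter used by
// filterLogs is positional. topics[i] lists the acceptable values for the log's
// topic at position i (OR within a position, AND across positions), and an empty
// slice at a position acts as a wildcard. The hashes below are placeholders.
func exampleTopicSemantics() []*types.Log {
	var (
		eventA = common.HexToHash("0x01") // placeholder event signature hashes
		eventB = common.HexToHash("0x02")
		sender = common.HexToHash("0x03")
	)
	logs := []*types.Log{
		{Topics: []common.Hash{eventA, sender}}, // kept: first topic is in {eventA, eventB}
		{Topics: []common.Hash{eventB, sender}}, // kept: first topic is in {eventA, eventB}
		{Topics: []common.Hash{eventA}},         // dropped: fewer topics than the filter requires
	}
	// First position must be eventA or eventB, second position is a wildcard.
	return filterLogs(logs, nil, nil, nil, [][]common.Hash{{eventA, eventB}, {}})
}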

// bloomFilter checks whether the given bloom could contain logs matching the
// addresses and topics. It may report false positives but never false negatives.
func bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]common.Hash) bool {
	if len(addresses) > 0 {
		var included bool
		for _, addr := range addresses {
			if types.BloomLookup(bloom, addr) {
				included = true
				break
			}
		}
		if !included {
			return false
		}
	}

	for _, sub := range topics {
		included := len(sub) == 0 // empty rule set == wildcard
		for _, topic := range sub {
			if types.BloomLookup(bloom, topic) {
				included = true
				break
			}
		}
		if !included {
			return false
		}
	}
	return true
}
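
// Illustrative sketch, not part of the upstream file: because bloomFilter is only
// a probabilistic pre-check, a bloom built from an address/topic pair always
// passes for those values, while unrelated values are rejected except for rare
// false positives. The address and hashes below are placeholders.
func exampleBloomPrescreen() (hit, probableMiss bool) {
	var (
		bloom types.Bloom
		addr  = common.HexToAddress("0x0000000000000000000000000000000000000001")
		topic = common.HexToHash("0x02")
	)
	// Add the address and topic to the bloom, as types.CreateBloom does for receipt logs.
	bloom.Add(addr.Bytes())
	bloom.Add(topic.Bytes())

	hit = bloomFilter(bloom, []common.Address{addr}, [][]common.Hash{{topic}})            // true
	probableMiss = bloomFilter(bloom, []common.Address{common.HexToAddress("0x03")}, nil) // almost certainly false
	return hit, probableMiss
}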