github.com/decred/dcrlnd@v0.7.6/chainscan/historical.go (about)

     1  package chainscan
     2  
     3  import (
     4  	"context"
     5  	"errors"
     6  	"fmt"
     7  	"sync"
     8  	"sync/atomic"
     9  
    10  	"github.com/decred/dcrd/chaincfg/chainhash"
    11  	"github.com/decred/dcrd/gcs/v4"
    12  )
    13  
// HistoricalChainSource is the chain backend required by the Historical
// scanner. On top of the base ChainSource methods it must be able to
// provide the committed filter for any individual mainchain height.
type HistoricalChainSource interface {
	ChainSource

	// GetCFilter MUST return the block hash, key and filter for the given
	// mainchain block height.
	//
	// If a height higher than the current mainchain tip is specified, the
	// chain source MUST return ErrBlockAfterTip so that the historical
	// scanner will correctly handle targets specified with an invalid
	// ending height.
	GetCFilter(context.Context, int32) (*chainhash.Hash, [16]byte, *gcs.FilterV2, error)
}
    26  
// Historical is a scanner that searches for targets in historical
// (already mined) mainchain blocks. Targets are registered via Find and
// FindMany and are processed in batches by Run.
type Historical struct {
	// mtx guards ctx, which is filled in once Run is called.
	mtx sync.Mutex
	ctx context.Context

	// newTargetsChan delivers newly registered targets to the running
	// batch processor. newTargetCount counts sends on that channel that
	// are still outstanding; it is accessed atomically.
	newTargetsChan chan []*target
	newTargetCount int64

	// nextBatchTargets are the targets which startHeight have already
	// passed the current height of the batch and will need to be included
	// in the next batch.
	nextBatchTargets []*target

	// chain is the backend used to fetch cfilters, blocks and the tip.
	chain HistoricalChainSource
}
    41  
    42  func NewHistorical(chain HistoricalChainSource) *Historical {
    43  	return &Historical{
    44  		chain:          chain,
    45  		newTargetsChan: make(chan []*target),
    46  	}
    47  }
    48  
    49  // nextBatchRun returns the next run for the given batch of targets.  The batch
    50  // is modified so that targets left out of this run remain on it.
    51  func nextBatchRun(batch *targetHeap) *targetHeap {
    52  	if len(*batch) == 0 {
    53  		return &targetHeap{}
    54  	}
    55  
    56  	// Begin a new run.
    57  	run := &targetHeap{batch.pop()}
    58  	startHeight := run.peak().startHeight
    59  
    60  	// Find all items that have the same startHeight.
    61  	for t := batch.peak(); t != nil && t.startHeight == startHeight; t = batch.peak() {
    62  		run.push(batch.pop())
    63  	}
    64  	return run
    65  }
    66  
    67  func targetsForNextBatch(height int32, newTargets []*target) ([]*target, []*target) {
    68  	var thisBatch, nextBatch []*target
    69  	for _, nt := range newTargets {
    70  		switch {
    71  		case nt.startHeight <= height:
    72  			log.Tracef("New target delayed to next batch due to %d <= %d",
    73  				nt.startHeight, height)
    74  			nextBatch = append(nextBatch, nt)
    75  
    76  		default:
    77  			log.Tracef("New target Added to this batch due to %d > %d",
    78  				nt.startHeight, height)
    79  			thisBatch = append(thisBatch, nt)
    80  		}
    81  	}
    82  
    83  	return thisBatch, nextBatch
    84  }
    85  
// drainNewTargets receives every pending announcement from
// newTargetsChan (one per outstanding increment of newTargetCount) and
// appends the received targets to waiting. Any target registered without
// an explicit end height (endHeight <= 0) has it filled in with the
// current mainchain tip height.
//
// Returns the full list of waiting targets, or an error if the scanner's
// context is canceled or the tip cannot be fetched.
func (h *Historical) drainNewTargets(waiting []*target) ([]*target, error) {
	var zeroEndHeight bool

	// Determine if any of the outstanding waiting targets has zero
	// endHeight.
	for _, t := range waiting {
		if t.endHeight > 0 {
			continue
		}
		zeroEndHeight = true
		break
	}

	// While there are outstanding new targets to be received, block while
	// waiting for them so we can decide what to do (add to the current
	// batch or keep it until the next batch starts).
	for atomic.LoadInt64(&h.newTargetCount) > 0 {
		select {
		case <-h.ctx.Done():
			return nil, h.ctx.Err()
		case newTargets := <-h.newTargetsChan:
			waiting = append(waiting, newTargets...)
			atomic.AddInt64(&h.newTargetCount, -1)

			// If we already know some target has a zero endHeight,
			// there's no need to inspect this new group; otherwise
			// check whether it introduces one.
			if zeroEndHeight {
				continue
			}
			for _, nt := range newTargets {
				if nt.endHeight <= 0 {
					zeroEndHeight = true
					break
				}
			}

		}
	}

	// If there are new targets with a zero endHeight, fetch the current
	// tip and fill their endHeight.
	if zeroEndHeight {
		_, endHeight, err := h.chain.CurrentTip(h.ctx)
		if err != nil {
			return nil, err
		}

		for _, nt := range waiting {
			if nt.endHeight <= 0 {
				nt.endHeight = endHeight
			}
		}
	}

	return waiting, nil
}
   142  
// rescanRun performs the rescan across a single "run" of targets in ascending
// block height.
//
// The scan starts at startHeight and advances one block at a time until
// every target in the list has completed (or been canceled). Targets
// registered mid-scan are either merged into this run, pushed onto the
// current batch, or deferred to h.nextBatchTargets depending on their
// startHeight relative to the scan position.
func (h *Historical) rescanRun(targets *targetList, batch *targetHeap, startHeight int32) error {
	var (
		bcf blockCFilter
		err error
	)

	log.Tracef("Starting run with %d targets at height %d", len(targets.targets),
		startHeight)

	for bcf.height = startHeight; !targets.empty(); bcf.height++ {
		if h.ctxDone() {
			return h.ctx.Err()
		}

		// Fetch cfilter for this block & process it.
		bcf.hash, bcf.cfilterKey, bcf.cfilter, err = h.chain.GetCFilter(h.ctx, bcf.height)
		if err != nil {
			// NOTE(review): this relies on ErrBlockAfterTip
			// matching via errors.Is against a zero value —
			// presumably it implements Is(); confirm in its
			// definition.
			if errors.Is(err, ErrBlockAfterTip{}) {
				// This means at least one target was specified
				// with an endHeight past the current tip or
				// we're in the middle of a reorg. In any case,
				// this isn't a critical error as specified in
				// the documentation for this scanner.
				//
				// Signal all targets as complete.
				signalComplete(targets.removeAll())
				return nil
			}
			return err
		}

		err = scan(h.ctx, &bcf, targets, h.chain.GetBlock)
		if err != nil {
			return err
		}

		// Fetch and split new targets between those that can be
		// processed in this batch and those that will need to wait for
		// the next batch.
		newTargets, err := h.drainNewTargets(targets.addedDuringScan)
		if err != nil {
			return err
		}
		thisBatch, nextBatch := targetsForNextBatch(bcf.height, newTargets)
		batch.push(thisBatch...)
		h.nextBatchTargets = append(h.nextBatchTargets, nextBatch...)

		// Add any targets that can extend this run.
		for t := batch.peak(); t != nil && t.startHeight == bcf.height+1; t = batch.peak() {
			targets.add(batch.pop())
			log.Tracef("Added target to run at height %d: %s",
				bcf.height, t)
		}

		// Remove canceled and targets which reached their endHeight
		// and signal their completion.
		stale := targets.removeStale(bcf.height)
		signalComplete(stale)

		if targets.dirty {
			targets.rebuildCfilterEntries()
		}
	}

	log.Tracef("Ended run at height %d", bcf.height-1)
	return nil
}
   212  
   213  // rescanBatch performs a rescan across all outstanding targets (a "batch" of
   214  // targets) in ascending block height order.
   215  //
   216  // A batch may be composed of multiple disjoint "runs".
   217  func (h *Historical) rescanBatch(targets []*target) error {
   218  	batch := asTargetHeap(targets)
   219  
   220  	log.Debugf("Starting batch of %d targets", len(targets))
   221  
   222  	tl := newTargetList(nil)
   223  	for batch.peak() != nil {
   224  		run := nextBatchRun(batch)
   225  		startHeight := run.peak().startHeight
   226  		tl.add(*run...)
   227  		tl.rebuildCfilterEntries()
   228  		err := h.rescanRun(tl, batch, startHeight)
   229  		if err != nil {
   230  			return err
   231  		}
   232  	}
   233  
   234  	return nil
   235  }
   236  
// Run executes the historical scanner, processing batches of targets
// registered via Find/FindMany until the given context is canceled or a
// non-recoverable chain error occurs, at which point the corresponding
// error is returned. Run may only be called once per Historical.
func (h *Historical) Run(ctx context.Context) error {
	// Record the context, failing if the scanner was already started.
	h.mtx.Lock()
	if h.ctx != nil {
		h.mtx.Unlock()
		return errors.New("already running")
	}
	h.ctx = ctx
	h.mtx.Unlock()

	var err error

	for {
		// Process any existing waiting targets or wait for some to
		// arrive.
		newTargets := h.nextBatchTargets
		h.nextBatchTargets = nil
		if len(newTargets) == 0 {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case newTargets = <-h.newTargetsChan:
				atomic.AddInt64(&h.newTargetCount, -1)
			}
		}
		// Collect any additional targets announced in the meantime and
		// resolve zero endHeights against the current tip.
		newTargets, err = h.drainNewTargets(newTargets)
		if err != nil {
			return err
		}

		err = h.rescanBatch(newTargets)
		if err != nil {
			return err
		}
	}
}
   272  
   273  // applyOptions applies the given options to the given target and returns the
   274  // concrete implementation of the target or an error.
   275  //
   276  // This is used to ensure the same validation rules are used in both Find and
   277  // FindMany.
   278  func (h *Historical) applyOptions(tgt Target, opts []Option) (*target, error) {
   279  	t, ok := tgt.(*target)
   280  	if !ok {
   281  		return nil, errors.New("provided target should be chainscan.*target")
   282  	}
   283  
   284  	for _, opt := range opts {
   285  		opt(t)
   286  	}
   287  
   288  	if t.endHeight > 0 && t.endHeight < t.startHeight {
   289  		return nil, errors.New("malformed query (endHeight < startHeight)")
   290  	}
   291  
   292  	return t, nil
   293  }
   294  
   295  // ctxDone returns true when the historical's ctx is both filled and Done().
   296  func (h *Historical) ctxDone() bool {
   297  	h.mtx.Lock()
   298  	ctx := h.ctx
   299  	h.mtx.Unlock()
   300  	if ctx == nil {
   301  		return false
   302  	}
   303  	select {
   304  	case <-ctx.Done():
   305  		return true
   306  	default:
   307  		return false
   308  	}
   309  }
   310  
   311  func (h *Historical) Find(tgt Target, opts ...Option) error {
   312  	if h.ctxDone() {
   313  		return errors.New("historical scanner finished running")
   314  	}
   315  
   316  	t, err := h.applyOptions(tgt, opts)
   317  	if err != nil {
   318  		return err
   319  	}
   320  
   321  	// We're about to add new targets, so setup the flag that will let the
   322  	// batch processor know to wait for them.
   323  	newCount := atomic.AddInt64(&h.newTargetCount, 1)
   324  	if newCount < 0 {
   325  		// How did we wrap around an int64? This is super bad.
   326  		panic(fmt.Errorf("wrap around newTargetCount"))
   327  	}
   328  
   329  	// Signal the existance of new targets in a new goroutine to avoid
   330  	// locking.
   331  	go func() {
   332  		h.newTargetsChan <- []*target{t}
   333  	}()
   334  
   335  	return nil
   336  }
   337  
   338  // FindMany attempts to search for many targets at once. This is better than
   339  // making individual calls to Find() when they should be searched for at the
   340  // same starting height since all specified targets are guaranteed to be
   341  // included in the same search batch.
   342  //
   343  // This function is safe for concurrent calls in multiple goroutines, including
   344  // inside functions specified with WithFoundCallback() options.
   345  func (h *Historical) FindMany(targets []TargetAndOptions) error {
   346  	if h.ctxDone() {
   347  		return errors.New("historical scanner finished running")
   348  	}
   349  
   350  	ts := make([]*target, len(targets))
   351  	var err error
   352  
   353  	for i, tgt := range targets {
   354  		ts[i], err = h.applyOptions(tgt.Target, tgt.Options)
   355  		if err != nil {
   356  			return err
   357  		}
   358  	}
   359  
   360  	// We're about to add new targets, so setup the flag that will let the
   361  	// batch processor know to wait for them.
   362  	newCount := atomic.AddInt64(&h.newTargetCount, 1)
   363  	if newCount < 0 {
   364  		// How did we wrap around an int64? This is super bad.
   365  		panic(fmt.Errorf("wrap around newTargetCount"))
   366  	}
   367  
   368  	// Signal the existance of new targets in a new goroutine to avoid
   369  	// locking.
   370  	go func() {
   371  		h.newTargetsChan <- ts
   372  	}()
   373  
   374  	return nil
   375  
   376  }