github.com/decred/dcrlnd@v0.7.6/sweep/sweeper.go (about)

     1  package sweep
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"math/rand"
     7  	"sort"
     8  	"sync"
     9  	"sync/atomic"
    10  	"time"
    11  
    12  	"github.com/davecgh/go-spew/spew"
    13  	"github.com/decred/dcrd/chaincfg/chainhash"
    14  	"github.com/decred/dcrd/chaincfg/v3"
    15  	"github.com/decred/dcrd/dcrutil/v4"
    16  	"github.com/decred/dcrd/wire"
    17  	"github.com/decred/dcrlnd/chainntnfs"
    18  	"github.com/decred/dcrlnd/input"
    19  	"github.com/decred/dcrlnd/labels"
    20  	"github.com/decred/dcrlnd/lnwallet"
    21  	"github.com/decred/dcrlnd/lnwallet/chainfee"
    22  )
    23  
const (
	// DefaultMaxFeeRate is the default maximum fee rate allowed within the
	// UtxoSweeper. At 1e8 atoms per DCR, 1e6 atoms/KByte is equivalent to
	// a fee rate of 0.01000000 DCR/KByte.
	DefaultMaxFeeRate chainfee.AtomPerKByte = 1e6

	// DefaultFeeRateBucketSize is the default size of fee rate buckets
	// we'll use when clustering inputs into buckets with similar fee rates
	// within the UtxoSweeper.
	//
	// Given a minimum relay fee rate of 1 atom/KB, a multiplier of 10
	// would result in the following fee rate buckets up to the maximum fee
	// rate:
	//
	//   #1: min = 1 atom/KB, max = 10 atom/KB
	//   #2: min = 11 atom/KB, max = 20 atom/KB...
	DefaultFeeRateBucketSize = 10
)
    42  
var (
	// ErrRemoteSpend is returned in case an output that we try to sweep is
	// confirmed in a tx of the remote party.
	ErrRemoteSpend = errors.New("remote party swept utxo")

	// ErrTooManyAttempts is returned in case sweeping an output has failed
	// for the configured max number of attempts.
	ErrTooManyAttempts = errors.New("sweep failed after max attempts")

	// ErrNoFeePreference is returned when we attempt to satisfy a sweep
	// request from a client who did not specify a fee preference.
	ErrNoFeePreference = errors.New("no fee preference specified")

	// ErrExclusiveGroupSpend is returned in case a different input of the
	// same exclusive group was spent.
	ErrExclusiveGroupSpend = errors.New("other member of exclusive group " +
		"was spent")

	// ErrSweeperShuttingDown is an error returned when a client attempts to
	// make a request to the UtxoSweeper, but it is unable to handle it as
	// it is/has already been stopped.
	ErrSweeperShuttingDown = errors.New("utxo sweeper shutting down")

	// DefaultMaxSweepAttempts specifies the default maximum number of times
	// an input is included in a publish attempt before giving up and
	// returning an error to the caller.
	DefaultMaxSweepAttempts = 10
)
    71  
// Params contains the parameters that control the sweeping process.
type Params struct {
	// Fee is the fee preference of the client who requested the input to be
	// swept. If a confirmation target is specified, then we'll map it into
	// a fee rate whenever we attempt to cluster inputs for a sweep.
	Fee FeePreference

	// Force indicates whether the input should be swept regardless of
	// whether it is economical to do so.
	Force bool

	// ExclusiveGroup is an identifier that, if set, prevents other inputs
	// with the same identifier from being batched together. When one member
	// of the group is spent, the remaining members are failed.
	ExclusiveGroup *uint64
}
    87  
// ParamsUpdate contains a new set of parameters to update a pending sweep
// with. Unlike Params, it carries no exclusive group, so an update cannot
// change an input's group membership.
type ParamsUpdate struct {
	// Fee is the fee preference of the client who requested the input to be
	// swept. If a confirmation target is specified, then we'll map it into
	// a fee rate whenever we attempt to cluster inputs for a sweep.
	Fee FeePreference

	// Force indicates whether the input should be swept regardless of
	// whether it is economical to do so.
	Force bool
}
    99  
   100  // String returns a human readable interpretation of the sweep parameters.
   101  func (p Params) String() string {
   102  	return fmt.Sprintf("fee=%v, force=%v, exclusive_group=%v",
   103  		p.Fee, p.Force, p.ExclusiveGroup)
   104  }
   105  
// pendingInput is created when an input reaches the main loop for the first
// time. It wraps the input and tracks all relevant state that is needed for
// sweeping.
type pendingInput struct {
	input.Input

	// listeners is a list of channels over which the final outcome of the
	// sweep needs to be broadcasted. Each re-offer of the same outpoint
	// appends another listener.
	listeners []chan Result

	// ntfnRegCancel is populated with a function that cancels the chain
	// notifier spend registration.
	ntfnRegCancel func()

	// minPublishHeight indicates the minimum block height at which this
	// input may be (re)published.
	minPublishHeight int32

	// publishAttempts records the number of attempts that have already been
	// made to sweep this tx.
	publishAttempts int

	// params contains the parameters that control the sweeping process.
	params Params

	// lastFeeRate is the most recent fee rate used for this input within a
	// transaction broadcast to the network.
	lastFeeRate chainfee.AtomPerKByte
}
   135  
// parameters returns the sweep parameters for this input.
//
// NOTE: Part of the txInput interface.
func (p *pendingInput) parameters() Params {
	return p.params
}
   142  
// pendingInputs is a type alias for a set of pending inputs, keyed by the
// outpoint being swept.
type pendingInputs = map[wire.OutPoint]*pendingInput
   145  
// inputCluster is a helper struct to gather a set of pending inputs that should
// be swept with the specified fee rate.
type inputCluster struct {
	// lockTime, if non-nil, is the lock time shared by the inputs in this
	// cluster.
	lockTime     *uint32
	sweepFeeRate chainfee.AtomPerKByte
	inputs       pendingInputs
}
   153  
// pendingSweepsReq is an internal message we'll use to represent an external
// caller's intent to retrieve all of the pending inputs the UtxoSweeper is
// attempting to sweep.
type pendingSweepsReq struct {
	// respChan receives the set of pending inputs on success.
	respChan chan map[wire.OutPoint]*PendingInput

	// errChan receives an error if the request cannot be handled.
	errChan chan error
}
   161  
// PendingInput contains information about an input that is currently being
// swept by the UtxoSweeper.
type PendingInput struct {
	// OutPoint is the identifying outpoint of the input being swept.
	OutPoint wire.OutPoint

	// WitnessType is the witness type of the input being swept.
	WitnessType input.WitnessType

	// Amount is the amount of the input being swept.
	Amount dcrutil.Amount

	// LastFeeRate is the most recent fee rate used for the input being
	// swept within a transaction broadcast to the network.
	LastFeeRate chainfee.AtomPerKByte

	// BroadcastAttempts is the number of attempts we've made to sweep the
	// input.
	BroadcastAttempts int

	// NextBroadcastHeight is the next height of the chain at which we'll
	// attempt to broadcast a transaction sweeping the input.
	NextBroadcastHeight uint32

	// Params contains the sweep parameters for this pending request.
	Params Params
}
   189  
// updateReq is an internal message we'll use to represent an external caller's
// intent to update the sweep parameters of a given input.
type updateReq struct {
	// input is the outpoint whose sweep parameters should be updated.
	input  wire.OutPoint
	params ParamsUpdate

	// responseChan receives the outcome of the update request.
	responseChan chan *updateResp
}
   197  
// updateResp is an internal message we'll use to hand off the response of an
// updateReq from the UtxoSweeper's main event loop back to the caller.
type updateResp struct {
	// resultChan delivers the final sweep result when the update succeeds.
	resultChan chan Result

	// err is set when the update could not be applied.
	err error
}
   204  
// UtxoSweeper is responsible for sweeping outputs back into the wallet
type UtxoSweeper struct {
	// started and stopped ensure Start/Stop run their bodies at most once.
	started uint32 // To be used atomically.
	stopped uint32 // To be used atomically.

	cfg *UtxoSweeperConfig

	// newInputs delivers new sweep requests to the collector main loop.
	newInputs chan *sweepInputMessage

	// spendChan delivers spend notifications for watched outpoints.
	spendChan chan *chainntnfs.SpendDetail

	// pendingSweepsReq is a channel that will be sent requests by external
	// callers in order to retrieve the set of pending inputs the
	// UtxoSweeper is attempting to sweep.
	pendingSweepsReqs chan *pendingSweepsReq

	// updateReqs is a channel that will be sent requests by external
	// callers who wish to bump the fee rate of a given input.
	updateReqs chan *updateReq

	// pendingInputs is the total set of inputs the UtxoSweeper has been
	// requested to sweep.
	pendingInputs pendingInputs

	// timer is the channel that signals expiry of the sweep batch timer.
	timer <-chan time.Time

	// testSpendChan, when non-nil, receives every detected spend so tests
	// can observe them.
	testSpendChan chan wire.OutPoint

	// currentOutputScript holds an output script for sweep txes.
	// NOTE(review): not referenced in this part of the file — confirm its
	// lifecycle in the tx-construction code.
	currentOutputScript []byte

	// relayFeeRate is the minimum relay fee, cached once in Start from the
	// fee estimator.
	relayFeeRate chainfee.AtomPerKByte

	// List of outputs being spent by the lastTx we loaded during startup.
	// This is used to prevent generating a double spend of inputs.
	startupPending map[wire.OutPoint]struct{}

	// quit is closed in Stop to signal shutdown; wg tracks the collector
	// goroutine spawned in Start.
	quit chan struct{}
	wg   sync.WaitGroup
}
   244  
// UtxoSweeperConfig contains dependencies of UtxoSweeper.
type UtxoSweeperConfig struct {
	// GenSweepScript generates a P2PKH script belonging to the wallet where
	// funds can be swept.
	GenSweepScript func() ([]byte, error)

	// FeeEstimator is used when crafting sweep transactions to estimate
	// the necessary fee relative to the expected size of the sweep
	// transaction.
	FeeEstimator chainfee.Estimator

	// Wallet contains the wallet functions that sweeper requires.
	Wallet Wallet

	// NewBatchTimer creates a channel that will be sent on when a certain
	// time window has passed. During this time window, new inputs can still
	// be added to the sweep tx that is about to be generated.
	NewBatchTimer func() <-chan time.Time

	// Notifier is an instance of a chain notifier we'll use to watch for
	// certain on-chain events.
	Notifier chainntnfs.ChainNotifier

	// Store stores the published sweeper txes.
	Store SweeperStore

	// Signer is used by the sweeper to generate valid witnesses at the
	// time the incubated outputs need to be spent.
	Signer input.Signer

	// MaxInputsPerTx specifies the default maximum number of inputs allowed
	// in a single sweep tx. If more need to be swept, multiple txes are
	// created and published.
	MaxInputsPerTx int

	// MaxSweepAttempts specifies the maximum number of times an input is
	// included in a publish attempt before giving up and returning an error
	// to the caller.
	MaxSweepAttempts int

	// NextAttemptDeltaFunc returns given the number of already attempted
	// sweeps, how many blocks to wait before retrying to sweep.
	NextAttemptDeltaFunc func(int) int32

	// NetParams stores the specific chain configuration parameters being
	// used for this sweeper.
	NetParams *chaincfg.Params

	// MaxFeeRate is the maximum fee rate allowed within the
	// UtxoSweeper.
	MaxFeeRate chainfee.AtomPerKByte

	// FeeRateBucketSize is the default size of fee rate buckets we'll use
	// when clustering inputs into buckets with similar fee rates within the
	// UtxoSweeper.
	//
	// Given a minimum relay fee rate of 1 atom/KB, a fee rate bucket size
	// of 10 would result in the following fee rate buckets up to the
	// maximum fee rate:
	//
	//   #1: min = 1 atom/KB, max (exclusive) = 11 atom/KB
	//   #2: min = 11 atom/KB, max (exclusive) = 21 atom/KB...
	FeeRateBucketSize int
}
   309  
// Result is the struct that is pushed through the result channel. Callers can
// use this to be informed of the final sweep result. In case of a remote
// spend, Err will be ErrRemoteSpend.
type Result struct {
	// Err is the final result of the sweep. It is nil when the input is
	// swept successfully by us. ErrRemoteSpend is returned when another
	// party took the input.
	Err error

	// Tx is the transaction that spent the input.
	Tx *wire.MsgTx
}
   322  
// sweepInputMessage structs are used in the internal channel between the
// SweepInput call and the sweeper main loop.
type sweepInputMessage struct {
	// input is the UTXO the caller wants swept.
	input input.Input

	// params controls how the input is swept.
	params Params

	// resultChan receives the final outcome of the sweep.
	resultChan chan Result
}
   330  
   331  // New returns a new Sweeper instance.
   332  func New(cfg *UtxoSweeperConfig) *UtxoSweeper {
   333  	return &UtxoSweeper{
   334  		cfg:               cfg,
   335  		newInputs:         make(chan *sweepInputMessage),
   336  		spendChan:         make(chan *chainntnfs.SpendDetail),
   337  		updateReqs:        make(chan *updateReq),
   338  		pendingSweepsReqs: make(chan *pendingSweepsReq),
   339  		quit:              make(chan struct{}),
   340  		pendingInputs:     make(pendingInputs),
   341  		startupPending:    make(map[wire.OutPoint]struct{}),
   342  	}
   343  }
   344  
// Start starts the process of constructing and publishing sweep txes. The
// steps below are deliberately ordered: the last published tx is rebroadcast
// before anything else so a crash during publish cannot cause a new output
// script to be derived on every restart.
func (s *UtxoSweeper) Start() error {
	// Only the first call performs startup; subsequent calls are no-ops.
	if !atomic.CompareAndSwapUint32(&s.started, 0, 1) {
		return nil
	}

	log.Tracef("Sweeper starting")

	// Retrieve last published tx from database.
	lastTx, err := s.cfg.Store.GetLastPublishedTx()
	if err != nil {
		return fmt.Errorf("get last published tx: %v", err)
	}

	// Republish in case the previous call crashed lnd. We don't care about
	// the return value, because inputs will be re-offered and retried
	// anyway. The only reason we republish here is to prevent the corner
	// case where lnd goes into a restart loop because of a crashing publish
	// tx where we keep deriving new output script. By publishing and
	// possibly crashing already now, we haven't derived a new output script
	// yet.
	if lastTx != nil {
		log.Debugf("Publishing last tx %v", lastTx.TxHash())

		// Error can be ignored. Because we are starting up, there are
		// no pending inputs to update based on the publish result.
		err := s.cfg.Wallet.PublishTransaction(lastTx, "")
		if err != nil && err != lnwallet.ErrDoubleSpend {
			log.Errorf("last tx publish: %v", err)
		}

		// Remember which outpoints the startup tx spends so that
		// re-offered inputs for them are delayed rather than
		// double-spent.
		for _, in := range lastTx.TxIn {
			s.startupPending[in.PreviousOutPoint] = struct{}{}
		}
	}

	// Retrieve relay fee for dust limit calculation. Assume that this will
	// not change from here on.
	s.relayFeeRate = s.cfg.FeeEstimator.RelayFeePerKB()

	// We need to register for block epochs and retry sweeping every block.
	// We should get a notification with the current best block immediately
	// if we don't provide any epoch. We'll wait for that in the collector.
	blockEpochs, err := s.cfg.Notifier.RegisterBlockEpochNtfn(nil)
	if err != nil {
		return fmt.Errorf("register block epoch ntfn: %v", err)
	}

	// Start sweeper main loop.
	s.wg.Add(1)
	go func() {
		defer blockEpochs.Cancel()
		defer s.wg.Done()

		s.collector(blockEpochs.Epochs)

		// The collector exited and will no longer handle incoming
		// requests. This can happen on shutdown, when the block
		// notifier shuts down before the sweeper and its clients. In
		// order to not deadlock the clients waiting for their requests
		// being handled, we handle them here and immediately return an
		// error. When the sweeper finally is shut down we can exit as
		// the clients will be notified.
		for {
			select {
			case inp := <-s.newInputs:
				inp.resultChan <- Result{
					Err: ErrSweeperShuttingDown,
				}

			case req := <-s.pendingSweepsReqs:
				req.errChan <- ErrSweeperShuttingDown

			case req := <-s.updateReqs:
				req.responseChan <- &updateResp{
					err: ErrSweeperShuttingDown,
				}

			case <-s.quit:
				return
			}
		}
	}()

	return nil
}
   431  
// RelayFeePerKB returns the minimum fee rate required for transactions to be
// relayed. The value is cached once during Start and assumed constant
// afterwards.
func (s *UtxoSweeper) RelayFeePerKB() chainfee.AtomPerKByte {
	return s.relayFeeRate
}
   437  
// Stop stops sweeper from listening to block epochs and constructing sweep
// txes.
func (s *UtxoSweeper) Stop() error {
	// Only the first call performs the shutdown; subsequent calls are
	// no-ops.
	if !atomic.CompareAndSwapUint32(&s.stopped, 0, 1) {
		return nil
	}

	log.Info("Sweeper shutting down")

	// Closing quit signals the collector (and the fallback drain loop in
	// Start) to exit; then wait for that goroutine to finish.
	close(s.quit)
	s.wg.Wait()

	log.Debugf("Sweeper shut down")

	return nil
}
   454  
   455  // SweepInput sweeps inputs back into the wallet. The inputs will be batched and
   456  // swept after the batch time window ends. A custom fee preference can be
   457  // provided to determine what fee rate should be used for the input. Note that
   458  // the input may not always be swept with this exact value, as its possible for
   459  // it to be batched under the same transaction with other similar fee rate
   460  // inputs.
   461  //
   462  // NOTE: Extreme care needs to be taken that input isn't changed externally.
   463  // Because it is an interface and we don't know what is exactly behind it, we
   464  // cannot make a local copy in sweeper.
   465  func (s *UtxoSweeper) SweepInput(input input.Input,
   466  	params Params) (chan Result, error) {
   467  
   468  	if input == nil || input.OutPoint() == nil || input.SignDesc() == nil {
   469  		return nil, errors.New("nil input received")
   470  	}
   471  
   472  	// Ensure the client provided a sane fee preference.
   473  	if _, err := s.feeRateForPreference(params.Fee); err != nil {
   474  		return nil, err
   475  	}
   476  
   477  	absoluteTimeLock, _ := input.RequiredLockTime()
   478  	log.Infof("Sweep request received: out_point=%v, witness_type=%v, "+
   479  		"relative_time_lock=%v, absolute_time_lock=%v, amount=%v, "+
   480  		"params=(%v)", input.OutPoint(), input.WitnessType(),
   481  		input.BlocksToMaturity(), absoluteTimeLock,
   482  		dcrutil.Amount(input.SignDesc().Output.Value), params)
   483  
   484  	sweeperInput := &sweepInputMessage{
   485  		input:      input,
   486  		params:     params,
   487  		resultChan: make(chan Result, 1),
   488  	}
   489  
   490  	// Deliver input to the main event loop.
   491  	select {
   492  	case s.newInputs <- sweeperInput:
   493  	case <-s.quit:
   494  		return nil, ErrSweeperShuttingDown
   495  	}
   496  
   497  	return sweeperInput.resultChan, nil
   498  }
   499  
   500  // feeRateForPreference returns a fee rate for the given fee preference. It
   501  // ensures that the fee rate respects the bounds of the UtxoSweeper.
   502  func (s *UtxoSweeper) feeRateForPreference(
   503  	feePreference FeePreference) (chainfee.AtomPerKByte, error) {
   504  
   505  	// Ensure a type of fee preference is specified to prevent using a
   506  	// default below.
   507  	if feePreference.FeeRate == 0 && feePreference.ConfTarget == 0 {
   508  		return 0, ErrNoFeePreference
   509  	}
   510  
   511  	feeRate, err := DetermineFeePerKB(s.cfg.FeeEstimator, feePreference)
   512  	if err != nil {
   513  		return 0, err
   514  	}
   515  	if feeRate < s.relayFeeRate {
   516  		return 0, fmt.Errorf("fee preference resulted in invalid fee "+
   517  			"rate %v, minimum is %v", feeRate, s.relayFeeRate)
   518  	}
   519  	if feeRate > s.cfg.MaxFeeRate {
   520  		return 0, fmt.Errorf("fee preference resulted in invalid fee "+
   521  			"rate %v, maximum is %v", feeRate, s.cfg.MaxFeeRate)
   522  	}
   523  
   524  	return feeRate, nil
   525  }
   526  
   527  // removeLastSweepDescendants removes any transactions from the wallet that
   528  // spend outputs produced by the passed spendingTx. This needs to be done in
   529  // cases where we're not the only ones that can sweep an output, but there may
   530  // exist unconfirmed spends that spend outputs created by a sweep transaction.
   531  // The most common case for this is when someone sweeps our anchor outputs
   532  // after 16 blocks.
   533  func (s *UtxoSweeper) removeLastSweepDescendants(spendingTx *wire.MsgTx) error {
   534  	// Obtain all the past sweeps that we've done so far. We'll need these
   535  	// to ensure that if the spendingTx spends any of the same inputs, then
   536  	// we remove any transaction that may be spending those inputs from the
   537  	// wallet.
   538  	//
   539  	// TODO(roasbeef): can be last sweep here if we remove anything confirmed
   540  	// from the store?
   541  	pastSweepHashes, err := s.cfg.Store.ListSweeps()
   542  	if err != nil {
   543  		return err
   544  	}
   545  
   546  	log.Debugf("Attempting to remove descendant txns invalidated by "+
   547  		"(txid=%v): %v", spendingTx.TxHash(), spew.Sdump(spendingTx))
   548  
   549  	// Construct a map of the inputs this transaction spends for each look
   550  	// up.
   551  	inputsSpent := make(map[wire.OutPoint]struct{}, len(spendingTx.TxIn))
   552  	for _, txIn := range spendingTx.TxIn {
   553  		inputsSpent[txIn.PreviousOutPoint] = struct{}{}
   554  	}
   555  
   556  	// We'll now go through each past transaction we published during this
   557  	// epoch and cross reference the spent inputs. If there're any inputs
   558  	// in common with the inputs the spendingTx spent, then we'll remove
   559  	// those.
   560  	//
   561  	// TODO(roasbeef): need to start to remove all transaction hashes after
   562  	// every N blocks (assumed point of no return)
   563  	for _, sweepHash := range pastSweepHashes {
   564  		sweepTx, err := s.cfg.Wallet.FetchTx(sweepHash)
   565  		if err != nil {
   566  			return err
   567  		}
   568  
   569  		// Transaction wasn't found in the wallet, may have already
   570  		// been replaced/removed.
   571  		if sweepTx == nil {
   572  			continue
   573  		}
   574  
   575  		// Check to see if this past sweep transaction spent any of the
   576  		// same inputs as spendingTx.
   577  		var isConflicting bool
   578  		for _, txIn := range sweepTx.TxIn {
   579  			if _, ok := inputsSpent[txIn.PreviousOutPoint]; ok {
   580  				isConflicting = true
   581  				break
   582  			}
   583  		}
   584  
   585  		// If it did, then we'll signal the wallet to remove all the
   586  		// transactions that are descendants of outputs created by the
   587  		// sweepTx.
   588  		if isConflicting {
   589  			log.Debugf("Removing sweep txid=%v from wallet: %v",
   590  				sweepTx.TxHash(), spew.Sdump(sweepTx))
   591  
   592  			err := s.cfg.Wallet.RemoveDescendants(sweepTx)
   593  			if err != nil {
   594  				log.Warnf("unable to remove descendants: %v", err)
   595  			}
   596  		}
   597  	}
   598  
   599  	return nil
   600  }
   601  
   602  // collector is the sweeper main loop. It processes new inputs, spend
   603  // notifications and counts down to publication of the sweep tx.
   604  func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch) {
   605  	// We registered for the block epochs with a nil request. The notifier
   606  	// should send us the current best block immediately. So we need to wait
   607  	// for it here because we need to know the current best height.
   608  	var bestHeight int32
   609  	select {
   610  	case bestBlock := <-blockEpochs:
   611  		bestHeight = bestBlock.Height
   612  
   613  	case <-s.quit:
   614  		return
   615  	}
   616  
   617  	for {
   618  		select {
   619  		// A new inputs is offered to the sweeper. We check to see if we
   620  		// are already trying to sweep this input and if not, set up a
   621  		// listener to spend and schedule a sweep.
   622  		case input := <-s.newInputs:
   623  			outpoint := *input.input.OutPoint()
   624  			pendInput, pending := s.pendingInputs[outpoint]
   625  			if pending {
   626  				log.Debugf("Already pending input %v received",
   627  					outpoint)
   628  
   629  				// Before updating the input details, check if
   630  				// an exclusive group was set, and if so, assume
   631  				// this input as finalized and remove all other
   632  				// inputs belonging to the same exclusive group.
   633  				var prevExclGroup *uint64
   634  				if pendInput.params.ExclusiveGroup != nil &&
   635  					input.params.ExclusiveGroup == nil {
   636  					prevExclGroup = new(uint64)
   637  					*prevExclGroup = *pendInput.params.ExclusiveGroup
   638  				}
   639  
   640  				// Update input details and sweep parameters.
   641  				// The re-offered input details may contain a
   642  				// change to the unconfirmed parent tx info.
   643  				pendInput.params = input.params
   644  				pendInput.Input = input.input
   645  
   646  				// Add additional result channel to signal
   647  				// spend of this input.
   648  				pendInput.listeners = append(
   649  					pendInput.listeners, input.resultChan,
   650  				)
   651  
   652  				if prevExclGroup != nil {
   653  					s.removeExclusiveGroup(*prevExclGroup)
   654  				}
   655  
   656  				continue
   657  			}
   658  
   659  			minPublishHeight := bestHeight
   660  
   661  			// If this input was sent on our last startup tx, delay
   662  			// trying to sweep it again.
   663  			if _, ok := s.startupPending[outpoint]; ok {
   664  				attemptDelta := s.cfg.NextAttemptDeltaFunc(1)
   665  				minPublishHeight = bestHeight + attemptDelta
   666  				log.Debugf("Delaying startup outpoint %s by %d "+
   667  					"blocks (up to height %d)", outpoint,
   668  					attemptDelta, minPublishHeight)
   669  				delete(s.startupPending, outpoint)
   670  			}
   671  
   672  			// Create a new pendingInput and initialize the
   673  			// listeners slice with the passed in result channel. If
   674  			// this input is offered for sweep again, the result
   675  			// channel will be appended to this slice.
   676  			pendInput = &pendingInput{
   677  				listeners:        []chan Result{input.resultChan},
   678  				Input:            input.input,
   679  				minPublishHeight: minPublishHeight,
   680  				params:           input.params,
   681  			}
   682  			s.pendingInputs[outpoint] = pendInput
   683  
   684  			// Start watching for spend of this input, either by us
   685  			// or the remote party.
   686  			cancel, err := s.waitForSpend(
   687  				outpoint,
   688  				input.input.SignDesc().Output.PkScript,
   689  				input.input.HeightHint(),
   690  			)
   691  			if err != nil {
   692  				err := fmt.Errorf("wait for spend: %v", err)
   693  				s.signalAndRemove(&outpoint, Result{Err: err})
   694  				continue
   695  			}
   696  			pendInput.ntfnRegCancel = cancel
   697  
   698  			// Check to see if with this new input a sweep tx can be
   699  			// formed.
   700  			if err := s.scheduleSweep(bestHeight); err != nil {
   701  				log.Errorf("schedule sweep: %v", err)
   702  			}
   703  
   704  		// A spend of one of our inputs is detected. Signal sweep
   705  		// results to the caller(s).
   706  		case spend := <-s.spendChan:
   707  			// For testing purposes.
   708  			if s.testSpendChan != nil {
   709  				s.testSpendChan <- *spend.SpentOutPoint
   710  			}
   711  
   712  			// Query store to find out if we ever published this
   713  			// tx.
   714  			spendHash := *spend.SpenderTxHash
   715  			isOurTx, err := s.cfg.Store.IsOurTx(spendHash)
   716  			if err != nil {
   717  				log.Errorf("cannot determine if tx %v "+
   718  					"is ours: %v", spendHash, err,
   719  				)
   720  				continue
   721  			}
   722  
   723  			// If this isn't our transaction, it means someone else
   724  			// swept outputs that we were attempting to sweep. This
   725  			// can happen for anchor outputs as well as justice
   726  			// transactions. In this case, we'll notify the wallet
   727  			// to remove any spends that a descent from this
   728  			// output.
   729  			if !isOurTx {
   730  				err := s.removeLastSweepDescendants(
   731  					spend.SpendingTx,
   732  				)
   733  				if err != nil {
   734  					log.Warnf("unable to remove descendant "+
   735  						"transactions due to tx %v: ",
   736  						spendHash)
   737  				}
   738  
   739  				log.Debugf("Detected spend related to in flight inputs "+
   740  					"(is_ours=%v): %v",
   741  					newLogClosure(func() string {
   742  						spend.SpendingTx.CachedTxHash()
   743  						return spew.Sdump(spend.SpendingTx)
   744  					}), isOurTx,
   745  				)
   746  			}
   747  
   748  			// Clear out the last published tx since it either
   749  			// already got confirmed or something else made it
   750  			// invalid.
   751  			//
   752  			// Note(decred): This is used here because in SPV
   753  			// wallets we might end up with a tx that would never
   754  			// confirm and trying to publish it through the wallet
   755  			// causes it to keep this tx forever. This isn't
   756  			// strictly correct since we might have multiple
   757  			// pending txs in the sweeper but the store only
   758  			// currently holds on to the last one published.
   759  			err = s.cfg.Store.NotifyPublishTx(nil)
   760  			if err != nil {
   761  				log.Errorf("Error clearing last tx from store: %v", err)
   762  			}
   763  
   764  			// Signal sweep results for inputs in this confirmed
   765  			// tx.
   766  			for _, txIn := range spend.SpendingTx.TxIn {
   767  				outpoint := txIn.PreviousOutPoint
   768  
   769  				// Check if this input is known to us. It could
   770  				// probably be unknown if we canceled the
   771  				// registration, deleted from pendingInputs but
   772  				// the ntfn was in-flight already. Or this could
   773  				// be not one of our inputs.
   774  				input, ok := s.pendingInputs[outpoint]
   775  				if !ok {
   776  					continue
   777  				}
   778  
   779  				// Return either a nil or a remote spend result.
   780  				var err error
   781  				if !isOurTx {
   782  					err = ErrRemoteSpend
   783  				}
   784  
   785  				// Signal result channels.
   786  				s.signalAndRemove(&outpoint, Result{
   787  					Tx:  spend.SpendingTx,
   788  					Err: err,
   789  				})
   790  
   791  				// Remove all other inputs in this exclusive
   792  				// group.
   793  				if input.params.ExclusiveGroup != nil {
   794  					s.removeExclusiveGroup(
   795  						*input.params.ExclusiveGroup,
   796  					)
   797  				}
   798  			}
   799  
   800  			// Now that an input of ours is spent, we can try to
   801  			// resweep the remaining inputs.
   802  			if err := s.scheduleSweep(bestHeight); err != nil {
   803  				log.Errorf("schedule sweep: %v", err)
   804  			}
   805  
   806  		// A new external request has been received to retrieve all of
   807  		// the inputs we're currently attempting to sweep.
   808  		case req := <-s.pendingSweepsReqs:
   809  			req.respChan <- s.handlePendingSweepsReq(req)
   810  
   811  		// A new external request has been received to bump the fee rate
   812  		// of a given input.
   813  		case req := <-s.updateReqs:
   814  			resultChan, err := s.handleUpdateReq(req, bestHeight)
   815  			req.responseChan <- &updateResp{
   816  				resultChan: resultChan,
   817  				err:        err,
   818  			}
   819  
   820  		// The timer expires and we are going to (re)sweep.
   821  		case <-s.timer:
   822  			log.Debugf("Sweep timer expired")
   823  
   824  			// Set timer to nil so we know that a new timer needs to
   825  			// be started when new inputs arrive.
   826  			s.timer = nil
   827  
   828  			// We'll attempt to cluster all of our inputs with
   829  			// similar fee rates. Before attempting to sweep them,
   830  			// we'll sort them in descending fee rate order. We do
   831  			// this to ensure any inputs which have had their fee
   832  			// rate bumped are broadcast first in order enforce the
   833  			// RBF policy.
   834  			inputClusters := s.createInputClusters()
   835  			sort.Slice(inputClusters, func(i, j int) bool {
   836  				return inputClusters[i].sweepFeeRate >
   837  					inputClusters[j].sweepFeeRate
   838  			})
   839  			for _, cluster := range inputClusters {
   840  				err := s.sweepCluster(cluster, bestHeight)
   841  				if err != nil {
   842  					log.Errorf("input cluster sweep: %v",
   843  						err)
   844  				}
   845  			}
   846  
   847  		// A new block comes in. Things may have changed, so we retry a
   848  		// sweep.
   849  		case epoch, ok := <-blockEpochs:
   850  			if !ok {
   851  				return
   852  			}
   853  
   854  			bestHeight = epoch.Height
   855  
   856  			log.Debugf("New block: height=%v, sha=%v",
   857  				epoch.Height, epoch.Hash)
   858  
   859  			if err := s.scheduleSweep(bestHeight); err != nil {
   860  				log.Errorf("schedule sweep: %v", err)
   861  			}
   862  
   863  		case <-s.quit:
   864  			return
   865  		}
   866  	}
   867  }
   868  
   869  // removeExclusiveGroup removes all inputs in the given exclusive group. This
   870  // function is called when one of the exclusive group inputs has been spent. The
   871  // other inputs won't ever be spendable and can be removed. This also prevents
   872  // them from being part of future sweep transactions that would fail.
   873  func (s *UtxoSweeper) removeExclusiveGroup(group uint64) {
   874  	for outpoint, input := range s.pendingInputs {
   875  		outpoint := outpoint
   876  
   877  		// Skip inputs that aren't exclusive.
   878  		if input.params.ExclusiveGroup == nil {
   879  			continue
   880  		}
   881  
   882  		// Skip inputs from other exclusive groups.
   883  		if *input.params.ExclusiveGroup != group {
   884  			continue
   885  		}
   886  
   887  		// Signal result channels.
   888  		s.signalAndRemove(&outpoint, Result{
   889  			Err: ErrExclusiveGroupSpend,
   890  		})
   891  	}
   892  }
   893  
   894  // sweepCluster tries to sweep the given input cluster.
   895  func (s *UtxoSweeper) sweepCluster(cluster inputCluster,
   896  	currentHeight int32) error {
   897  
   898  	// Execute the sweep within a coin select lock. Otherwise the coins that
   899  	// we are going to spend may be selected for other transactions like
   900  	// funding of a channel.
   901  	return s.cfg.Wallet.WithCoinSelectLock(func() error {
   902  		// Examine pending inputs and try to construct
   903  		// lists of inputs.
   904  		inputLists, err := s.getInputLists(cluster, currentHeight)
   905  		if err != nil {
   906  			return fmt.Errorf("unable to examine pending inputs: %v", err)
   907  		}
   908  
   909  		// Sweep selected inputs.
   910  		for _, inputs := range inputLists {
   911  			err := s.sweep(inputs, cluster.sweepFeeRate, currentHeight)
   912  			if err != nil {
   913  				return fmt.Errorf("unable to sweep inputs: %v", err)
   914  			}
   915  		}
   916  
   917  		return nil
   918  	})
   919  }
   920  
   921  // bucketForFeeReate determines the proper bucket for a fee rate. This is done
   922  // in order to batch inputs with similar fee rates together.
   923  func (s *UtxoSweeper) bucketForFeeRate(
   924  	feeRate chainfee.AtomPerKByte) int {
   925  
   926  	// Create an isolated bucket for sweeps at the minimum fee rate. This is
   927  	// to prevent very small outputs (anchors) from becoming uneconomical if
   928  	// their fee rate would be averaged with higher fee rate inputs in a
   929  	// regular bucket.
   930  	if feeRate == s.relayFeeRate {
   931  		return 0
   932  	}
   933  
   934  	return 1 + int(feeRate-s.relayFeeRate)/s.cfg.FeeRateBucketSize
   935  }
   936  
   937  // createInputClusters creates a list of input clusters from the set of pending
   938  // inputs known by the UtxoSweeper. It clusters inputs by
   939  // 1) Required tx locktime
   940  // 2) Similar fee rates
   941  func (s *UtxoSweeper) createInputClusters() []inputCluster {
   942  	inputs := s.pendingInputs
   943  
   944  	// We start by getting the inputs clusters by locktime. Since the
   945  	// inputs commit to the locktime, they can only be clustered together
   946  	// if the locktime is equal.
   947  	lockTimeClusters, nonLockTimeInputs := s.clusterByLockTime(inputs)
   948  
   949  	// Cluster the the remaining inputs by sweep fee rate.
   950  	feeClusters := s.clusterBySweepFeeRate(nonLockTimeInputs)
   951  
   952  	// Since the inputs that we clustered by fee rate don't commit to a
   953  	// specific locktime, we can try to merge a locktime cluster with a fee
   954  	// cluster.
   955  	return zipClusters(lockTimeClusters, feeClusters)
   956  }
   957  
   958  // clusterByLockTime takes the given set of pending inputs and clusters those
   959  // with equal locktime together. Each cluster contains a sweep fee rate, which
   960  // is determined by calculating the average fee rate of all inputs within that
   961  // cluster. In addition to the created clusters, inputs that did not specify a
   962  // required lock time are returned.
   963  func (s *UtxoSweeper) clusterByLockTime(inputs pendingInputs) ([]inputCluster,
   964  	pendingInputs) {
   965  
   966  	locktimes := make(map[uint32]pendingInputs)
   967  	inputFeeRates := make(map[wire.OutPoint]chainfee.AtomPerKByte)
   968  	rem := make(pendingInputs)
   969  
   970  	// Go through all inputs and check if they require a certain locktime.
   971  	for op, input := range inputs {
   972  		lt, ok := input.RequiredLockTime()
   973  		if !ok {
   974  			rem[op] = input
   975  			continue
   976  		}
   977  
   978  		// Check if we already have inputs with this locktime.
   979  		p, ok := locktimes[lt]
   980  		if !ok {
   981  			p = make(pendingInputs)
   982  		}
   983  
   984  		p[op] = input
   985  		locktimes[lt] = p
   986  
   987  		// We also get the preferred fee rate for this input.
   988  		feeRate, err := s.feeRateForPreference(input.params.Fee)
   989  		if err != nil {
   990  			log.Warnf("Skipping input %v: %v", op, err)
   991  			continue
   992  		}
   993  
   994  		input.lastFeeRate = feeRate
   995  		inputFeeRates[op] = feeRate
   996  	}
   997  
   998  	// We'll then determine the sweep fee rate for each set of inputs by
   999  	// calculating the average fee rate of the inputs within each set.
  1000  	inputClusters := make([]inputCluster, 0, len(locktimes))
  1001  	for lt, inputs := range locktimes {
  1002  		lt := lt
  1003  
  1004  		var sweepFeeRate chainfee.AtomPerKByte
  1005  		for op := range inputs {
  1006  			sweepFeeRate += inputFeeRates[op]
  1007  		}
  1008  
  1009  		sweepFeeRate /= chainfee.AtomPerKByte(len(inputs))
  1010  		inputClusters = append(inputClusters, inputCluster{
  1011  			lockTime:     &lt,
  1012  			sweepFeeRate: sweepFeeRate,
  1013  			inputs:       inputs,
  1014  		})
  1015  	}
  1016  
  1017  	return inputClusters, rem
  1018  }
  1019  
  1020  // clusterBySweepFeeRate takes the set of pending inputs within the UtxoSweeper
  1021  // and clusters those together with similar fee rates. Each cluster contains a
  1022  // sweep fee rate, which is determined by calculating the average fee rate of
  1023  // all inputs within that cluster.
  1024  func (s *UtxoSweeper) clusterBySweepFeeRate(inputs pendingInputs) []inputCluster {
  1025  	bucketInputs := make(map[int]*bucketList)
  1026  	inputFeeRates := make(map[wire.OutPoint]chainfee.AtomPerKByte)
  1027  
  1028  	// First, we'll group together all inputs with similar fee rates. This
  1029  	// is done by determining the fee rate bucket they should belong in.
  1030  	for op, input := range inputs {
  1031  		feeRate, err := s.feeRateForPreference(input.params.Fee)
  1032  		if err != nil {
  1033  			log.Warnf("Skipping input %v: %v", op, err)
  1034  			continue
  1035  		}
  1036  
  1037  		// Only try to sweep inputs with an unconfirmed parent if the
  1038  		// current sweep fee rate exceeds the parent tx fee rate. This
  1039  		// assumes that such inputs are offered to the sweeper solely
  1040  		// for the purpose of anchoring down the parent tx using cpfp.
  1041  		parentTx := input.UnconfParent()
  1042  		if parentTx != nil {
  1043  			parentFeeRate :=
  1044  				chainfee.AtomPerKByte(parentTx.Fee*1000) /
  1045  					chainfee.AtomPerKByte(parentTx.Size)
  1046  
  1047  			if parentFeeRate >= feeRate {
  1048  				log.Debugf("Skipping cpfp input %v: fee_rate=%v, "+
  1049  					"parent_fee_rate=%v", op, feeRate,
  1050  					parentFeeRate)
  1051  
  1052  				continue
  1053  			}
  1054  		}
  1055  
  1056  		feeGroup := s.bucketForFeeRate(feeRate)
  1057  
  1058  		// Create a bucket list for this fee rate if there isn't one
  1059  		// yet.
  1060  		buckets, ok := bucketInputs[feeGroup]
  1061  		if !ok {
  1062  			buckets = &bucketList{}
  1063  			bucketInputs[feeGroup] = buckets
  1064  		}
  1065  
  1066  		// Request the bucket list to add this input. The bucket list
  1067  		// will take into account exclusive group constraints.
  1068  		buckets.add(input)
  1069  
  1070  		input.lastFeeRate = feeRate
  1071  		inputFeeRates[op] = feeRate
  1072  	}
  1073  
  1074  	// We'll then determine the sweep fee rate for each set of inputs by
  1075  	// calculating the average fee rate of the inputs within each set.
  1076  	inputClusters := make([]inputCluster, 0, len(bucketInputs))
  1077  	for _, buckets := range bucketInputs {
  1078  		for _, inputs := range buckets.buckets {
  1079  			var sweepFeeRate chainfee.AtomPerKByte
  1080  			for op := range inputs {
  1081  				sweepFeeRate += inputFeeRates[op]
  1082  			}
  1083  			sweepFeeRate /= chainfee.AtomPerKByte(len(inputs))
  1084  			inputClusters = append(inputClusters, inputCluster{
  1085  				sweepFeeRate: sweepFeeRate,
  1086  				inputs:       inputs,
  1087  			})
  1088  		}
  1089  	}
  1090  
  1091  	return inputClusters
  1092  }
  1093  
  1094  // zipClusters merges pairwise clusters from as and bs such that cluster a from
  1095  // as is merged with a cluster from bs that has at least the fee rate of a.
  1096  // This to ensure we don't delay confirmation by decreasing the fee rate (the
  1097  // lock time inputs are typically second level HTLC transactions, that are time
  1098  // sensitive).
  1099  func zipClusters(as, bs []inputCluster) []inputCluster {
  1100  	// Sort the clusters by decreasing fee rates.
  1101  	sort.Slice(as, func(i, j int) bool {
  1102  		return as[i].sweepFeeRate >
  1103  			as[j].sweepFeeRate
  1104  	})
  1105  	sort.Slice(bs, func(i, j int) bool {
  1106  		return bs[i].sweepFeeRate >
  1107  			bs[j].sweepFeeRate
  1108  	})
  1109  
  1110  	var (
  1111  		finalClusters []inputCluster
  1112  		j             int
  1113  	)
  1114  
  1115  	// Go through each cluster in as, and merge with the next one from bs
  1116  	// if it has at least the fee rate needed.
  1117  	for i := range as {
  1118  		a := as[i]
  1119  
  1120  		switch {
  1121  
  1122  		// If the fee rate for the next one from bs is at least a's, we
  1123  		// merge.
  1124  		case j < len(bs) && bs[j].sweepFeeRate >= a.sweepFeeRate:
  1125  			merged := mergeClusters(a, bs[j])
  1126  			finalClusters = append(finalClusters, merged...)
  1127  
  1128  			// Increment j for the next round.
  1129  			j++
  1130  
  1131  		// We did not merge, meaning all the remining clusters from bs
  1132  		// have lower fee rate. Instead we add a directly to the final
  1133  		// clusters.
  1134  		default:
  1135  			finalClusters = append(finalClusters, a)
  1136  		}
  1137  	}
  1138  
  1139  	// Add any remaining clusters from bs.
  1140  	for ; j < len(bs); j++ {
  1141  		b := bs[j]
  1142  		finalClusters = append(finalClusters, b)
  1143  	}
  1144  
  1145  	return finalClusters
  1146  }
  1147  
  1148  // mergeClusters attempts to merge cluster a and b if they are compatible. The
  1149  // new cluster will have the locktime set if a or b had a locktime set, and a
  1150  // sweep fee rate that is the maximum of a and b's. If the two clusters are not
  1151  // compatible, they will be returned unchanged.
  1152  func mergeClusters(a, b inputCluster) []inputCluster {
  1153  	newCluster := inputCluster{}
  1154  
  1155  	switch {
  1156  
  1157  	// Incompatible locktimes, return the sets without merging them.
  1158  	case a.lockTime != nil && b.lockTime != nil && *a.lockTime != *b.lockTime:
  1159  		return []inputCluster{a, b}
  1160  
  1161  	case a.lockTime != nil:
  1162  		newCluster.lockTime = a.lockTime
  1163  
  1164  	case b.lockTime != nil:
  1165  		newCluster.lockTime = b.lockTime
  1166  	}
  1167  
  1168  	if a.sweepFeeRate > b.sweepFeeRate {
  1169  		newCluster.sweepFeeRate = a.sweepFeeRate
  1170  	} else {
  1171  		newCluster.sweepFeeRate = b.sweepFeeRate
  1172  	}
  1173  
  1174  	newCluster.inputs = make(pendingInputs)
  1175  
  1176  	for op, in := range a.inputs {
  1177  		newCluster.inputs[op] = in
  1178  	}
  1179  
  1180  	for op, in := range b.inputs {
  1181  		newCluster.inputs[op] = in
  1182  	}
  1183  
  1184  	return []inputCluster{newCluster}
  1185  }
  1186  
  1187  // scheduleSweep starts the sweep timer to create an opportunity for more inputs
  1188  // to be added.
  1189  func (s *UtxoSweeper) scheduleSweep(currentHeight int32) error {
  1190  	// The timer is already ticking, no action needed for the sweep to
  1191  	// happen.
  1192  	if s.timer != nil {
  1193  		log.Debugf("Timer still ticking")
  1194  		return nil
  1195  	}
  1196  
  1197  	// We'll only start our timer once we have inputs we're able to sweep.
  1198  	startTimer := false
  1199  	for _, cluster := range s.createInputClusters() {
  1200  		// Examine pending inputs and try to construct lists of inputs.
  1201  		// We don't need to obtain the coin selection lock, because we
  1202  		// just need an indication as to whether we can sweep. More
  1203  		// inputs may be added until we publish the transaction and
  1204  		// coins that we select now may be used in other transactions.
  1205  		inputLists, err := s.getInputLists(cluster, currentHeight)
  1206  		if err != nil {
  1207  			return fmt.Errorf("get input lists: %v", err)
  1208  		}
  1209  
  1210  		log.Infof("Sweep candidates at height=%v with fee_rate=%v, "+
  1211  			"yield %v distinct txns", currentHeight,
  1212  			cluster.sweepFeeRate, len(inputLists))
  1213  
  1214  		if len(inputLists) != 0 {
  1215  			startTimer = true
  1216  			break
  1217  		}
  1218  	}
  1219  	if !startTimer {
  1220  		return nil
  1221  	}
  1222  
  1223  	// Start sweep timer to create opportunity for more inputs to be added
  1224  	// before a tx is constructed.
  1225  	s.timer = s.cfg.NewBatchTimer()
  1226  
  1227  	log.Debugf("Sweep timer started")
  1228  
  1229  	return nil
  1230  }
  1231  
  1232  // signalAndRemove notifies the listeners of the final result of the input
  1233  // sweep. It cancels any pending spend notification and removes the input from
  1234  // the list of pending inputs. When this function returns, the sweeper has
  1235  // completely forgotten about the input.
  1236  func (s *UtxoSweeper) signalAndRemove(outpoint *wire.OutPoint, result Result) {
  1237  	pendInput := s.pendingInputs[*outpoint]
  1238  	listeners := pendInput.listeners
  1239  
  1240  	if result.Err == nil {
  1241  		log.Debugf("Dispatching sweep success for %v to %v listeners",
  1242  			outpoint, len(listeners),
  1243  		)
  1244  	} else {
  1245  		log.Debugf("Dispatching sweep error for %v to %v listeners: %v",
  1246  			outpoint, len(listeners), result.Err,
  1247  		)
  1248  
  1249  		if err := s.cfg.Wallet.AbandonDoubleSpends(outpoint); err != nil {
  1250  			log.Warnf("Error abandoning double of %s: %v", outpoint, err)
  1251  		}
  1252  	}
  1253  
  1254  	// Signal all listeners. Channel is buffered. Because we only send once
  1255  	// on every channel, it should never block.
  1256  	for _, resultChan := range listeners {
  1257  		resultChan <- result
  1258  	}
  1259  
  1260  	// Cancel spend notification with chain notifier. This is not necessary
  1261  	// in case of a success, except for that a reorg could still happen.
  1262  	if pendInput.ntfnRegCancel != nil {
  1263  		log.Debugf("Canceling spend ntfn for %v", outpoint)
  1264  
  1265  		pendInput.ntfnRegCancel()
  1266  	}
  1267  
  1268  	// Inputs are no longer pending after result has been sent.
  1269  	delete(s.pendingInputs, *outpoint)
  1270  	delete(s.startupPending, *outpoint)
  1271  }
  1272  
  1273  // getInputLists goes through the given inputs and constructs multiple distinct
  1274  // sweep lists with the given fee rate, each up to the configured maximum
  1275  // number of inputs. Negative yield inputs are skipped. Transactions with an
  1276  // output below the dust limit are not published. Those inputs remain pending
  1277  // and will be bundled with future inputs if possible.
  1278  func (s *UtxoSweeper) getInputLists(cluster inputCluster,
  1279  	currentHeight int32) ([]inputSet, error) {
  1280  
  1281  	// Filter for inputs that need to be swept. Create two lists: all
  1282  	// sweepable inputs and a list containing only the new, never tried
  1283  	// inputs.
  1284  	//
  1285  	// We want to create as large a tx as possible, so we return a final set
  1286  	// list that starts with sets created from all inputs. However, there is
  1287  	// a chance that those txes will not publish, because they already
  1288  	// contain inputs that failed before. Therefore we also add sets
  1289  	// consisting of only new inputs to the list, to make sure that new
  1290  	// inputs are given a good, isolated chance of being published.
  1291  	var newInputs, retryInputs []txInput
  1292  	for _, input := range cluster.inputs {
  1293  		// Skip inputs that have a minimum publish height that is not
  1294  		// yet reached.
  1295  		if input.minPublishHeight > currentHeight {
  1296  			continue
  1297  		}
  1298  
  1299  		// Add input to the either one of the lists.
  1300  		if input.publishAttempts == 0 {
  1301  			newInputs = append(newInputs, input)
  1302  		} else {
  1303  			retryInputs = append(retryInputs, input)
  1304  		}
  1305  	}
  1306  
  1307  	// If there is anything to retry, combine it with the new inputs and
  1308  	// form input sets.
  1309  	var allSets []inputSet
  1310  	if len(retryInputs) > 0 {
  1311  		var err error
  1312  		allSets, err = generateInputPartitionings(
  1313  			append(retryInputs, newInputs...),
  1314  			cluster.sweepFeeRate, s.cfg.MaxInputsPerTx,
  1315  			s.cfg.Wallet,
  1316  		)
  1317  		if err != nil {
  1318  			return nil, fmt.Errorf("input partitionings: %v", err)
  1319  		}
  1320  	}
  1321  
  1322  	// Create sets for just the new inputs.
  1323  	newSets, err := generateInputPartitionings(
  1324  		newInputs, cluster.sweepFeeRate, s.cfg.MaxInputsPerTx,
  1325  		s.cfg.Wallet,
  1326  	)
  1327  	if err != nil {
  1328  		return nil, fmt.Errorf("input partitionings: %v", err)
  1329  	}
  1330  
  1331  	log.Debugf("Sweep candidates at height=%v: total_num_pending=%v, "+
  1332  		"total_num_new=%v", currentHeight, len(allSets), len(newSets))
  1333  
  1334  	// Append the new sets at the end of the list, because those tx likely
  1335  	// have a higher fee per input.
  1336  	return append(allSets, newSets...), nil
  1337  }
  1338  
// sweep takes a set of preselected inputs, creates a sweep tx and publishes the
// tx. The output address is only marked as used if the publish succeeds.
//
// Even on success the swept inputs stay in pendingInputs; they are only
// removed once the spend notification arrives (or the max attempt count is
// hit below).
func (s *UtxoSweeper) sweep(inputs inputSet, feeRate chainfee.AtomPerKByte,
	currentHeight int32) error {

	// Generate an output script if there isn't an unused script available.
	// A script left over from a previously failed publish is reused to
	// avoid address inflation.
	if s.currentOutputScript == nil {
		pkScript, err := s.cfg.GenSweepScript()
		if err != nil {
			return fmt.Errorf("gen sweep script: %v", err)
		}
		s.currentOutputScript = pkScript
	}

	// Create sweep tx. The current height is used as the tx locktime.
	tx, err := createSweepTx(
		inputs, nil, s.currentOutputScript, uint32(currentHeight),
		feeRate, s.cfg.Signer, s.cfg.NetParams,
	)
	if err != nil {
		return fmt.Errorf("create sweep tx: %v", err)
	}

	// Add tx to the store before publication, so that we will always know
	// that a spend by this tx is ours. Otherwise if the publish doesn't
	// return, but did publish, we lose track of this tx. Even
	// republication on startup doesn't prevent this, because that call
	// returns a double spend error then and would also not add the hash
	// to the store.
	err = s.cfg.Store.NotifyPublishTx(tx)
	if err != nil {
		return fmt.Errorf("notify publish tx: %v", err)
	}

	// Publish sweep tx.
	log.Debugf("Publishing sweep tx %v, num_inputs=%v, height=%v",
		tx.TxHash(), len(tx.TxIn), currentHeight)

	// The log closure defers the expensive spew dump until trace logging
	// is actually enabled.
	log.Tracef("Sweep tx at height=%v: %v", currentHeight,
		newLogClosure(func() string {
			tx.CachedTxHash()
			return spew.Sdump(tx)
		}),
	)

	err = s.cfg.Wallet.PublishTransaction(
		tx, labels.MakeLabel(labels.LabelTypeSweepTransaction, nil),
	)

	// In case of an unexpected error, don't try to recover. A double
	// spend error is tolerated: it means a conflicting (possibly our own
	// earlier) tx already spends one of the inputs.
	if err != nil && err != lnwallet.ErrDoubleSpend {
		return fmt.Errorf("publish tx: %v", err)
	}

	// Keep the output script in case of an error, so that it can be reused
	// for the next transaction and causes no address inflation.
	if err == nil {
		s.currentOutputScript = nil
	}

	// Reschedule the swept inputs for a future retry in case this attempt
	// does not confirm.
	for _, input := range tx.TxIn {
		pi, ok := s.pendingInputs[input.PreviousOutPoint]
		if !ok {
			// It can be that the input has been removed because it
			// exceed the maximum number of attempts in a previous
			// input set. It could also be that this input is an
			// additional wallet input that was attached. In that
			// case there also isn't a pending input to update.
			continue
		}

		// Record another publish attempt.
		pi.publishAttempts++

		// We don't care what the result of the publish call was. Even
		// if it is published successfully, it can still be that it
		// needs to be retried. Call NextAttemptDeltaFunc to calculate
		// when to resweep this input.
		nextAttemptDelta := s.cfg.NextAttemptDeltaFunc(
			pi.publishAttempts,
		)

		pi.minPublishHeight = currentHeight + nextAttemptDelta

		log.Debugf("Rescheduling input %v after %v attempts at "+
			"height %v (delta %v)", input.PreviousOutPoint,
			pi.publishAttempts, pi.minPublishHeight,
			nextAttemptDelta)

		// Give up on inputs that failed too often and report the
		// failure to listeners.
		if pi.publishAttempts >= s.cfg.MaxSweepAttempts {
			// Signal result channels sweep result.
			s.signalAndRemove(&input.PreviousOutPoint, Result{
				Err: ErrTooManyAttempts,
			})
		}
	}

	return nil
}
  1438  
// waitForSpend registers a spend notification with the chain notifier. It
// returns a cancel function that can be used to cancel the registration.
//
// A background goroutine forwards the (single) spend event to s.spendChan,
// where the collector loop consumes it. The goroutine exits on cancellation
// of the registration or sweeper shutdown.
func (s *UtxoSweeper) waitForSpend(outpoint wire.OutPoint,
	script []byte, heightHint uint32) (func(), error) {

	log.Debugf("Wait for spend of %v", outpoint)

	spendEvent, err := s.cfg.Notifier.RegisterSpendNtfn(
		&outpoint, script, heightHint,
	)
	if err != nil {
		return nil, fmt.Errorf("register spend ntfn: %v", err)
	}

	// Track the forwarding goroutine so Stop can wait for it.
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		select {
		case spend, ok := <-spendEvent.Spend:
			// A closed channel means the registration was
			// canceled; there is nothing to forward.
			if !ok {
				log.Debugf("Spend ntfn for %v canceled",
					outpoint)
				return
			}

			log.Debugf("Delivering spend ntfn for %v",
				outpoint)
			// Forward to the collector, but never block past
			// shutdown.
			select {
			case s.spendChan <- spend:
				log.Debugf("Delivered spend ntfn for %v",
					outpoint)

			case <-s.quit:
			}
		case <-s.quit:
		}
	}()

	return spendEvent.Cancel, nil
}
  1479  
  1480  // PendingInputs returns the set of inputs that the UtxoSweeper is currently
  1481  // attempting to sweep.
  1482  func (s *UtxoSweeper) PendingInputs() (map[wire.OutPoint]*PendingInput, error) {
  1483  	respChan := make(chan map[wire.OutPoint]*PendingInput, 1)
  1484  	errChan := make(chan error, 1)
  1485  	select {
  1486  	case s.pendingSweepsReqs <- &pendingSweepsReq{
  1487  		respChan: respChan,
  1488  		errChan:  errChan,
  1489  	}:
  1490  	case <-s.quit:
  1491  		return nil, ErrSweeperShuttingDown
  1492  	}
  1493  
  1494  	select {
  1495  	case pendingSweeps := <-respChan:
  1496  		return pendingSweeps, nil
  1497  	case err := <-errChan:
  1498  		return nil, err
  1499  	case <-s.quit:
  1500  		return nil, ErrSweeperShuttingDown
  1501  	}
  1502  }
  1503  
  1504  // handlePendingSweepsReq handles a request to retrieve all pending inputs the
  1505  // UtxoSweeper is attempting to sweep.
  1506  func (s *UtxoSweeper) handlePendingSweepsReq(
  1507  	req *pendingSweepsReq) map[wire.OutPoint]*PendingInput {
  1508  
  1509  	pendingInputs := make(map[wire.OutPoint]*PendingInput, len(s.pendingInputs))
  1510  	for _, pendingInput := range s.pendingInputs {
  1511  		// Only the exported fields are set, as we expect the response
  1512  		// to only be consumed externally.
  1513  		op := *pendingInput.OutPoint()
  1514  		pendingInputs[op] = &PendingInput{
  1515  			OutPoint:    op,
  1516  			WitnessType: pendingInput.WitnessType(),
  1517  			Amount: dcrutil.Amount(
  1518  				pendingInput.SignDesc().Output.Value,
  1519  			),
  1520  			LastFeeRate:         pendingInput.lastFeeRate,
  1521  			BroadcastAttempts:   pendingInput.publishAttempts,
  1522  			NextBroadcastHeight: uint32(pendingInput.minPublishHeight),
  1523  			Params:              pendingInput.params,
  1524  		}
  1525  	}
  1526  
  1527  	return pendingInputs
  1528  }
  1529  
  1530  // UpdateParams allows updating the sweep parameters of a pending input in the
  1531  // UtxoSweeper. This function can be used to provide an updated fee preference
  1532  // and force flag that will be used for a new sweep transaction of the input
  1533  // that will act as a replacement transaction (RBF) of the original sweeping
  1534  // transaction, if any. The exclusive group is left unchanged.
  1535  //
  1536  // NOTE: This currently doesn't do any fee rate validation to ensure that a bump
  1537  // is actually successful. The responsibility of doing so should be handled by
  1538  // the caller.
  1539  func (s *UtxoSweeper) UpdateParams(input wire.OutPoint,
  1540  	params ParamsUpdate) (chan Result, error) {
  1541  
  1542  	// Ensure the client provided a sane fee preference.
  1543  	if _, err := s.feeRateForPreference(params.Fee); err != nil {
  1544  		return nil, err
  1545  	}
  1546  
  1547  	responseChan := make(chan *updateResp, 1)
  1548  	select {
  1549  	case s.updateReqs <- &updateReq{
  1550  		input:        input,
  1551  		params:       params,
  1552  		responseChan: responseChan,
  1553  	}:
  1554  	case <-s.quit:
  1555  		return nil, ErrSweeperShuttingDown
  1556  	}
  1557  
  1558  	select {
  1559  	case response := <-responseChan:
  1560  		return response.resultChan, response.err
  1561  	case <-s.quit:
  1562  		return nil, ErrSweeperShuttingDown
  1563  	}
  1564  }
  1565  
  1566  // handleUpdateReq handles an update request by simply updating the sweep
  1567  // parameters of the pending input. Currently, no validation is done on the new
  1568  // fee preference to ensure it will properly create a replacement transaction.
  1569  //
  1570  // TODO(wilmer):
  1571  //   - Validate fee preference to ensure we'll create a valid replacement
  1572  //     transaction to allow the new fee rate to propagate throughout the
  1573  //     network.
  1574  //   - Ensure we don't combine this input with any other unconfirmed inputs that
  1575  //     did not exist in the original sweep transaction, resulting in an invalid
  1576  //     replacement transaction.
  1577  func (s *UtxoSweeper) handleUpdateReq(req *updateReq, bestHeight int32) (
  1578  	chan Result, error) {
  1579  
  1580  	// If the UtxoSweeper is already trying to sweep this input, then we can
  1581  	// simply just increase its fee rate. This will allow the input to be
  1582  	// batched with others which also have a similar fee rate, creating a
  1583  	// higher fee rate transaction that replaces the original input's
  1584  	// sweeping transaction.
  1585  	pendingInput, ok := s.pendingInputs[req.input]
  1586  	if !ok {
  1587  		return nil, lnwallet.ErrNotMine
  1588  	}
  1589  
  1590  	// Create the updated parameters struct. Leave the exclusive group
  1591  	// unchanged.
  1592  	newParams := pendingInput.params
  1593  	newParams.Fee = req.params.Fee
  1594  	newParams.Force = req.params.Force
  1595  
  1596  	log.Debugf("Updating sweep parameters for %v from %v to %v", req.input,
  1597  		pendingInput.params, newParams)
  1598  
  1599  	pendingInput.params = newParams
  1600  
  1601  	// We'll reset the input's publish height to the current so that a new
  1602  	// transaction can be created that replaces the transaction currently
  1603  	// spending the input. We only do this for inputs that have been
  1604  	// broadcast at least once to ensure we don't spend an input before its
  1605  	// maturity height.
  1606  	//
  1607  	// NOTE: The UtxoSweeper is not yet offered time-locked inputs, so the
  1608  	// check for broadcast attempts is redundant at the moment.
  1609  	if pendingInput.publishAttempts > 0 {
  1610  		pendingInput.minPublishHeight = bestHeight
  1611  	}
  1612  
  1613  	if err := s.scheduleSweep(bestHeight); err != nil {
  1614  		log.Errorf("Unable to schedule sweep: %v", err)
  1615  	}
  1616  
  1617  	resultChan := make(chan Result, 1)
  1618  	pendingInput.listeners = append(pendingInput.listeners, resultChan)
  1619  
  1620  	return resultChan, nil
  1621  }
  1622  
  1623  // CreateSweepTx accepts a list of inputs and signs and generates a txn that
  1624  // spends from them. This method also makes an accurate fee estimate before
  1625  // generating the required witnesses.
  1626  //
  1627  // The created transaction has a single output sending all the funds back to
  1628  // the source wallet, after accounting for the fee estimate.
  1629  //
  1630  // The value of currentBlockHeight argument will be set as the tx locktime.
  1631  // This function assumes that all CLTV inputs will be unlocked after
  1632  // currentBlockHeight. Reasons not to use the maximum of all actual CLTV expiry
  1633  // values of the inputs:
  1634  //
  1635  // - Make handling re-orgs easier.
  1636  // - Thwart future possible fee sniping attempts.
  1637  // - Make us blend in with the bitcoind wallet.
  1638  func (s *UtxoSweeper) CreateSweepTx(inputs []input.Input, feePref FeePreference,
  1639  	currentBlockHeight uint32) (*wire.MsgTx, error) {
  1640  
  1641  	feePerKB, err := DetermineFeePerKB(s.cfg.FeeEstimator, feePref)
  1642  	if err != nil {
  1643  		return nil, err
  1644  	}
  1645  
  1646  	// Generate the receiving script to which the funds will be swept.
  1647  	pkScript, err := s.cfg.GenSweepScript()
  1648  	if err != nil {
  1649  		return nil, err
  1650  	}
  1651  
  1652  	return createSweepTx(
  1653  		inputs, nil, pkScript, currentBlockHeight, feePerKB,
  1654  		s.cfg.Signer, s.cfg.NetParams,
  1655  	)
  1656  }
  1657  
  1658  // DefaultNextAttemptDeltaFunc is the default calculation for next sweep attempt
  1659  // scheduling. It implements exponential back-off with some randomness. This is
  1660  // to prevent a stuck tx (for example because fee is too low and can't be bumped
  1661  // in dcrd) from blocking all other retried inputs in the same tx.
  1662  func DefaultNextAttemptDeltaFunc(attempts int) int32 {
  1663  	return 2 + rand.Int31n(1<<uint(attempts-1))
  1664  }
  1665  
// ListSweeps returns a list of the sweeps recorded by the sweep store.
// It is a thin pass-through to the configured Store; any error comes
// directly from the underlying store implementation.
func (s *UtxoSweeper) ListSweeps() ([]chainhash.Hash, error) {
	return s.cfg.Store.ListSweeps()
}
  1670  
  1671  // init initializes the random generator for random input rescheduling.
  1672  func init() {
  1673  	rand.Seed(time.Now().Unix())
  1674  }