github.com/decred/dcrlnd@v0.7.6/watchtower/wtclient/client.go

     1  package wtclient
     2  
     3  import (
     4  	"bytes"
     5  	"errors"
     6  	"fmt"
     7  	"net"
     8  	"sync"
     9  	"time"
    10  
    11  	"github.com/decred/dcrd/chaincfg/chainhash"
    12  	"github.com/decred/dcrd/chaincfg/v3"
    13  	"github.com/decred/dcrd/dcrec/secp256k1/v4"
    14  	"github.com/decred/dcrlnd/build"
    15  	"github.com/decred/dcrlnd/channeldb"
    16  	"github.com/decred/dcrlnd/input"
    17  	"github.com/decred/dcrlnd/keychain"
    18  	"github.com/decred/dcrlnd/lnwallet"
    19  	"github.com/decred/dcrlnd/lnwire"
    20  	"github.com/decred/dcrlnd/tor"
    21  	"github.com/decred/dcrlnd/watchtower/wtdb"
    22  	"github.com/decred/dcrlnd/watchtower/wtpolicy"
    23  	"github.com/decred/dcrlnd/watchtower/wtserver"
    24  	"github.com/decred/dcrlnd/watchtower/wtwire"
    25  	"github.com/decred/slog"
    26  )
    27  
    28  const (
    29  	// DefaultReadTimeout specifies the default duration we will wait during
    30  	// a read before breaking out of a blocking read.
    31  	DefaultReadTimeout = 15 * time.Second
    32  
    33  	// DefaultWriteTimeout specifies the default duration we will wait during
    34  	// a write before breaking out of a blocking write.
    35  	DefaultWriteTimeout = 15 * time.Second
    36  
    37  	// DefaultStatInterval specifies the default interval between logging
    38  	// metrics about the client's operation.
    39  	DefaultStatInterval = time.Minute
    40  
    41  	// DefaultForceQuitDelay specifies the default duration after which the
    42  	// client should abandon any pending updates or session negotiations
    43  	// before terminating.
    44  	DefaultForceQuitDelay = 10 * time.Second
    45  )
    46  
    47  // genActiveSessionFilter generates a filter that selects active sessions that
    48  // also match the desired channel type, either legacy or anchor.
    49  func genActiveSessionFilter(anchor bool) func(*wtdb.ClientSession) bool {
    50  	return func(s *wtdb.ClientSession) bool {
    51  		return s.Status == wtdb.CSessionActive &&
    52  			anchor == s.Policy.IsAnchorChannel()
    53  	}
    54  }
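
        // A minimal sketch of how the generated filter composes with
        // getClientSessions below; db and keyRing are assumed to be supplied
        // by the caller's environment:
        //
        //	filter := genActiveSessionFilter(true) // select anchor sessions
        //	sessions, err := getClientSessions(db, keyRing, nil, filter)
        //	if err != nil {
        //		return err
        //	}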
    55  
    56  // RegisteredTower encompasses information about a registered watchtower with
    57  // the client.
    58  type RegisteredTower struct {
    59  	*wtdb.Tower
    60  
    61  	// Sessions is the set of sessions corresponding to the watchtower.
    62  	Sessions map[wtdb.SessionID]*wtdb.ClientSession
    63  
    64  	// ActiveSessionCandidate determines whether the watchtower is currently
    65  	// being considered for new sessions.
    66  	ActiveSessionCandidate bool
    67  }
    68  
    69  // Client is the primary interface used by the daemon to control a client's
    70  // lifecycle and backup revoked states.
    71  type Client interface {
    72  	// AddTower adds a new watchtower reachable at the given address and
    73  	// considers it for new sessions. If the watchtower already exists, then
    74  	// any new addresses included will be considered when dialing it for
    75  	// session negotiations and backups.
    76  	AddTower(*lnwire.NetAddress) error
    77  
    78  	// RemoveTower removes a watchtower from being considered for future
    79  	// session negotiations and from being used for any subsequent backups
    80  	// until it's added again. If an address is provided, then this call
    81  	// only serves as a way of removing the address from the watchtower
    82  	// instead.
    83  	RemoveTower(*secp256k1.PublicKey, net.Addr) error
    84  
    85  	// RegisteredTowers retrieves the list of watchtowers registered with
    86  	// the client.
    87  	RegisteredTowers() ([]*RegisteredTower, error)
    88  
    89  	// LookupTower retrieves a registered watchtower through its public key.
    90  	LookupTower(*secp256k1.PublicKey) (*RegisteredTower, error)
    91  
    92  	// Stats returns the in-memory statistics of the client since startup.
    93  	Stats() ClientStats
    94  
    95  	// Policy returns the active client policy configuration.
    96  	Policy() wtpolicy.Policy
    97  
    98  	// RegisterChannel persistently initializes any channel-dependent
    99  	// parameters within the client. This should be called during link
   100  	// startup to ensure that the client is able to support the link during
   101  	// operation.
   102  	RegisterChannel(lnwire.ChannelID) error
   103  
   104  	// BackupState initiates a request to back up a particular revoked
   105  	// state. If the method returns nil, the backup is guaranteed to be
   106  	// successful unless the client is force quit, or the justice
   107  	// transaction would create dust outputs when trying to abide by the
   108  	// negotiated policy. The channel's type must be provided so that the
   109  	// client can construct a justice transaction matching the channel's
   110  	// commitment format.
   111  	BackupState(*lnwire.ChannelID, *lnwallet.BreachRetribution,
   112  		channeldb.ChannelType) error
   113  
   114  	// Start initializes the watchtower client, allowing it to process
   115  	// requests to back up revoked channel states.
   116  	Start() error
   117  
   118  	// Stop attempts a graceful shutdown of the watchtower client. In doing
   119  	// so, it will attempt to flush the pipeline and deliver any queued
   120  	// states to the tower before exiting.
   121  	Stop() error
   122  
   123  	// ForceQuit will forcibly shutdown the watchtower client. Calling this
   124  	// may lead to queued states being dropped.
   125  	ForceQuit()
   126  }
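
        // A typical lifecycle for a Client, shown as a hedged sketch rather
        // than a prescribed sequence; cfg, chanID, breachInfo, and chanType
        // are assumed to be supplied by the caller:
        //
        //	client, err := New(cfg)
        //	if err != nil {
        //		return err
        //	}
        //	if err := client.Start(); err != nil {
        //		return err
        //	}
        //	defer client.Stop()
        //
        //	// On link startup, register each channel once.
        //	if err := client.RegisterChannel(chanID); err != nil {
        //		return err
        //	}
        //
        //	// For every revoked state, hand the breach off for backup.
        //	if err := client.BackupState(&chanID, breachInfo, chanType); err != nil {
        //		return err
        //	}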
   127  
   128  // Config provides the TowerClient with access to the resources it requires to
   129  // perform its duty. All nillable fields must be non-nil for the client to be
   130  // initialized properly.
   131  type Config struct {
   132  	// Signer provides access to the wallet so that the client can sign
   133  	// justice transactions that spend from a remote party's commitment
   134  	// transaction.
   135  	Signer input.Signer
   136  
   137  	// NewAddress generates a new on-chain sweep pkscript.
   138  	NewAddress func() ([]byte, error)
   139  
   140  	// SecretKeyRing is used to derive the session keys used to communicate
   141  	// with the tower. The client only stores the KeyLocators internally so
   142  	// that we never store private keys on disk.
   143  	SecretKeyRing ECDHKeyRing
   144  
   145  	// Dial connects to an addr using the specified net and returns the
   146  	// connection object.
   147  	Dial tor.DialFunc
   148  
   149  	// AuthDial establishes a brontide connection over an onion or clear
   150  	// network.
   151  	AuthDial AuthDialer
   152  
   153  	// DB provides access to the client's stable storage medium.
   154  	DB DB
   155  
   156  	// Policy is the session policy the client will propose when creating
   157  	// new sessions with the tower. If the policy differs from any active
   158  	// sessions recorded in the database, those sessions will be ignored and
   159  	// new sessions will be requested immediately.
   160  	Policy wtpolicy.Policy
   161  
   162  	// ChainHash identifies the chain that the client is on and for which
   163  	// the tower must be watching to monitor for breaches.
   164  	ChainHash chainhash.Hash
   165  
   166  	// ChainParams stores the parameters of the chain that the client is
   167  	// on.
   168  	ChainParams *chaincfg.Params
   169  
   170  	// ForceQuitDelay is the duration after a shutdown is initiated that
   171  	// the client will wait before automatically aborting any pending
   172  	// backups if a clean shutdown has not completed. If the value is less
   173  	// than or equal to zero, a call to Stop may block indefinitely. The
   174  	// client can always be force quit externally, regardless of this value.
   175  	ForceQuitDelay time.Duration
   176  
   177  	// ReadTimeout is the duration we will wait during a read before
   178  	// breaking out of a blocking read. If the value is less than or equal
   179  	// to zero, the default will be used instead.
   180  	ReadTimeout time.Duration
   181  
   182  	// WriteTimeout is the duration we will wait during a write before
   183  	// breaking out of a blocking write. If the value is less than or equal
   184  	// to zero, the default will be used instead.
   185  	WriteTimeout time.Duration
   186  
   187  	// MinBackoff defines the initial backoff applied to connections with
   188  	// watchtowers. Subsequent backoff durations will grow exponentially up
   189  	// until MaxBackoff.
   190  	MinBackoff time.Duration
   191  
   192  	// MaxBackoff defines the maximum backoff applied to connections with
   193  	// watchtowers. If the exponential backoff produces a timeout greater
   194  	// than this value, the backoff will be clamped to MaxBackoff.
   195  	MaxBackoff time.Duration
   196  }
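
        // A minimal sketch of constructing a Config. Zero-valued timeouts fall
        // back to DefaultReadTimeout and DefaultWriteTimeout inside New; every
        // other value below (signer, keyRing, dialer, authDial, db, policy,
        // chainHash, params) is an assumed placeholder from the caller's
        // environment, and the backoff durations are illustrative rather than
        // recommended:
        //
        //	cfg := &Config{
        //		Signer:         signer,
        //		NewAddress:     newSweepPkScript,
        //		SecretKeyRing:  keyRing,
        //		Dial:           dialer,
        //		AuthDial:       authDial,
        //		DB:             db,
        //		Policy:         policy,
        //		ChainHash:      chainHash,
        //		ChainParams:    params,
        //		ForceQuitDelay: DefaultForceQuitDelay,
        //		MinBackoff:     time.Second,
        //		MaxBackoff:     time.Minute,
        //	}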
   197  
   198  // newTowerMsg is an internal message we'll use within the TowerClient to signal
   199  // that a new tower can be considered.
   200  type newTowerMsg struct {
   201  	// addr is the tower's reachable address that we'll use to establish a
   202  	// connection with.
   203  	addr *lnwire.NetAddress
   204  
   205  	// errChan is the channel through which we'll send a response back to
   206  	// the caller when handling their request.
   207  	//
   208  	// NOTE: This channel must be buffered.
   209  	errChan chan error
   210  }
   211  
   212  // staleTowerMsg is an internal message we'll use within the TowerClient to
   213  // signal that a tower should no longer be considered.
   214  type staleTowerMsg struct {
   215  	// pubKey is the identifying public key of the watchtower.
   216  	pubKey *secp256k1.PublicKey
   217  
   218  	// addr is an optional field that, when set, signals that the address
   219  	// should be removed from the watchtower's set of addresses, indicating
   220  	// that it is stale. If it's not set, then the watchtower should no
   221  	// longer be considered for new sessions.
   222  	addr net.Addr
   223  
   224  	// errChan is the channel through which we'll send a response back to
   225  	// the caller when handling their request.
   226  	//
   227  	// NOTE: This channel must be buffered.
   228  	errChan chan error
   229  }
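
        // Both message types follow the same request/response pattern,
        // mirrored by AddTower and RemoveTower below: the caller hands the
        // message to the dispatcher and then waits on errChan. The buffer is
        // required because the caller may abandon the wait when the client
        // shuts down; a buffered channel lets the dispatcher's reply proceed
        // without blocking. A minimal sketch of the pattern, where requests
        // stands in for the client's newTowers channel and quit for its
        // shutdown signal:
        //
        //	errChan := make(chan error, 1) // buffered: the reply never blocks
        //	select {
        //	case requests <- &newTowerMsg{addr: addr, errChan: errChan}:
        //	case <-quit:
        //		return ErrClientExiting
        //	}
        //	select {
        //	case err := <-errChan:
        //		return err
        //	case <-quit:
        //		return ErrClientExiting
        //	}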
   230  
   231  // TowerClient is a concrete implementation of the Client interface, offering a
   232  // non-blocking, reliable subsystem for backing up revoked states to a specified
   233  // private tower.
   234  type TowerClient struct {
   235  	started sync.Once
   236  	stopped sync.Once
   237  	forced  sync.Once
   238  
   239  	cfg *Config
   240  
   241  	log slog.Logger
   242  
   243  	pipeline *taskPipeline
   244  
   245  	negotiator        SessionNegotiator
   246  	candidateTowers   TowerCandidateIterator
   247  	candidateSessions map[wtdb.SessionID]*wtdb.ClientSession
   248  	activeSessions    sessionQueueSet
   249  
   250  	sessionQueue *sessionQueue
   251  	prevTask     *backupTask
   252  
   253  	backupMu          sync.Mutex
   254  	summaries         wtdb.ChannelSummaries
   255  	chanCommitHeights map[lnwire.ChannelID]uint64
   256  
   257  	statTicker *time.Ticker
   258  	stats      *ClientStats
   259  
   260  	newTowers   chan *newTowerMsg
   261  	staleTowers chan *staleTowerMsg
   262  
   263  	wg        sync.WaitGroup
   264  	forceQuit chan struct{}
   265  }
   266  
   267  // Compile-time constraint to ensure *TowerClient implements the Client
   268  // interface.
   269  var _ Client = (*TowerClient)(nil)
   270  
   271  // New initializes a new TowerClient from the provided Config. An error is
   272  // returned if the client could not be initialized.
   273  func New(config *Config) (*TowerClient, error) {
   274  	// Copy the config to prevent side-effects from modifying both the
   275  	// internal and external version of the Config.
   276  	cfg := new(Config)
   277  	*cfg = *config
   278  
   279  	// Set the read timeout to the default if none was provided.
   280  	if cfg.ReadTimeout <= 0 {
   281  		cfg.ReadTimeout = DefaultReadTimeout
   282  	}
   283  
   284  	// Set the write timeout to the default if none was provided.
   285  	if cfg.WriteTimeout <= 0 {
   286  		cfg.WriteTimeout = DefaultWriteTimeout
   287  	}
   288  
   289  	prefix := "(legacy)"
   290  	if cfg.Policy.IsAnchorChannel() {
   291  		prefix = "(anchor)"
   292  	}
   293  	plog := build.NewPrefixLog(prefix, log)
   294  
   295  	// Next, load all candidate sessions and towers from the database into
   296  	// the client. We will use any of these sessions if their policies match
   297  	// the current policy of the client, otherwise they will be ignored and
   298  	// new sessions will be requested.
   299  	isAnchorClient := cfg.Policy.IsAnchorChannel()
   300  	activeSessionFilter := genActiveSessionFilter(isAnchorClient)
   301  	candidateSessions, err := getClientSessions(
   302  		cfg.DB, cfg.SecretKeyRing, nil, activeSessionFilter,
   303  	)
   304  	if err != nil {
   305  		return nil, err
   306  	}
   307  
   308  	var candidateTowers []*wtdb.Tower
   309  	for _, s := range candidateSessions {
   310  		plog.Infof("Using private watchtower %s, offering policy %s",
   311  			s.Tower, cfg.Policy)
   312  		candidateTowers = append(candidateTowers, s.Tower)
   313  	}
   314  
   315  	// Load the sweep pkscripts that have been generated for all previously
   316  	// registered channels.
   317  	chanSummaries, err := cfg.DB.FetchChanSummaries()
   318  	if err != nil {
   319  		return nil, err
   320  	}
   321  
   322  	c := &TowerClient{
   323  		cfg:               cfg,
   324  		log:               plog,
   325  		pipeline:          newTaskPipeline(plog),
   326  		candidateTowers:   newTowerListIterator(candidateTowers...),
   327  		candidateSessions: candidateSessions,
   328  		activeSessions:    make(sessionQueueSet),
   329  		summaries:         chanSummaries,
   330  		statTicker:        time.NewTicker(DefaultStatInterval),
   331  		stats:             new(ClientStats),
   332  		newTowers:         make(chan *newTowerMsg),
   333  		staleTowers:       make(chan *staleTowerMsg),
   334  		forceQuit:         make(chan struct{}),
   335  	}
   336  	c.negotiator = newSessionNegotiator(&NegotiatorConfig{
   337  		DB:            cfg.DB,
   338  		SecretKeyRing: cfg.SecretKeyRing,
   339  		Policy:        cfg.Policy,
   340  		ChainHash:     cfg.ChainHash,
   341  		SendMessage:   c.sendMessage,
   342  		ReadMessage:   c.readMessage,
   343  		Dial:          c.dial,
   344  		Candidates:    c.candidateTowers,
   345  		MinBackoff:    cfg.MinBackoff,
   346  		MaxBackoff:    cfg.MaxBackoff,
   347  		Log:           plog,
   348  	})
   349  
   350  	// Reconstruct the highest commit height processed for each channel
   351  	// under the client's current policy.
   352  	c.buildHighestCommitHeights()
   353  
   354  	return c, nil
   355  }
   356  
   357  // getClientSessions retrieves the client sessions for a particular tower if
   358  // specified, otherwise all client sessions for all towers are retrieved. An
   359  // optional filter can be provided to filter out any undesired client sessions.
   360  //
   361  // NOTE: This method should only be used when deserialization of a
   362  // ClientSession's Tower and SessionKeyECDH fields is desired; otherwise, the
   363  // existing ListClientSessions method should be used.
   364  func getClientSessions(db DB, keyRing ECDHKeyRing, forTower *wtdb.TowerID,
   365  	passesFilter func(*wtdb.ClientSession) bool) (
   366  	map[wtdb.SessionID]*wtdb.ClientSession, error) {
   367  
   368  	sessions, err := db.ListClientSessions(forTower)
   369  	if err != nil {
   370  		return nil, err
   371  	}
   372  
   373  	// Reload the tower from disk using the tower ID contained in each
   374  	// candidate session. We will also rederive any session keys needed to
   375  	// be able to communicate with the towers and authenticate session
   376  	// requests. This prevents us from having to store the private keys on
   377  	// disk.
   378  	for _, s := range sessions {
   379  		tower, err := db.LoadTowerByID(s.TowerID)
   380  		if err != nil {
   381  			return nil, err
   382  		}
   383  		s.Tower = tower
   384  
   385  		towerKeyDesc, err := keyRing.DeriveKey(keychain.KeyLocator{
   386  			Family: keychain.KeyFamilyTowerSession,
   387  			Index:  s.KeyIndex,
   388  		})
   389  		if err != nil {
   390  			return nil, err
   391  		}
   392  		s.SessionKeyECDH = keychain.NewPubKeyECDH(towerKeyDesc, keyRing)
   393  
   394  		// If an optional filter was provided, use it to filter out any
   395  		// undesired sessions.
   396  		if passesFilter != nil && !passesFilter(s) {
   397  			delete(sessions, s.ID)
   398  		}
   399  	}
   400  
   401  	return sessions, nil
   402  }
   403  
   404  // buildHighestCommitHeights inspects the full set of candidate client sessions
   405  // loaded from disk, and determines the highest known commit height for each
   406  // channel. This allows the client to reject backups that it has already
   407  // processed for its active policy.
   408  func (c *TowerClient) buildHighestCommitHeights() {
   409  	chanCommitHeights := make(map[lnwire.ChannelID]uint64)
   410  	for _, s := range c.candidateSessions {
   411  		// We only want to consider updates that have been
   412  		// accepted under an identical policy to the client's
   413  		// current policy.
   414  		if s.Policy != c.cfg.Policy {
   415  			continue
   416  		}
   417  
   418  		// Take the highest commit height found in the session's
   419  		// committed updates.
   420  		for _, committedUpdate := range s.CommittedUpdates {
   421  			bid := committedUpdate.BackupID
   422  
   423  			height, ok := chanCommitHeights[bid.ChanID]
   424  			if !ok || bid.CommitHeight > height {
   425  				chanCommitHeights[bid.ChanID] = bid.CommitHeight
   426  			}
   427  		}
   428  
   429  		// Take the highest commit height found in the session's acked
   430  		// updates.
   431  		for _, bid := range s.AckedUpdates {
   432  			height, ok := chanCommitHeights[bid.ChanID]
   433  			if !ok || bid.CommitHeight > height {
   434  				chanCommitHeights[bid.ChanID] = bid.CommitHeight
   435  			}
   436  		}
   437  	}
   438  
   439  	c.chanCommitHeights = chanCommitHeights
   440  }
   441  
   442  // Start initializes the watchtower client by loading or negotiating an active
   443  // session and then begins processing backup tasks from the request pipeline.
   444  func (c *TowerClient) Start() error {
   445  	var err error
   446  	c.started.Do(func() {
   447  		c.log.Infof("Starting watchtower client")
   448  
   449  		// First, restart a session queue for any sessions that have
   450  		// committed but unacked state updates. This ensures that these
   451  		// sessions will be able to flush the committed updates after a
   452  		// restart.
   453  		for _, session := range c.candidateSessions {
   454  			if len(session.CommittedUpdates) > 0 {
   455  				c.log.Infof("Starting session=%s to process "+
   456  					"%d committed backups", session.ID,
   457  					len(session.CommittedUpdates))
   458  				c.initActiveQueue(session)
   459  			}
   460  		}
   461  
   462  		// Now start the session negotiator, which will allow us to
   463  		// request new sessions as soon as the backupDispatcher starts
   464  		// up.
   465  		err = c.negotiator.Start()
   466  		if err != nil {
   467  			return
   468  		}
   469  
   470  		// Start the task pipeline to which new backup tasks will be
   471  		// submitted from active links.
   472  		c.pipeline.Start()
   473  
   474  		c.wg.Add(1)
   475  		go c.backupDispatcher()
   476  
   477  		c.log.Infof("Watchtower client started successfully")
   478  	})
   479  	return err
   480  }
   481  
   482  // Stop idempotently initiates a graceful shutdown of the watchtower client.
   483  func (c *TowerClient) Stop() error {
   484  	c.stopped.Do(func() {
   485  		c.log.Debugf("Stopping watchtower client")
   486  
   487  		// 1. To ensure we don't hang forever on shutdown due to
   488  		// unintended failures, we'll delay a call to force quit the
   489  		// pipeline if a ForceQuitDelay is specified. This will have no
   490  		// effect if the pipeline shuts down cleanly before the delay
   491  		// fires.
   492  		//
   493  		// For full safety, this can be set to 0 to wait indefinitely.
   494  		// However, for mobile clients which may have a
   495  		// limited amount of time to exit before the background process
   496  		// is killed, this offers a way to ensure the process
   497  		// terminates.
   498  		if c.cfg.ForceQuitDelay > 0 {
   499  			time.AfterFunc(c.cfg.ForceQuitDelay, c.ForceQuit)
   500  		}
   501  
   502  		// 2. Shutdown the backup queue, which will prevent any further
   503  		// updates from being accepted. In practice, the links should be
   504  		// shut down before the client has been stopped, so all updates
   505  		// would have been added prior.
   506  		c.pipeline.Stop()
   507  
   508  		// 3. Once the backup queue has shutdown, wait for the main
   509  		// dispatcher to exit. The backup queue will signal its
   510  		// completion to the dispatcher, which releases the wait group
   511  		// after all tasks have been assigned to session queues.
   512  		c.wg.Wait()
   513  
   514  		// 4. Since all valid tasks have been assigned to session
   515  		// queues, we no longer need to negotiate sessions.
   516  		c.negotiator.Stop()
   517  
   518  		c.log.Debugf("Waiting for active session queues to finish "+
   519  			"draining, stats: %s", c.stats)
   520  
   521  		// 5. Shutdown all active session queues in parallel. These will
   522  		// exit once all updates have been acked by the watchtower.
   523  		c.activeSessions.ApplyAndWait(func(s *sessionQueue) func() {
   524  			return s.Stop
   525  		})
   526  
   527  		// Skip log if force quitting.
   528  		select {
   529  		case <-c.forceQuit:
   530  			return
   531  		default:
   532  		}
   533  
   534  		c.log.Debugf("Client successfully stopped, stats: %s", c.stats)
   535  	})
   536  	return nil
   537  }
   538  
   539  // ForceQuit idempotently initiates an unclean shutdown of the watchtower
   540  // client. This should only be executed if Stop is unable to exit cleanly.
   541  func (c *TowerClient) ForceQuit() {
   542  	c.forced.Do(func() {
   543  		c.log.Infof("Force quitting watchtower client")
   544  
   545  		// 1. Shutdown the backup queue, which will prevent any further
   546  		// updates from being accepted. In practice, the links should be
   547  		// shut down before the client has been stopped, so all updates
   548  		// would have been added prior.
   549  		c.pipeline.ForceQuit()
   550  
   551  		// 2. Once the backup queue has shutdown, wait for the main
   552  		// dispatcher to exit. The backup queue will signal its
   553  		// completion to the dispatcher, which releases the wait group
   554  		// after all tasks have been assigned to session queues.
   555  		close(c.forceQuit)
   556  		c.wg.Wait()
   557  
   558  		// 3. Since all valid tasks have been assigned to session
   559  		// queues, we no longer need to negotiate sessions.
   560  		c.negotiator.Stop()
   561  
   562  		// 4. Force quit all active session queues in parallel. These
   563  		// will exit without waiting for unacked updates to be delivered.
   564  		c.activeSessions.ApplyAndWait(func(s *sessionQueue) func() {
   565  			return s.ForceQuit
   566  		})
   567  
   568  		c.log.Infof("Watchtower client unclean shutdown complete, "+
   569  			"stats: %s", c.stats)
   570  	})
   571  }
   572  
   573  // RegisterChannel persistently initializes any channel-dependent parameters
   574  // within the client. This should be called during link startup to ensure that
   575  // the client is able to support the link during operation.
   576  func (c *TowerClient) RegisterChannel(chanID lnwire.ChannelID) error {
   577  	c.backupMu.Lock()
   578  	defer c.backupMu.Unlock()
   579  
   580  	// If a pkscript for this channel already exists, the channel has been
   581  	// previously registered.
   582  	if _, ok := c.summaries[chanID]; ok {
   583  		return nil
   584  	}
   585  
   586  	// Otherwise, generate a new sweep pkscript used to sweep funds for this
   587  	// channel.
   588  	pkScript, err := c.cfg.NewAddress()
   589  	if err != nil {
   590  		return err
   591  	}
   592  
   593  	// Persist the sweep pkscript so that restarts will not introduce
   594  	// address inflation when the channel is reregistered after a restart.
   595  	err = c.cfg.DB.RegisterChannel(chanID, pkScript)
   596  	if err != nil {
   597  		return err
   598  	}
   599  
   600  	// Finally, cache the pkscript in our in-memory cache to avoid db
   601  	// lookups for the remainder of the daemon's execution.
   602  	c.summaries[chanID] = wtdb.ClientChanSummary{
   603  		SweepPkScript: pkScript,
   604  	}
   605  
   606  	return nil
   607  }
   608  
   609  // BackupState initiates a request to back up a particular revoked state. If the
   610  // method returns nil, the backup is guaranteed to be successful unless the:
   611  //   - client is force quit,
   612  //   - justice transaction would create dust outputs when trying to abide by the
   613  //     negotiated policy, or
   614  //   - breached outputs contain too little value to sweep at the target sweep fee
   615  //     rate.
   616  func (c *TowerClient) BackupState(chanID *lnwire.ChannelID,
   617  	breachInfo *lnwallet.BreachRetribution,
   618  	chanType channeldb.ChannelType) error {
   619  
   620  	// Retrieve the cached sweep pkscript used for this channel.
   621  	c.backupMu.Lock()
   622  	summary, ok := c.summaries[*chanID]
   623  	if !ok {
   624  		c.backupMu.Unlock()
   625  		return ErrUnregisteredChannel
   626  	}
   627  
   628  	// Ignore backups that have already been presented to the client.
   629  	height, ok := c.chanCommitHeights[*chanID]
   630  	if ok && breachInfo.RevokedStateNum <= height {
   631  		c.backupMu.Unlock()
   632  		c.log.Debugf("Ignoring duplicate backup for chanid=%v at height=%d",
   633  			chanID, breachInfo.RevokedStateNum)
   634  		return nil
   635  	}
   636  
   637  	// This backup has a higher commit height than any known backup for this
   638  	// channel. We'll update our tip so that we won't accept it again if the
   639  	// link flaps.
   640  	c.chanCommitHeights[*chanID] = breachInfo.RevokedStateNum
   641  	c.backupMu.Unlock()
   642  
   643  	task := newBackupTask(
   644  		chanID, breachInfo, summary.SweepPkScript, chanType,
   645  		c.cfg.ChainParams,
   646  	)
   647  
   648  	return c.pipeline.QueueBackupTask(task)
   649  }
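
        // A hedged sketch of how a caller might drive BackupState once a
        // revoked state is observed; client, chanID, breachInfo, and chanType
        // are assumed to come from the surrounding breach-handling code:
        //
        //	if err := client.BackupState(&chanID, breachInfo, chanType); err != nil {
        //		switch err {
        //		case ErrUnregisteredChannel:
        //			// RegisterChannel was never called for chanID.
        //		default:
        //			// The pipeline rejected the task, e.g. during shutdown.
        //		}
        //	}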
   650  
   651  // nextSessionQueue attempts to fetch an active session from our set of
   652  // candidate sessions. Candidate sessions with a differing policy from the
   653  // active client's advertised policy will be ignored, but may be resumed if the
   654  // client is restarted with a matching policy. If no candidates were found, nil
   655  // is returned to signal that a new session should be negotiated.
   656  func (c *TowerClient) nextSessionQueue() *sessionQueue {
   657  	// Select any candidate session at random, and remove it from the set of
   658  	// candidate sessions.
   659  	var candidateSession *wtdb.ClientSession
   660  	for id, sessionInfo := range c.candidateSessions {
   661  		delete(c.candidateSessions, id)
   662  
   663  		// Skip any sessions with policies that don't match the current
   664  		// TxPolicy, as they would result in different justice
   665  		// transactions from what is requested. These can be used again
   666  		// if the client changes its configuration and restarts.
   667  		if sessionInfo.Policy.TxPolicy != c.cfg.Policy.TxPolicy {
   668  			continue
   669  		}
   670  
   671  		candidateSession = sessionInfo
   672  		break
   673  	}
   674  
   675  	// If none of the sessions could be used or none were found, we'll
   676  	// return nil to signal that we need another session to be negotiated.
   677  	if candidateSession == nil {
   678  		return nil
   679  	}
   680  
   681  	// Initialize the session queue and spin it up so it can begin handling
   682  	// updates. If the queue was already made active on startup, this will
   683  	// simply return the existing session queue from the set.
   684  	return c.getOrInitActiveQueue(candidateSession)
   685  }
   686  
   687  // backupDispatcher processes events coming from the taskPipeline and is
   688  // responsible for detecting when the client needs to renegotiate a session to
   689  // fulfill continuing demand. The event loop exits after all tasks have been
   690  // received from the upstream taskPipeline, or the taskPipeline is force quit.
   691  //
   692  // NOTE: This method MUST be run as a goroutine.
   693  func (c *TowerClient) backupDispatcher() {
   694  	defer c.wg.Done()
   695  
   696  	c.log.Tracef("Starting backup dispatcher")
   697  	defer c.log.Tracef("Stopping backup dispatcher")
   698  
   699  	for {
   700  		switch {
   701  
   702  		// No active session queue and no additional sessions.
   703  		case c.sessionQueue == nil && len(c.candidateSessions) == 0:
   704  			c.log.Infof("Requesting new session.")
   705  
   706  			// Immediately request a new session.
   707  			c.negotiator.RequestSession()
   708  
   709  			// Wait until we receive the newly negotiated session.
   710  			// All backups sent in the meantime are queued in the
   711  			// task pipeline, as we cannot process them yet.
   712  		awaitSession:
   713  			select {
   714  			case session := <-c.negotiator.NewSessions():
   715  				c.log.Infof("Acquired new session with id=%s",
   716  					session.ID)
   717  				c.candidateSessions[session.ID] = session
   718  				c.stats.sessionAcquired()
   719  
   720  				// We'll continue to choose the newly negotiated
   721  				// session as our active session queue.
   722  				continue
   723  
   724  			case <-c.statTicker.C:
   725  				c.log.Infof("Client stats: %s", c.stats)
   726  
   727  			// A new tower has been requested to be added. We'll
   728  			// update our persisted and in-memory state and consider
   729  			// its corresponding sessions, if any, as new
   730  			// candidates.
   731  			case msg := <-c.newTowers:
   732  				msg.errChan <- c.handleNewTower(msg)
   733  
   734  			// A tower has been requested to be removed. We'll
   735  			// immediately return an error as we want to avoid the
   736  			// possibility of a new session being negotiated with
   737  			// this request's tower.
   738  			case msg := <-c.staleTowers:
   739  				msg.errChan <- errors.New("removing towers " +
   740  					"is disallowed while a new session " +
   741  					"negotiation is in progress")
   742  
   743  			case <-c.forceQuit:
   744  				return
   745  			}
   746  
   747  			// Instead of looping, we'll jump back into the select
   748  			// case and await the delivery of the session to prevent
   749  			// us from re-requesting additional sessions.
   750  			goto awaitSession
   751  
   752  		// No active session queue but have additional sessions.
   753  		case c.sessionQueue == nil && len(c.candidateSessions) > 0:
   754  			// We've exhausted the prior session, so we'll pop another
   755  			// from the remaining sessions and continue processing
   756  			// backup tasks.
   757  			c.sessionQueue = c.nextSessionQueue()
   758  			if c.sessionQueue != nil {
   759  				c.log.Debugf("Loaded next candidate session "+
   760  					"queue id=%s", c.sessionQueue.ID())
   761  			}
   762  
   763  		// Have active session queue, process backups.
   764  		case c.sessionQueue != nil:
   765  			if c.prevTask != nil {
   766  				c.processTask(c.prevTask)
   767  
   768  				// Continue to ensure the sessionQueue is
   769  				// properly initialized before attempting to
   770  				// process more tasks from the pipeline.
   771  				continue
   772  			}
   773  
   774  			// Normal operation where new tasks are read from the
   775  			// pipeline.
   776  			select {
   777  
   778  			// If any sessions are negotiated while we have an
   779  			// active session queue, queue them for future use.
   780  			// This shouldn't happen with the current design, so
   781  			// it doesn't hurt to select here just in case. In the
   782  			// future, we will likely allow more asynchrony so that
   783  			// we can request new sessions before the session is
   784  			// fully empty, which this case would handle.
   785  			case session := <-c.negotiator.NewSessions():
   786  				c.log.Warnf("Acquired new session with id=%s "+
   787  					"while processing tasks", session.ID)
   788  				c.candidateSessions[session.ID] = session
   789  				c.stats.sessionAcquired()
   790  
   791  			case <-c.statTicker.C:
   792  				c.log.Infof("Client stats: %s", c.stats)
   793  
   794  			// Process each backup task serially from the queue of
   795  			// revoked states.
   796  			case task, ok := <-c.pipeline.NewBackupTasks():
   797  				// All backups in the pipeline have been
   798  				// processed, it is now safe to exit.
   799  				if !ok {
   800  					return
   801  				}
   802  
   803  				c.log.Debugf("Processing %v", task.id)
   804  
   805  				c.stats.taskReceived()
   806  				c.processTask(task)
   807  
   808  			// A new tower has been requested to be added. We'll
   809  			// update our persisted and in-memory state and consider
   810  			// its corresponding sessions, if any, as new
   811  			// candidates.
   812  			case msg := <-c.newTowers:
   813  				msg.errChan <- c.handleNewTower(msg)
   814  
   815  			// A tower has been removed, so we'll remove certain
   816  			// information that's persisted and also in our
   817  			// in-memory state depending on the request, and set any
   818  			// of its corresponding candidate sessions as inactive.
   819  			case msg := <-c.staleTowers:
   820  				msg.errChan <- c.handleStaleTower(msg)
   821  			}
   822  		}
   823  	}
   824  }
   825  
   826  // processTask attempts to schedule the given backupTask on the active
   827  // sessionQueue. The task will either be accepted or rejected, after which the
   828  // appropriate modifications to the client's state machine will be made. After
   829  // every invocation of processTask, the caller should ensure that the
   830  // sessionQueue hasn't been exhausted before proceeding to the next task. Tasks
   831  // that are rejected because the active sessionQueue is full will be cached as
   832  // the prevTask, and should be reprocessed after obtaining a new sessionQueue.
   833  func (c *TowerClient) processTask(task *backupTask) {
   834  	status, accepted := c.sessionQueue.AcceptTask(task)
   835  	if accepted {
   836  		c.taskAccepted(task, status)
   837  	} else {
   838  		c.taskRejected(task, status)
   839  	}
   840  }
   841  
   842  // taskAccepted processes the acceptance of a task by a sessionQueue depending
   843  // on the state the sessionQueue is in *after* the task is added. The client's
   844  // prevTask is always removed as a result of this call. The client's
   845  // sessionQueue will be removed if accepting the task left the sessionQueue in
   846  // an exhausted state.
   847  func (c *TowerClient) taskAccepted(task *backupTask, newStatus reserveStatus) {
   848  	c.log.Infof("Queued %v successfully for session %v",
   849  		task.id, c.sessionQueue.ID())
   850  
   851  	c.stats.taskAccepted()
   852  
   853  	// If this task was accepted, we discard anything held in the prevTask.
   854  	// Either it was nil before, or is the task which was just accepted.
   855  	c.prevTask = nil
   856  
   857  	switch newStatus {
   858  
   859  	// The sessionQueue still has capacity after accepting this task.
   860  	case reserveAvailable:
   861  
   862  	// The sessionQueue is full after accepting this task, so we will need
   863  	// to request a new one before proceeding.
   864  	case reserveExhausted:
   865  		c.stats.sessionExhausted()
   866  
   867  		c.log.Debugf("Session %s exhausted", c.sessionQueue.ID())
   868  
   869  		// This task left the session exhausted, set it to nil and
   870  		// proceed to the next loop so we can consume another
   871  		// pre-negotiated session or request another.
   872  		c.sessionQueue = nil
   873  	}
   874  }
   875  
   876  // taskRejected processes the rejection of a task by a sessionQueue,
   877  // depending on the state the sessionQueue was in *before* the task was
   878  // rejected. If the sessionQueue was exhausted beforehand, the client
   879  // caches the task in prevTask and nils the sessionQueue to find a new
   880  // session. Otherwise, the client marks the task as ineligible, since a
   881  // valid justice transaction couldn't be constructed under the session's policy.
   882  func (c *TowerClient) taskRejected(task *backupTask, curStatus reserveStatus) {
   883  	switch curStatus {
   884  
   885  	// The sessionQueue has available capacity but the task was rejected,
   886  	// this indicates that the task was ineligible for backup.
   887  	case reserveAvailable:
   888  		c.stats.taskIneligible()
   889  
   890  		c.log.Infof("Ignoring ineligible %v", task.id)
   891  
   892  		err := c.cfg.DB.MarkBackupIneligible(
   893  			task.id.ChanID, task.id.CommitHeight,
   894  		)
   895  		if err != nil {
   896  			c.log.Errorf("Unable to mark %v ineligible: %v",
   897  				task.id, err)
   898  
   899  			// It is safe to not handle this error, even if we could
   900  			// not persist the result. At worst, this task may be
   901  			// reprocessed on a subsequent startup, and will either
   902  			// succeed due to a change in session parameters or fail in
   903  			// the same manner.
   904  		}
   905  
   906  		// If this task was rejected *and* the session had available
   907  		// capacity, we discard anything held in the prevTask. Either it
   908  		// was nil before, or is the task which was just rejected.
   909  		c.prevTask = nil
   910  
   911  	// The sessionQueue rejected the task because it is full, we will stash
   912  	// this task and try to add it to the next available sessionQueue.
   913  	case reserveExhausted:
   914  		c.stats.sessionExhausted()
   915  
   916  		c.log.Debugf("Session %v exhausted, %v queued for next session",
   917  			c.sessionQueue.ID(), task.id)
   918  
   919  		// Cache the task that we pulled off, so that we can process it
   920  		// once a new session queue is available.
   921  		c.sessionQueue = nil
   922  		c.prevTask = task
   923  	}
   924  }
   925  
   926  // dial connects the peer at addr using privKey as our secret key for the
   927  // connection. The connection will use the configured Dial function to resolve
   928  // the address for either Tor or clear net connections.
   929  func (c *TowerClient) dial(localKey keychain.SingleKeyECDH,
   930  	addr *lnwire.NetAddress) (wtserver.Peer, error) {
   931  
   932  	return c.cfg.AuthDial(localKey, addr, c.cfg.Dial)
   933  }
   934  
   935  // readMessage receives and parses the next message from the given Peer. An
   936  // error is returned if a message is not received before the client's read
   937  // timeout, the read off the wire failed, or the message could not be
   938  // deserialized.
   939  func (c *TowerClient) readMessage(peer wtserver.Peer) (wtwire.Message, error) {
   940  	// Set a read timeout to ensure we drop the connection if nothing is
   941  	// received in a timely manner.
   942  	err := peer.SetReadDeadline(time.Now().Add(c.cfg.ReadTimeout))
   943  	if err != nil {
   944  		err = fmt.Errorf("unable to set read deadline: %v", err)
   945  		c.log.Errorf("Unable to read msg: %v", err)
   946  		return nil, err
   947  	}
   948  
   949  	// Pull the next message off the wire.
   950  	rawMsg, err := peer.ReadNextMessage()
   951  	if err != nil {
   952  		err = fmt.Errorf("unable to read message: %v", err)
   953  		c.log.Errorf("Unable to read msg: %v", err)
   954  		return nil, err
   955  	}
   956  
   957  	// Parse the received message according to the watchtower wire
   958  	// specification.
   959  	msgReader := bytes.NewReader(rawMsg)
   960  	msg, err := wtwire.ReadMessage(msgReader, 0)
   961  	if err != nil {
   962  		err = fmt.Errorf("unable to parse message: %v", err)
   963  		c.log.Errorf("Unable to read msg: %v", err)
   964  		return nil, err
   965  	}
   966  
   967  	c.logMessage(peer, msg, true)
   968  
   969  	return msg, nil
   970  }
   971  
   972  // sendMessage sends a watchtower wire message to the target peer.
   973  func (c *TowerClient) sendMessage(peer wtserver.Peer, msg wtwire.Message) error {
   974  	// Encode the next wire message into the buffer.
   975  	// TODO(conner): use buffer pool
   976  	var b bytes.Buffer
   977  	_, err := wtwire.WriteMessage(&b, msg, 0)
   978  	if err != nil {
   979  		err = fmt.Errorf("unable to encode msg: %v", err)
   980  		c.log.Errorf("Unable to send msg: %v", err)
   981  		return err
   982  	}
   983  
   984  	// Set the write deadline for the connection, ensuring we drop the
   985  	// connection if nothing is sent in a timely manner.
   986  	err = peer.SetWriteDeadline(time.Now().Add(c.cfg.WriteTimeout))
   987  	if err != nil {
   988  		err = fmt.Errorf("unable to set write deadline: %v", err)
   989  		c.log.Errorf("Unable to send msg: %v", err)
   990  		return err
   991  	}
   992  
   993  	c.logMessage(peer, msg, false)
   994  
   995  	// Write out the full message to the remote peer.
   996  	_, err = peer.Write(b.Bytes())
   997  	if err != nil {
   998  		c.log.Errorf("Unable to send msg: %v", err)
   999  	}
  1000  	return err
  1001  }
  1002  
  1003  // newSessionQueue creates a sessionQueue from a ClientSession loaded from
  1004  // the database, supplying it with the resources needed by the client.
  1005  func (c *TowerClient) newSessionQueue(s *wtdb.ClientSession) *sessionQueue {
  1006  	return newSessionQueue(&sessionQueueConfig{
  1007  		ClientSession: s,
  1008  		ChainHash:     c.cfg.ChainHash,
  1009  		Dial:          c.dial,
  1010  		ReadMessage:   c.readMessage,
  1011  		SendMessage:   c.sendMessage,
  1012  		Signer:        c.cfg.Signer,
  1013  		DB:            c.cfg.DB,
  1014  		MinBackoff:    c.cfg.MinBackoff,
  1015  		MaxBackoff:    c.cfg.MaxBackoff,
  1016  		Log:           c.log,
  1017  	})
  1018  }
  1019  
  1020  // getOrInitActiveQueue checks the activeSessions set for a sessionQueue for the
  1021  // passed ClientSession. If it exists, the active sessionQueue is returned.
  1022  // Otherwise a new sessionQueue is initialized and added to the set.
  1023  func (c *TowerClient) getOrInitActiveQueue(s *wtdb.ClientSession) *sessionQueue {
  1024  	if sq, ok := c.activeSessions[s.ID]; ok {
  1025  		return sq
  1026  	}
  1027  
  1028  	return c.initActiveQueue(s)
  1029  }
  1030  
  1031  // initActiveQueue creates a new sessionQueue from the passed ClientSession,
  1032  // adds the sessionQueue to the activeSessions set, and starts the sessionQueue
  1033  // so that it can deliver any committed updates or begin accepting newly
  1034  // assigned tasks.
  1035  func (c *TowerClient) initActiveQueue(s *wtdb.ClientSession) *sessionQueue {
  1036  	// Initialize the session queue, providing it with all of the resources
  1037  	// it requires from the client instance.
  1038  	sq := c.newSessionQueue(s)
  1039  
  1040  	// Add the session queue as an active session so that we remember to
  1041  	// stop it on shutdown.
  1042  	c.activeSessions.Add(sq)
  1043  
  1044  	// Start the queue so that it can be active in processing newly assigned
  1045  	// tasks or to upload previously committed updates.
  1046  	sq.Start()
  1047  
  1048  	return sq
  1049  }
  1050  
  1051  // AddTower adds a new watchtower reachable at the given address and considers
  1052  // it for new sessions. If the watchtower already exists, then any new addresses
  1053  // included will be considered when dialing it for session negotiations and
  1054  // backups.
  1055  func (c *TowerClient) AddTower(addr *lnwire.NetAddress) error {
  1056  	errChan := make(chan error, 1)
  1057  
  1058  	select {
  1059  	case c.newTowers <- &newTowerMsg{
  1060  		addr:    addr,
  1061  		errChan: errChan,
  1062  	}:
  1063  	case <-c.pipeline.quit:
  1064  		return ErrClientExiting
  1065  	case <-c.pipeline.forceQuit:
  1066  		return ErrClientExiting
  1067  	}
  1068  
  1069  	select {
  1070  	case err := <-errChan:
  1071  		return err
  1072  	case <-c.pipeline.quit:
  1073  		return ErrClientExiting
  1074  	case <-c.pipeline.forceQuit:
  1075  		return ErrClientExiting
  1076  	}
  1077  }
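
        // A minimal sketch of building the *lnwire.NetAddress argument for
        // AddTower; the pubkey bytes, host, and port here are placeholders,
        // not values prescribed by this package:
        //
        //	pubKey, err := secp256k1.ParsePubKey(pubKeyBytes)
        //	if err != nil {
        //		return err
        //	}
        //	tcpAddr, err := net.ResolveTCPAddr("tcp", "watchtower.example.com:9911")
        //	if err != nil {
        //		return err
        //	}
        //	err = client.AddTower(&lnwire.NetAddress{
        //		IdentityKey: pubKey,
        //		Address:     tcpAddr,
        //	})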
  1078  
  1079  // handleNewTower handles a request for a new tower to be added. If the tower
  1080  // already exists, then its corresponding sessions, if any, will be
  1081  // considered as candidates.
  1082  func (c *TowerClient) handleNewTower(msg *newTowerMsg) error {
  1083  	// We'll start by updating our persisted state, followed by our
  1084  	// in-memory state, with the new tower. This might not actually be a new
  1085  	// tower, but it might include a new address at which it can be reached.
  1086  	tower, err := c.cfg.DB.CreateTower(msg.addr)
  1087  	if err != nil {
  1088  		return err
  1089  	}
  1090  	c.candidateTowers.AddCandidate(tower)
  1091  
  1092  	// Include all of its corresponding sessions in our set of candidates.
  1093  	isAnchorClient := c.cfg.Policy.IsAnchorChannel()
  1094  	activeSessionFilter := genActiveSessionFilter(isAnchorClient)
  1095  	sessions, err := getClientSessions(
  1096  		c.cfg.DB, c.cfg.SecretKeyRing, &tower.ID, activeSessionFilter,
  1097  	)
  1098  	if err != nil {
  1099  		return fmt.Errorf("unable to determine sessions for tower %x: "+
  1100  			"%v", tower.IdentityKey.SerializeCompressed(), err)
  1101  	}
  1102  	for id, session := range sessions {
  1103  		c.candidateSessions[id] = session
  1104  	}
  1105  
  1106  	return nil
  1107  }
  1108  
  1109  // RemoveTower removes a watchtower from being considered for future session
  1110  // negotiations and from being used for any subsequent backups until it's added
  1111  // again. If an address is provided, then this call only serves as a way of
  1112  // removing the address from the watchtower instead.
  1113  func (c *TowerClient) RemoveTower(pubKey *secp256k1.PublicKey, addr net.Addr) error {
  1114  	errChan := make(chan error, 1)
  1115  
  1116  	select {
  1117  	case c.staleTowers <- &staleTowerMsg{
  1118  		pubKey:  pubKey,
  1119  		addr:    addr,
  1120  		errChan: errChan,
  1121  	}:
  1122  	case <-c.pipeline.quit:
  1123  		return ErrClientExiting
  1124  	case <-c.pipeline.forceQuit:
  1125  		return ErrClientExiting
  1126  	}
  1127  
  1128  	select {
  1129  	case err := <-errChan:
  1130  		return err
  1131  	case <-c.pipeline.quit:
  1132  		return ErrClientExiting
  1133  	case <-c.pipeline.forceQuit:
  1134  		return ErrClientExiting
  1135  	}
  1136  }
  1137  
  1138  // handleStaleTower handles a request for an existing tower to be removed. If
  1139  // none of the tower's sessions have pending updates, then they will become
  1140  // inactive and removed as candidates. If the active session queue corresponds
  1141  // to any of these sessions, a new one will be negotiated.
  1142  func (c *TowerClient) handleStaleTower(msg *staleTowerMsg) error {
  1143  	// We'll load the tower before potentially removing it in order to
  1144  	// retrieve its ID within the database.
  1145  	tower, err := c.cfg.DB.LoadTower(msg.pubKey)
  1146  	if err != nil {
  1147  		return err
  1148  	}
  1149  
  1150  	// We'll update our persisted state, followed by our in-memory state,
  1151  	// with the stale tower.
  1152  	if err := c.cfg.DB.RemoveTower(msg.pubKey, msg.addr); err != nil {
  1153  		return err
  1154  	}
  1155  	err = c.candidateTowers.RemoveCandidate(tower.ID, msg.addr)
  1156  	if err != nil {
  1157  		return err
  1158  	}
  1159  
  1160  	// If an address was provided, then we're only meant to remove the
  1161  	// address from the tower, so there's nothing left for us to do.
  1162  	if msg.addr != nil {
  1163  		return nil
  1164  	}
  1165  
  1166  	// Otherwise, the tower should no longer be used for future session
  1167  	// negotiations and backups.
  1168  	pubKey := msg.pubKey.SerializeCompressed()
  1169  	sessions, err := c.cfg.DB.ListClientSessions(&tower.ID)
  1170  	if err != nil {
  1171  		return fmt.Errorf("unable to retrieve sessions for tower %x: "+
  1172  			"%v", pubKey, err)
  1173  	}
  1174  	for sessionID := range sessions {
  1175  		delete(c.candidateSessions, sessionID)
  1176  	}
  1177  
  1178  	// If our active session queue corresponds to the stale tower, we'll
  1179  	// proceed to negotiate a new one.
  1180  	if c.sessionQueue != nil {
  1181  		activeTower := c.sessionQueue.towerAddr.IdentityKey.SerializeCompressed()
  1182  		if bytes.Equal(pubKey, activeTower) {
  1183  			c.sessionQueue = nil
  1184  		}
  1185  	}
  1186  
  1187  	return nil
  1188  }
  1189  
  1190  // RegisteredTowers retrieves the list of watchtowers registered with the
  1191  // client.
  1192  func (c *TowerClient) RegisteredTowers() ([]*RegisteredTower, error) {
  1193  	// Retrieve all of our towers along with all of our sessions.
  1194  	towers, err := c.cfg.DB.ListTowers()
  1195  	if err != nil {
  1196  		return nil, err
  1197  	}
  1198  	clientSessions, err := c.cfg.DB.ListClientSessions(nil)
  1199  	if err != nil {
  1200  		return nil, err
  1201  	}
  1202  
  1203  	// Construct a lookup map that coalesces all of the sessions for a
  1204  	// specific watchtower.
  1205  	towerSessions := make(
  1206  		map[wtdb.TowerID]map[wtdb.SessionID]*wtdb.ClientSession,
  1207  	)
  1208  	for id, s := range clientSessions {
  1209  		sessions, ok := towerSessions[s.TowerID]
  1210  		if !ok {
  1211  			sessions = make(map[wtdb.SessionID]*wtdb.ClientSession)
  1212  			towerSessions[s.TowerID] = sessions
  1213  		}
  1214  		sessions[id] = s
  1215  	}
  1216  
  1217  	registeredTowers := make([]*RegisteredTower, 0, len(towerSessions))
  1218  	for _, tower := range towers {
  1219  		isActive := c.candidateTowers.IsActive(tower.ID)
  1220  		registeredTowers = append(registeredTowers, &RegisteredTower{
  1221  			Tower:                  tower,
  1222  			Sessions:               towerSessions[tower.ID],
  1223  			ActiveSessionCandidate: isActive,
  1224  		})
  1225  	}
  1226  
  1227  	return registeredTowers, nil
  1228  }
  1229  
  1230  // LookupTower retrieves a registered watchtower through its public key.
  1231  func (c *TowerClient) LookupTower(pubKey *secp256k1.PublicKey) (*RegisteredTower, error) {
  1232  	tower, err := c.cfg.DB.LoadTower(pubKey)
  1233  	if err != nil {
  1234  		return nil, err
  1235  	}
  1236  
  1237  	towerSessions, err := c.cfg.DB.ListClientSessions(&tower.ID)
  1238  	if err != nil {
  1239  		return nil, err
  1240  	}
  1241  
  1242  	return &RegisteredTower{
  1243  		Tower:                  tower,
  1244  		Sessions:               towerSessions,
  1245  		ActiveSessionCandidate: c.candidateTowers.IsActive(tower.ID),
  1246  	}, nil
  1247  }
  1248  
  1249  // Stats returns the in-memory statistics of the client since startup.
  1250  func (c *TowerClient) Stats() ClientStats {
  1251  	return c.stats.Copy()
  1252  }
  1253  
  1254  // Policy returns the active client policy configuration.
  1255  func (c *TowerClient) Policy() wtpolicy.Policy {
  1256  	return c.cfg.Policy
  1257  }
  1258  
  1259  // logMessage writes information about a message exchanged with a remote peer,
  1260  // using directional prepositions to signal whether the message was sent or
  1261  // received.
  1262  func (c *TowerClient) logMessage(
  1263  	peer wtserver.Peer, msg wtwire.Message, read bool) {
  1264  
  1265  	var action = "Received"
  1266  	var preposition = "from"
  1267  	if !read {
  1268  		action = "Sending"
  1269  		preposition = "to"
  1270  	}
  1271  
  1272  	summary := wtwire.MessageSummary(msg)
  1273  	if len(summary) > 0 {
  1274  		summary = "(" + summary + ")"
  1275  	}
  1276  
  1277  	c.log.Debugf("%s %s%v %s %x@%s", action, msg.MsgType(), summary,
  1278  		preposition, peer.RemotePub().SerializeCompressed(),
  1279  		peer.RemoteAddr())
  1280  }