github.com/decred/dcrlnd@v0.7.6/discovery/sync_manager.go

package discovery

import (
	"errors"
	"sync"
	"sync/atomic"
	"time"

	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrlnd/lnpeer"
	"github.com/decred/dcrlnd/lnwire"
	"github.com/decred/dcrlnd/routing/route"
	"github.com/decred/dcrlnd/ticker"
)

const (
	// DefaultSyncerRotationInterval is the default interval in which we'll
	// rotate a single active syncer.
	DefaultSyncerRotationInterval = 20 * time.Minute

	// DefaultHistoricalSyncInterval is the default interval in which we'll
	// force a historical sync to ensure we have as much of the public
	// network as possible.
	DefaultHistoricalSyncInterval = time.Hour
)

var (
	// ErrSyncManagerExiting is an error returned when we attempt to
	// start/stop a gossip syncer for a connected/disconnected peer, but the
	// SyncManager has already been stopped.
	ErrSyncManagerExiting = errors.New("sync manager exiting")
)

// newSyncer is an internal message we'll use within the SyncManager to signal
// that we should create a GossipSyncer for a newly connected peer.
type newSyncer struct {
	// peer is the newly connected peer.
	peer lnpeer.Peer

	// doneChan serves as a signal to the caller that the SyncManager's
	// internal state correctly reflects the newly created syncer.
	doneChan chan struct{}
}

// staleSyncer is an internal message we'll use within the SyncManager to signal
// that a peer has disconnected and its GossipSyncer should be removed.
type staleSyncer struct {
	// peer is the peer that has disconnected.
	peer route.Vertex

	// doneChan serves as a signal to the caller that the SyncManager's
	// internal state correctly reflects the stale active syncer. This is
	// needed to ensure we always create a new syncer for a flappy peer
	// after they disconnect if they happened to be an active syncer.
	doneChan chan struct{}
}

// SyncManagerCfg contains all of the dependencies required for the SyncManager
// to carry out its duties.
type SyncManagerCfg struct {
	// ChainHash is a hash that indicates the specific network of the active
	// chain.
	ChainHash chainhash.Hash

	// ChanSeries is an interface that provides access to a time series view
	// of the current known channel graph. Each GossipSyncer enabled peer
	// will utilize this in order to create and respond to channel graph
	// time series queries.
	ChanSeries ChannelGraphTimeSeries

	// GossiperState is an interface that provides functions to persist
	// data about the state of individual peer gossipers.
	GossiperState GossiperState

	// NumActiveSyncers is the number of peers for which we should have
	// active syncers. After reaching NumActiveSyncers, any future gossip
	// syncers will be passive.
	NumActiveSyncers int

	// RotateTicker is a ticker responsible for notifying the SyncManager
	// when it should rotate its active syncers. A single active syncer with
	// a chansSynced state will be exchanged for a passive syncer in order
	// to ensure we don't keep syncing with the same peers.
	RotateTicker ticker.Ticker

	// HistoricalSyncTicker is a ticker responsible for notifying the
	// SyncManager when it should attempt a historical sync with a gossip
	// sync peer.
	HistoricalSyncTicker ticker.Ticker

	// IgnoreHistoricalFilters will prevent syncers from replying with
	// historical data when the remote peer sets a gossip_timestamp_range.
	// This prevents ranges with old start times from causing us to dump the
	// graph on connect.
	IgnoreHistoricalFilters bool

	// BestHeight returns the latest height known of the chain.
	BestHeight func() uint32

	// PinnedSyncers is a set of peers that will always transition to
	// ActiveSync upon connection. These peers will never transition to
	// PassiveSync.
	PinnedSyncers PinnedSyncers
}

// SyncManager is a subsystem of the gossiper that manages the gossip syncers
// for peers currently connected. When a new peer is connected, the manager will
// create its accompanying gossip syncer and determine whether it should have an
// ActiveSync or PassiveSync sync type based on how many other gossip syncers
// are currently active. Any ActiveSync gossip syncers are started in a
// round-robin manner to ensure we're not syncing with multiple peers at the
// same time. The first GossipSyncer registered with the SyncManager will
// attempt a historical sync to ensure we have as much of the public channel
// graph as possible.
type SyncManager struct {
	// initialHistoricalSyncCompleted serves as a barrier when initializing
	// new active GossipSyncers. If 0, the initial historical sync has not
	// completed, so we'll defer initializing any active GossipSyncers. If
	// 1, then we can transition the GossipSyncer immediately. We set up
	// this barrier to ensure we have most of the graph before attempting to
	// accept new updates at tip.
	//
	// NOTE: This must be used atomically.
	initialHistoricalSyncCompleted int32

	start sync.Once
	stop  sync.Once

	cfg SyncManagerCfg

	// newSyncers is a channel we'll use to process requests to create
	// GossipSyncers for newly connected peers.
	newSyncers chan *newSyncer

	// staleSyncers is a channel we'll use to process requests to tear down
	// GossipSyncers for disconnected peers.
	staleSyncers chan *staleSyncer

	// syncersMu guards the read and write access to the activeSyncers and
	// inactiveSyncers maps below.
	syncersMu sync.Mutex

	// activeSyncers is the set of all syncers from which we are currently
	// receiving graph updates. The number of possible active syncers is
	// bounded by NumActiveSyncers.
	activeSyncers map[route.Vertex]*GossipSyncer

	// inactiveSyncers is the set of all syncers from which we are not
	// currently receiving new graph updates.
	inactiveSyncers map[route.Vertex]*GossipSyncer

	// pinnedActiveSyncers is the set of all syncers which are pinned into
	// an active sync. Pinned peers perform an initial historical sync on
	// each connection and will continue to receive graph updates for the
	// duration of the connection.
	pinnedActiveSyncers map[route.Vertex]*GossipSyncer

	wg   sync.WaitGroup
	quit chan struct{}
}

// newSyncManager constructs a new SyncManager backed by the given config.
func newSyncManager(cfg *SyncManagerCfg) *SyncManager {
	return &SyncManager{
		cfg:          *cfg,
		newSyncers:   make(chan *newSyncer),
		staleSyncers: make(chan *staleSyncer),
		activeSyncers: make(
			map[route.Vertex]*GossipSyncer, cfg.NumActiveSyncers,
		),
		inactiveSyncers: make(map[route.Vertex]*GossipSyncer),
		pinnedActiveSyncers: make(
			map[route.Vertex]*GossipSyncer, len(cfg.PinnedSyncers),
		),
		quit: make(chan struct{}),
	}
}
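
// exampleStartSyncManager is an illustrative sketch that is not part of the
// original file: it shows one way a caller might assemble a SyncManagerCfg
// and start the resulting SyncManager. The NumActiveSyncers value of 3 is an
// arbitrary assumption, ticker.New is assumed to be the ticker package's
// default Ticker constructor, and PinnedSyncers is assumed to be a map type.
func exampleStartSyncManager(chain chainhash.Hash,
	series ChannelGraphTimeSeries, state GossiperState,
	bestHeight func() uint32) *SyncManager {

	m := newSyncManager(&SyncManagerCfg{
		ChainHash:            chain,
		ChanSeries:           series,
		GossiperState:        state,
		NumActiveSyncers:     3,
		RotateTicker:         ticker.New(DefaultSyncerRotationInterval),
		HistoricalSyncTicker: ticker.New(DefaultHistoricalSyncInterval),
		BestHeight:           bestHeight,
		PinnedSyncers:        make(PinnedSyncers),
	})

	// Start launches the syncerHandler goroutine; pair it with a later
	// call to Stop to shut the manager down cleanly.
	m.Start()

	return m
}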

// Start starts the SyncManager in order to properly carry out its duties.
func (m *SyncManager) Start() {
	m.start.Do(func() {
		m.wg.Add(1)
		go m.syncerHandler()
	})
}

// Stop stops the SyncManager from performing its duties.
func (m *SyncManager) Stop() {
	m.stop.Do(func() {
		close(m.quit)
		m.wg.Wait()

		for _, syncer := range m.inactiveSyncers {
			syncer.Stop()
		}
		for _, syncer := range m.activeSyncers {
			syncer.Stop()
		}
	})
}

// syncerHandler is the SyncManager's main event loop responsible for:
//
//  1. Creating and tearing down GossipSyncers for connected/disconnected
//     peers.
//  2. Finding new peers to receive graph updates from to ensure we don't only
//     receive them from the same set of peers.
//  3. Finding new peers to force a historical sync with to ensure we have as
//     much of the public network as possible.
//
// NOTE: This must be run as a goroutine.
func (m *SyncManager) syncerHandler() {
	defer m.wg.Done()

	m.cfg.RotateTicker.Resume()
	defer m.cfg.RotateTicker.Stop()

	defer m.cfg.HistoricalSyncTicker.Stop()

	var (
		// initialHistoricalSyncer is the syncer we are currently
		// performing an initial historical sync with.
		initialHistoricalSyncer *GossipSyncer

		// initialHistoricalSyncSignal is a signal that will fire once
		// the initial historical sync has been completed. This is
		// crucial to ensure that another historical sync isn't
		// attempted just because the initialHistoricalSyncer was
		// disconnected.
		initialHistoricalSyncSignal chan struct{}
	)

	setInitialHistoricalSyncer := func(s *GossipSyncer) {
		initialHistoricalSyncer = s
		initialHistoricalSyncSignal = s.ResetSyncedSignal()

		// Restart the ticker for our new historical sync peer. This
		// will ensure that all initial syncers receive an equivalent
		// duration before attempting the next sync. Without doing so,
		// we might attempt two historical syncs back to back if a peer
		// disconnects just before the ticker fires.
		m.cfg.HistoricalSyncTicker.Pause()
		m.cfg.HistoricalSyncTicker.Resume()
	}

	for {
		select {
		// A new peer has been connected, so we'll create its
		// accompanying GossipSyncer.
		case newSyncer := <-m.newSyncers:
			// If we already have a syncer, then we'll exit early as
			// we don't want to override it.
			if _, ok := m.GossipSyncer(newSyncer.peer.PubKey()); ok {
				close(newSyncer.doneChan)
				continue
			}

			s := m.createGossipSyncer(newSyncer.peer)

			isPinnedSyncer := m.isPinnedSyncer(s)

			// attemptHistoricalSync determines whether we should
			// attempt an initial historical sync when a new peer
			// connects.
			attemptHistoricalSync := false

			m.syncersMu.Lock()
			switch {
			// For pinned syncers, we will immediately transition
			// the peer into an active (pinned) sync state.
			case isPinnedSyncer:
				attemptHistoricalSync = true
				s.setSyncType(PinnedSync)
				s.setSyncState(syncerIdle)
				m.pinnedActiveSyncers[s.cfg.peerPub] = s

			// Regardless of whether the initial historical sync
			// has completed, we'll re-trigger a historical sync if
			// we no longer have any syncers. This might be
			// necessary if we lost all our peers at one point, and
			// now we finally have one again.
			case len(m.activeSyncers) == 0 &&
				len(m.inactiveSyncers) == 0:

				attemptHistoricalSync =
					m.cfg.NumActiveSyncers > 0
				fallthrough

			// If we've exceeded our total number of active syncers,
			// we'll initialize this GossipSyncer as passive.
			case len(m.activeSyncers) >= m.cfg.NumActiveSyncers:
				fallthrough

			// If the initial historical sync has yet to complete,
			// then we'll declare it as passive and attempt to
			// transition it when the initial historical sync
			// completes.
			case !m.IsGraphSynced():
				s.setSyncType(PassiveSync)
				m.inactiveSyncers[s.cfg.peerPub] = s

			// The initial historical sync has completed, so we can
			// immediately start the GossipSyncer as active.
			default:
				s.setSyncType(ActiveSync)
				m.activeSyncers[s.cfg.peerPub] = s
			}
			m.syncersMu.Unlock()

			s.Start()

			// Once we create the GossipSyncer, we'll signal to the
			// caller that they can proceed since the SyncManager's
			// internal state has been updated.
			close(newSyncer.doneChan)

			// We'll force a historical sync with the first peer we
			// connect to, to ensure we get as much of the graph as
			// possible.
			if !attemptHistoricalSync {
				continue
			}

			log.Debugf("Attempting initial historical sync with "+
				"GossipSyncer(%x)", s.cfg.peerPub)

			if err := s.historicalSync(); err != nil {
				log.Errorf("Unable to attempt initial "+
					"historical sync with "+
					"GossipSyncer(%x): %v", s.cfg.peerPub,
					err)
				continue
			}

			// Once the historical sync has started, we'll keep
			// track of the corresponding syncer to properly handle
			// disconnects. We'll also use a signal to know when
			// the historical sync completed.
			if !isPinnedSyncer {
				setInitialHistoricalSyncer(s)
			}

		// An existing peer has disconnected, so we'll tear down its
		// corresponding GossipSyncer.
		case staleSyncer := <-m.staleSyncers:
			// Once the corresponding GossipSyncer has been stopped
			// and removed, we'll signal to the caller that they can
			// proceed since the SyncManager's internal state has
			// been updated.
			m.removeGossipSyncer(staleSyncer.peer)
			close(staleSyncer.doneChan)

			// If we don't have an initialHistoricalSyncer, or we do
			// but it is not the peer being disconnected, then we
			// have nothing left to do and can proceed.
			switch {
			case initialHistoricalSyncer == nil:
				fallthrough
			case staleSyncer.peer != initialHistoricalSyncer.cfg.peerPub:
				fallthrough
			case m.cfg.NumActiveSyncers == 0:
				continue
			}

			// Otherwise, our initialHistoricalSyncer corresponds to
			// the peer being disconnected, so we'll have to find a
			// replacement.
			log.Debug("Finding replacement for initial " +
				"historical sync")

			s := m.forceHistoricalSync()
			if s == nil {
				log.Debug("No eligible replacement found " +
					"for initial historical sync")
				continue
			}

			log.Debugf("Replaced initial historical "+
				"GossipSyncer(%v) with GossipSyncer(%x)",
				staleSyncer.peer, s.cfg.peerPub)

			setInitialHistoricalSyncer(s)

		// Our initial historical sync signal has completed, so we'll
		// nil all of the relevant fields as they're no longer needed.
		case <-initialHistoricalSyncSignal:
			initialHistoricalSyncer = nil
			initialHistoricalSyncSignal = nil

			log.Debug("Initial historical sync completed")

			// With the initial historical sync complete, we can
			// begin receiving new graph updates at tip. We'll
			// determine whether we can have any more active
			// GossipSyncers. If we do, we'll randomly select some
			// that are currently passive to transition.
			m.syncersMu.Lock()
			numActiveLeft := m.cfg.NumActiveSyncers - len(m.activeSyncers)
			if numActiveLeft <= 0 {
				m.syncersMu.Unlock()
				continue
			}

			// We may not have enough inactive syncers to
			// transition. In that case, we'll transition all of
			// the inactive syncers we have.
			if len(m.inactiveSyncers) < numActiveLeft {
				numActiveLeft = len(m.inactiveSyncers)
			}

			log.Debugf("Attempting to transition %v passive "+
				"GossipSyncers to active", numActiveLeft)

			for i := 0; i < numActiveLeft; i++ {
				chooseRandomSyncer(
					m.inactiveSyncers, m.transitionPassiveSyncer,
				)
			}

			m.syncersMu.Unlock()

		// Our RotateTicker has ticked, so we'll attempt to rotate a
		// single active syncer with a passive one.
		case <-m.cfg.RotateTicker.Ticks():
			m.rotateActiveSyncerCandidate()

		// Our HistoricalSyncTicker has ticked, so we'll randomly select
		// a peer and force a historical sync with them.
		case <-m.cfg.HistoricalSyncTicker.Ticks():
			// To be extra cautious, gate the forceHistoricalSync
			// call such that it can only execute if we are
			// configured to have a non-zero number of sync peers.
			// This way, even if the historical sync ticker manages
			// to tick, we can be sure that a historical sync won't
			// accidentally begin.
			if m.cfg.NumActiveSyncers == 0 {
				continue
			}

			// If we don't have a syncer available, we have nothing
			// to do.
			s := m.forceHistoricalSync()
			if s == nil {
				continue
			}

			// If we've already completed a historical sync, we'll
			// skip setting the initial historical syncer.
			if m.IsGraphSynced() {
				continue
			}

			// Otherwise, we'll track the peer we've performed a
			// historical sync with in order to handle the case
			// where our previous historical sync peer did not
			// respond to our queries and we haven't ingested as
			// much of the graph as we should.
			setInitialHistoricalSyncer(s)

		case <-m.quit:
			return
		}
	}
}

// isPinnedSyncer returns true if the passed GossipSyncer is one of our pinned
// sync peers.
func (m *SyncManager) isPinnedSyncer(s *GossipSyncer) bool {
	_, isPinnedSyncer := m.cfg.PinnedSyncers[s.cfg.peerPub]
	return isPinnedSyncer
}

// createGossipSyncer creates the GossipSyncer for a newly connected peer.
func (m *SyncManager) createGossipSyncer(peer lnpeer.Peer) *GossipSyncer {
	nodeID := route.Vertex(peer.PubKey())
	log.Infof("Creating new GossipSyncer for peer=%x", nodeID[:])

	encoding := lnwire.EncodingSortedPlain
	s := newGossipSyncer(gossipSyncerCfg{
		chainHash:     m.cfg.ChainHash,
		peerPub:       nodeID,
		channelSeries: m.cfg.ChanSeries,
		gossiperState: m.cfg.GossiperState,
		encodingType:  encoding,
		chunkSize:     encodingTypeToChunkSize[encoding],
		batchSize:     requestBatchSize,
		sendToPeer: func(msgs ...lnwire.Message) error {
			return peer.SendMessageLazy(false, msgs...)
		},
		sendToPeerSync: func(msgs ...lnwire.Message) error {
			return peer.SendMessageLazy(true, msgs...)
		},
		ignoreHistoricalFilters:   m.cfg.IgnoreHistoricalFilters,
		maxUndelayedQueryReplies:  DefaultMaxUndelayedQueryReplies,
		delayedQueryReplyInterval: DefaultDelayedQueryReplyInterval,
		bestHeight:                m.cfg.BestHeight,
		markGraphSynced:           m.markGraphSynced,
		maxQueryChanRangeReplies:  maxQueryChanRangeReplies,
	})

	// Gossip syncers are initialized by default in a PassiveSync type
	// and chansSynced state so that they can reply to any peer queries or
	// handle any sync transitions.
	s.setSyncState(chansSynced)
	s.setSyncType(PassiveSync)

	log.Debugf("Created new GossipSyncer[state=%s type=%s] for peer=%v",
		s.syncState(), s.SyncType(), peer)

	return s
}

// removeGossipSyncer removes all internal references to the disconnected peer's
// GossipSyncer and stops it. In the event of an active GossipSyncer being
// disconnected, a passive GossipSyncer, if any, will take its place.
func (m *SyncManager) removeGossipSyncer(peer route.Vertex) {
	m.syncersMu.Lock()
	defer m.syncersMu.Unlock()

	s, ok := m.gossipSyncer(peer)
	if !ok {
		return
	}

	log.Infof("Removing GossipSyncer for peer=%v", peer)

	// We'll stop the GossipSyncer for the disconnected peer in a goroutine
	// to prevent blocking the SyncManager.
	go s.Stop()

	// If it's a non-active syncer, then we can just exit now.
	if _, ok := m.inactiveSyncers[peer]; ok {
		delete(m.inactiveSyncers, peer)
		return
	}

	// If it's a pinned syncer, then we can just exit as this doesn't
	// affect our active syncer count.
	if _, ok := m.pinnedActiveSyncers[peer]; ok {
		delete(m.pinnedActiveSyncers, peer)
		return
	}

	// Otherwise, we'll need to find a new one to replace it, if any.
	delete(m.activeSyncers, peer)
	newActiveSyncer := chooseRandomSyncer(
		m.inactiveSyncers, m.transitionPassiveSyncer,
	)
	if newActiveSyncer == nil {
		return
	}

	log.Debugf("Replaced active GossipSyncer(%x) with GossipSyncer(%x)",
		peer, newActiveSyncer.cfg.peerPub)
}

// rotateActiveSyncerCandidate rotates a single active syncer. To achieve this,
// the active syncer must be in a chansSynced state in order to process the
// sync transition.
func (m *SyncManager) rotateActiveSyncerCandidate() {
	m.syncersMu.Lock()
	defer m.syncersMu.Unlock()

	// If we couldn't find an eligible active syncer to rotate, we can
	// return early.
	activeSyncer := chooseRandomSyncer(m.activeSyncers, nil)
	if activeSyncer == nil {
		log.Debug("No eligible active syncer to rotate")
		return
	}

	// Similarly, if we don't have a candidate to rotate with, we can return
	// early as well.
	candidate := chooseRandomSyncer(m.inactiveSyncers, nil)
	if candidate == nil {
		log.Debug("No eligible candidate to rotate active syncer")
		return
	}

	// Otherwise, we'll attempt to transition each syncer to their
	// respective new sync type.
	log.Debugf("Rotating active GossipSyncer(%x) with GossipSyncer(%x)",
		activeSyncer.cfg.peerPub, candidate.cfg.peerPub)

	if err := m.transitionActiveSyncer(activeSyncer); err != nil {
		log.Errorf("Unable to transition active GossipSyncer(%x): %v",
			activeSyncer.cfg.peerPub, err)
		return
	}

	if err := m.transitionPassiveSyncer(candidate); err != nil {
		log.Errorf("Unable to transition passive GossipSyncer(%x): %v",
			candidate.cfg.peerPub, err)
		return
	}
}

// transitionActiveSyncer transitions an active syncer to a passive one.
//
// NOTE: This must be called with the syncersMu lock held.
func (m *SyncManager) transitionActiveSyncer(s *GossipSyncer) error {
	log.Debugf("Transitioning active GossipSyncer(%x) to passive",
		s.cfg.peerPub)

	if err := s.ProcessSyncTransition(PassiveSync); err != nil {
		return err
	}

	delete(m.activeSyncers, s.cfg.peerPub)
	m.inactiveSyncers[s.cfg.peerPub] = s

	return nil
}

// transitionPassiveSyncer transitions a passive syncer to an active one.
//
// NOTE: This must be called with the syncersMu lock held.
func (m *SyncManager) transitionPassiveSyncer(s *GossipSyncer) error {
	log.Debugf("Transitioning passive GossipSyncer(%x) to active",
		s.cfg.peerPub)

	if err := s.ProcessSyncTransition(ActiveSync); err != nil {
		return err
	}

	delete(m.inactiveSyncers, s.cfg.peerPub)
	m.activeSyncers[s.cfg.peerPub] = s

	return nil
}

// forceHistoricalSync chooses a syncer with a remote peer at random and forces
// a historical sync with it.
func (m *SyncManager) forceHistoricalSync() *GossipSyncer {
	m.syncersMu.Lock()
	defer m.syncersMu.Unlock()

	// We'll sample from both sets of active and inactive syncers in the
	// event that we don't have any inactive syncers.
	return chooseRandomSyncer(m.gossipSyncers(), func(s *GossipSyncer) error {
		return s.historicalSync()
	})
}

// chooseRandomSyncer iterates through the set of syncers given and returns the
// first one which was able to successfully perform the action enclosed in the
// function closure. The random selection comes from Go's randomized map
// iteration order.
//
// NOTE: It's possible for a nil value to be returned if there are no eligible
// candidate syncers.
func chooseRandomSyncer(syncers map[route.Vertex]*GossipSyncer,
	action func(*GossipSyncer) error) *GossipSyncer {

	for _, s := range syncers {
		// Only syncers in a chansSynced state are viable for sync
		// transitions, so skip any that aren't.
		if s.syncState() != chansSynced {
			continue
		}

		if action != nil {
			if err := action(s); err != nil {
				log.Debugf("Skipping eligible candidate "+
					"GossipSyncer(%x): %v", s.cfg.peerPub,
					err)
				continue
			}
		}

		return s
	}

	return nil
}

// InitSyncState is called by outside sub-systems when a connection is
// established to a new peer that understands how to perform channel range
// queries. We'll allocate a new GossipSyncer for it, and start any goroutines
// needed to handle new queries. The first GossipSyncer registered with the
// SyncManager will attempt a historical sync to ensure we have as much of the
// public channel graph as possible.
//
// TODO(wilmer): Only mark as ActiveSync if this isn't a channel peer.
func (m *SyncManager) InitSyncState(peer lnpeer.Peer) error {
	done := make(chan struct{})

	select {
	case m.newSyncers <- &newSyncer{
		peer:     peer,
		doneChan: done,
	}:
	case <-m.quit:
		return ErrSyncManagerExiting
	}

	select {
	case <-done:
		return nil
	case <-m.quit:
		return ErrSyncManagerExiting
	}
}

// PruneSyncState is called by outside sub-systems once a peer that we were
// previously connected to has been disconnected. In this case we can stop the
// existing GossipSyncer assigned to the peer and free up resources.
func (m *SyncManager) PruneSyncState(peer route.Vertex) {
	done := make(chan struct{})

	// We avoid returning an error when the SyncManager is stopped since the
	// GossipSyncer will be stopped then anyway.
	select {
	case m.staleSyncers <- &staleSyncer{
		peer:     peer,
		doneChan: done,
	}:
	case <-m.quit:
		return
	}

	select {
	case <-done:
	case <-m.quit:
	}
}
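
// exampleSyncLifecycle is an illustrative sketch that is not part of the
// original file: it demonstrates pairing InitSyncState on connect with
// PruneSyncState on disconnect. The onDisconnect channel is a hypothetical
// stand-in for the caller's own disconnect notification.
func exampleSyncLifecycle(m *SyncManager, peer lnpeer.Peer,
	onDisconnect <-chan struct{}) error {

	// Registering the peer creates its GossipSyncer and may kick off the
	// initial historical sync if it's the first peer we've connected to.
	if err := m.InitSyncState(peer); err != nil {
		return err
	}

	// Once the peer goes away, tear down its syncer so that a passive
	// syncer, if any, can be promoted in its place.
	<-onDisconnect
	m.PruneSyncState(route.Vertex(peer.PubKey()))

	return nil
}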

// GossipSyncer returns the associated gossip syncer of a peer. The boolean
// returned signals whether there exists a gossip syncer for the peer.
func (m *SyncManager) GossipSyncer(peer route.Vertex) (*GossipSyncer, bool) {
	m.syncersMu.Lock()
	defer m.syncersMu.Unlock()
	return m.gossipSyncer(peer)
}

// gossipSyncer returns the associated gossip syncer of a peer. The boolean
// returned signals whether there exists a gossip syncer for the peer.
func (m *SyncManager) gossipSyncer(peer route.Vertex) (*GossipSyncer, bool) {
	syncer, ok := m.inactiveSyncers[peer]
	if ok {
		return syncer, true
	}
	syncer, ok = m.activeSyncers[peer]
	if ok {
		return syncer, true
	}
	syncer, ok = m.pinnedActiveSyncers[peer]
	if ok {
		return syncer, true
	}
	return nil, false
}

// GossipSyncers returns all of the currently initialized gossip syncers.
func (m *SyncManager) GossipSyncers() map[route.Vertex]*GossipSyncer {
	m.syncersMu.Lock()
	defer m.syncersMu.Unlock()
	return m.gossipSyncers()
}

// gossipSyncers returns all of the currently initialized gossip syncers.
func (m *SyncManager) gossipSyncers() map[route.Vertex]*GossipSyncer {
	numSyncers := len(m.inactiveSyncers) + len(m.activeSyncers)
	syncers := make(map[route.Vertex]*GossipSyncer, numSyncers)

	for _, syncer := range m.inactiveSyncers {
		syncers[syncer.cfg.peerPub] = syncer
	}
	for _, syncer := range m.activeSyncers {
		syncers[syncer.cfg.peerPub] = syncer
	}

	return syncers
}

// markGraphSynced allows us to report that the initial historical sync has
// completed.
func (m *SyncManager) markGraphSynced() {
	atomic.StoreInt32(&m.initialHistoricalSyncCompleted, 1)
}

// IsGraphSynced determines whether we've completed our initial historical sync.
// The initial historical sync is done to ensure we've ingested as much of the
// public graph as possible.
func (m *SyncManager) IsGraphSynced() bool {
	return atomic.LoadInt32(&m.initialHistoricalSyncCompleted) == 1
}
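
// exampleWaitForGraphSync is an illustrative sketch that is not part of the
// original file: it polls IsGraphSynced so a caller can gate work on the
// completion of the initial historical sync. The one-second polling interval
// is an arbitrary assumption.
func exampleWaitForGraphSync(m *SyncManager, quit <-chan struct{}) bool {
	for !m.IsGraphSynced() {
		select {
		case <-time.After(time.Second):
		case <-quit:
			return false
		}
	}
	return true
}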