github.com/mailgun/holster/v4@v4.20.0/election/election.go

package election

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"net"
	"sync/atomic"
	"time"

	"github.com/mailgun/holster/v4/setter"
	"github.com/mailgun/holster/v4/slice"
	"github.com/mailgun/holster/v4/syncutil"
	"github.com/sirupsen/logrus"
)

type NodeState GetStateResp
type state uint32

const (
	// followerState means we are following the leader and expect
	// to get heart beats regularly. This is the initial state, as
	// we don't want to force an election when a new node joins
	// the cluster.
	followerState state = iota
	// candidateState means we are actively attempting to become leader
	candidateState
	// leaderState means we have received a quorum of votes while
	// in candidateState and have assumed leadership.
	leaderState
	// shutdownState means we are in the process of shutting down
	shutdownState
)

var ErrNotLeader = errors.New("not the leader")

func (s state) String() string {
	switch s {
	case followerState:
		return "Follower"
	case candidateState:
		return "Candidate"
	case leaderState:
		return "Leader"
	case shutdownState:
		return "Shutdown"
	default:
		return "Unknown"
	}
}

type SendRPCFunc func(context.Context, string, RPCRequest, *RPCResponse) error

type Config struct {
	// How long we should wait for a single network operation to complete.
	NetworkTimeout time.Duration

	// How long followers should wait before they decide the leader
	// lost connection to peers and therefore start a new election.
	HeartBeatTimeout time.Duration

	// How long candidates should wait for an election to complete
	// before starting a new one.
	ElectionTimeout time.Duration

	// How long the leader should wait on heart beat responses from
	// followers before it decides to step down as leader and start a
	// new election.
	LeaderQuorumTimeout time.Duration

	// The minimum number of peers that are required to form a cluster and elect a leader.
	// This prevents a small number of nodes (or a single node) that get disconnected
	// from the cluster from electing a leader (assuming the peer list is updated to
	// exclude the disconnected peers). Instead, nodes will wait until connectivity is
	// restored to a quorum of the cluster. The default is zero, which means that if a
	// single node is disconnected from the cluster, and its peer list only includes
	// itself, it will elect itself leader. If we set MinimumQuorum = 2 then no leader
	// will be elected until the peer list includes at least 2 peers and a successful
	// vote has completed.
	MinimumQuorum int

	// The initial list of peers to be considered in the election, including ourself.
	Peers []string

	// The unique id this peer identifies itself as, as found in the Peers list.
	// This is typically an ip:port address the node is listening to for RPC requests.
	UniqueID string

	// Called when the leader changes
	OnUpdate OnUpdate

	// The logger used for errors and warnings
	Log logrus.FieldLogger

	// Sends an RPC request to a peer. This function must be provided and can
	// utilize any network transport the implementer wishes. It should return
	// an error if the context is cancelled.
	SendRPC SendRPCFunc
}
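
// A minimal sketch of how a Config might be filled in. The transport is up
// to the implementer; here we assume a hypothetical in-process registry
// "local" that maps peer ids to Nodes, so SendRPC delivers requests directly.
//
//	var local = map[string]Node{} // hypothetical registry of in-process nodes
//
//	conf := Config{
//		UniqueID: "localhost:7000",
//		Peers:    []string{"localhost:7000", "localhost:7001"},
//		OnUpdate: func(leader string) {
//			fmt.Printf("new leader: %s\n", leader)
//		},
//		SendRPC: func(ctx context.Context, peer string, req RPCRequest, resp *RPCResponse) error {
//			n, ok := local[peer]
//			if !ok {
//				return fmt.Errorf("unknown peer '%s'", peer)
//			}
//			n.ReceiveRPC(req, resp) // deliver directly to the peer's main loop
//			return ctx.Err()
//		},
//	}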

type OnUpdate func(string)

type Node interface {
	// Starts the main election loop.
	Start(ctx context.Context) error

	// Cancels the election, resigns if we are leader, and waits for all
	// goroutines to complete before returning.
	Stop(ctx context.Context) error

	// Set the list of peers to be considered for the election
	SetPeers(ctx context.Context, peers []string) error

	// If leader, resigns as leader and starts a new election that we will not
	// participate in. Returns ErrNotLeader if not currently the leader.
	Resign(ctx context.Context) error

	// IsLeader is a convenience function that calls GetState() and returns true
	// if this node was elected leader. May block if the main loop is occupied.
	IsLeader() bool

	// GetLeader is a convenience function that calls GetState() and returns the
	// unique id of the node that is currently leader. May block if the main loop is occupied.
	GetLeader() string

	// Returns the current state of this node
	GetState(ctx context.Context) (NodeState, error)

	// Called when this peer receives an RPC request from a peer
	ReceiveRPC(RPCRequest, *RPCResponse)
}
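
// A typical lifecycle, sketched assuming the Config example above and a
// caller-provided ctx (error handling abbreviated):
//
//	node, err := NewNode(conf)
//	if err != nil {
//		panic(err)
//	}
//	if err := node.Start(ctx); err != nil {
//		panic(err)
//	}
//	defer node.Stop(context.Background())
//
//	// Later, from any goroutine:
//	if node.IsLeader() {
//		fmt.Println("this node holds leadership")
//	}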

type node struct {
	conf  Config // The election configuration
	state state  // Current state of our node
	vote  struct {
		CurrentTerm   uint64
		LastTerm      uint64
		LastCandidate string
	} // Current state of the vote
	currentTerm uint64          // The current term of the election when in candidate state
	rpcCh       chan RPCRequest // RPC request channel; the main loop receives RPC requests on this channel
	self        string          // Our name
	peers       []string
	leader      string
	lastContact time.Time     // The last successful contact with the leader (if we are a follower)
	shutdownCh  chan struct{} // Signals we are in shutdown
	log         logrus.FieldLogger
	wg          syncutil.WaitGroup
	running     int64
}

// Creates a new node. You must call Start() to participate in the election.
func NewNode(conf Config) (Node, error) {
	if conf.UniqueID == "" {
		return nil, errors.New("refusing to spawn a new node with no Config.UniqueID defined")
	}

	if conf.SendRPC == nil {
		return nil, errors.New("refusing to spawn a new node with no Config.SendRPC defined")
	}

	setter.SetDefault(&conf.Log, logrus.WithField("id", conf.UniqueID))
	setter.SetDefault(&conf.LeaderQuorumTimeout, time.Second*12)
	setter.SetDefault(&conf.HeartBeatTimeout, time.Second*6)
	setter.SetDefault(&conf.ElectionTimeout, time.Second*6)
	setter.SetDefault(&conf.NetworkTimeout, time.Second*3)

	c := &node{
		rpcCh: make(chan RPCRequest, 5_000),
		self:  conf.UniqueID,
		peers: conf.Peers,
		conf:  conf,
		log:   conf.Log,
	}
	return c, nil
}

// Called by the implementer when an RPC is received from another node
func (e *node) ReceiveRPC(req RPCRequest, resp *RPCResponse) {
	// Ignore requests received when we are not running. If we didn't, we
	// could create a race when initializing e.shutdownCh, and we could fill
	// up rpcCh with requests that are never handled.
	if atomic.LoadInt64(&e.running) != 1 {
		return
	}

	req.respChan = make(chan RPCResponse, 1)
	e.rpcCh <- req

	select {
	case rpcResp := <-req.respChan:
		*resp = rpcResp
	case <-e.shutdownCh:
	}
}
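
// Sketch of wiring ReceiveRPC into a transport. The HTTP handler shape and
// the codec are hypothetical; the package only requires that a decoded
// RPCRequest is passed in and the filled-in RPCResponse is returned to the
// calling peer.
//
//	func handleRPC(w http.ResponseWriter, r *http.Request) {
//		var req RPCRequest
//		// ... decode r.Body into req with the codec your transport uses ...
//		var resp RPCResponse
//		node.ReceiveRPC(req, &resp)
//		// ... encode resp back to the caller via w ...
//	}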

// SetPeers is a thread safe way to dynamically add or remove peers in a running cluster.
// These peers will be contacted when requesting votes during leader election.
func (e *node) SetPeers(ctx context.Context, peers []string) error {
	// If the main loop is not running, there is no risk of race
	if atomic.LoadInt64(&e.running) != 1 {
		e.peers = peers
		return nil
	}

	select {
	case <-e.send(SetPeersReq{Peers: peers}):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
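
// For example, pushing a membership change from service discovery (a sketch;
// discoverPeers is a hypothetical helper returning the current peer list):
//
//	peers := discoverPeers() // e.g. []string{"10.0.0.1:7000", "10.0.0.2:7000"}
//	if err := node.SetPeers(ctx, peers); err != nil {
//		log.Printf("failed to update peers: %v", err)
//	}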

// GetState returns the current state of this node
func (e *node) GetState(ctx context.Context) (NodeState, error) {
	// If the main loop is not running, there is no risk of race
	if atomic.LoadInt64(&e.running) != 1 {
		return NodeState{
			Peers:  e.peers,
			State:  e.state.String(),
			Leader: e.leader,
		}, nil
	}

	select {
	case resp := <-e.send(GetStateReq{}):
		if s, ok := resp.Response.(GetStateResp); ok {
			return NodeState(s), nil
		}
	case <-ctx.Done():
		return NodeState{}, ctx.Err()
	}
	return NodeState{}, nil
}

// IsLeader is a convenience function that calls GetState() and returns true
// if this node was elected leader. May block if the main loop is occupied.
func (e *node) IsLeader() bool {
	s, _ := e.GetState(context.Background())
	return e.self == s.Leader
}

// GetLeader is a convenience function that calls GetState() and returns the
// unique id of the node that is currently leader. May block if the main loop is occupied.
func (e *node) GetLeader() string {
	s, _ := e.GetState(context.Background())
	return s.Leader
}

func (e *node) isLeader() bool {
	return e.self == e.leader
}

func (e *node) setLeader(leader string) {
	if e.leader != leader {
		e.log.Debugf("Set Leader '%s'", leader)
		e.leader = leader
		if e.conf.OnUpdate != nil {
			e.conf.OnUpdate(leader)
		}
	}
}

func (e *node) getLeader() string {
	return e.leader
}

// Resign will cause this node to step down as leader. If this
// node is NOT leader, it does nothing and returns ErrNotLeader.
func (e *node) Resign(ctx context.Context) error {
	// Avoid blocking if main loop is not running
	if atomic.LoadInt64(&e.running) != 1 {
		return ErrNotLeader
	}

	select {
	case rpcResp := <-e.send(ResignReq{}):
		resp, ok := rpcResp.Response.(ResignResp)
		if !ok {
			return errors.New("resign response channel closed")
		}
		if rpcResp.Error != "" {
			return errors.New(rpcResp.Error)
		}
		if resp.Success {
			return nil
		}
		return ErrNotLeader
	case <-e.shutdownCh:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// Start the main event loop which allows the election to proceed.
// Call this method when the node is ready to be considered in the election.
func (e *node) Start(ctx context.Context) error {
	if atomic.LoadInt64(&e.running) == 1 {
		return nil
	}
	e.shutdownCh = make(chan struct{})
	atomic.StoreInt64(&e.running, 1)
	e.wg.Go(e.run)
	return nil
}

// Stop stops all internal goroutines and if this node is currently
// leader, resigns as leader.
func (e *node) Stop(ctx context.Context) error {
	if atomic.LoadInt64(&e.running) != 1 {
		return nil
	}
	atomic.StoreInt64(&e.running, 0)
	close(e.shutdownCh)
	done := make(chan struct{})
	go func() {
		e.wg.Wait()
		close(done)
	}()

	// Wait for the goroutines to stop, but honor the context deadline so
	// callers are not blocked indefinitely.
	select {
	case <-done:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// Main thread loop
func (e *node) run() {
	for {
		select {
		case <-e.shutdownCh:
			e.state = shutdownState
			return
		default:
		}

		switch e.state {
		case followerState:
			e.runFollower()
		case candidateState:
			e.runCandidate()
		case leaderState:
			e.runLeader()
		}
	}
}

func (e *node) runFollower() {
	e.log.Debugf("entering follower state, current leader is '%s'", e.leader)
	timeout := randomDuration(e.conf.HeartBeatTimeout)
	heartbeatTimer := time.NewTicker(timeout)
	defer heartbeatTimer.Stop()
	noPeersTimer := time.NewTimer(timeout / 5)
	defer noPeersTimer.Stop()

	for e.state == followerState {
		select {
		case rpc := <-e.rpcCh:
			e.processRPC(rpc)
		case <-heartbeatTimer.C:
			// Check if we have had successful contact with the leader
			if time.Since(e.lastContact) < e.conf.HeartBeatTimeout {
				continue
			}

			// Heartbeat failed! Transition to the candidate state
			e.log.Debugf("heartbeat timeout, starting election; previous leader was '%s'", e.leader)
			e.setLeader("")
			e.state = candidateState
			return
		case <-noPeersTimer.C:
			// If we already have a leader, don't check for no peers
			if e.getLeader() != "" {
				continue
			}

			// If we have no peers, or if we are the only peer, no need to wait
			// for the heartbeat timeout. Change state to candidate and start the election.
			if len(e.peers) == 0 || (len(e.peers) == 1 && e.peers[0] == e.self) {
				e.state = candidateState
				return
			}
		case <-e.shutdownCh:
			return
		}
	}
}

func (e *node) runCandidate() {
	e.log.Debugf("entering candidate state; current term '%d'", e.currentTerm+1)
	// A nil channel blocks forever; it is replaced once electSelf() starts a vote.
	var voteCh <-chan VoteResp

	// Each node will choose a random time to send their vote. This makes it more
	// likely that the first node to send vote requests will win the election, and
	// avoids a stalemate.
	voteTimer := time.NewTimer(randomDuration(e.conf.HeartBeatTimeout / 10))
	defer voteTimer.Stop()
	// We restart the vote if we have not received a heart beat from a chosen leader before
	// this timer expires.
	electionTimer := time.NewTimer(randomDuration(e.conf.ElectionTimeout))
	defer electionTimer.Stop()

	// Tally the votes, need a simple majority
	grantedVotes := 0
	votesNeeded := e.quorumSize()
	e.log.Debugf("votes needed: %d", votesNeeded)

	for e.state == candidateState {
		select {
		case <-voteTimer.C:
			// Do not start a vote if we are below our minimum quorum
			if len(e.peers) < e.conf.MinimumQuorum {
				e.log.Warnf("peer count '%d' below minimum quorum of '%d'; sleeping...",
					len(e.peers), e.conf.MinimumQuorum)
				continue
			}
			voteCh = e.electSelf()
		case rpc := <-e.rpcCh:
			e.processRPC(rpc)
		case vote := <-voteCh:
			// Check if the term is greater than ours, bail
			if vote.Term > e.currentTerm {
				e.log.Debug("newer term discovered, fallback to follower")
				e.state = followerState
				e.currentTerm = vote.Term
				return
			}

			// Check if the vote is granted
			if vote.Granted {
				grantedVotes++
				e.log.Debugf("vote granted from '%s' term '%d', tally '%d'", vote.Candidate, vote.Term, grantedVotes)
			}

			// Check if we've become the leader
			if grantedVotes >= votesNeeded {
				e.log.Debugf("election won! tally is '%d'", grantedVotes)
				e.state = leaderState
				e.setLeader(e.self)
				return
			}
		case <-electionTimer.C:
			// Election failed! Restart the election. We simply return, which will kick us back into runCandidate
			e.log.Debug("Election timeout reached, restarting election")
			return
		case <-e.shutdownCh:
			return
		}
	}
}

// electSelf is used to send a VoteReq RPC to all peers with a vote for
// ourself. This has the side effect of incrementing the current term. The
// response channel returned is used to wait for all the responses, including a
// vote for ourself.
func (e *node) electSelf() <-chan VoteResp {
	respCh := make(chan VoteResp, len(e.peers)+1)

	// Increment the term
	e.currentTerm++

	// Construct a function to ask for a vote
	askPeer := func(peer string, term uint64, self string) {
		e.wg.Go(func() {
			ctx, cancel := context.WithTimeout(context.Background(), e.conf.NetworkTimeout)
			defer cancel()

			// Construct the request
			req := RPCRequest{
				RPC: VoteRPC,
				Request: VoteReq{
					Term:      term,
					Candidate: self,
				},
			}

			var resp RPCResponse
			if err := e.conf.SendRPC(ctx, peer, req, &resp); err != nil {
				e.log.WithFields(logrus.Fields{"err": err, "peer": peer}).
					Error("error during vote rpc")
				vResp, ok := resp.Response.(VoteResp)
				if !ok {
					return
				}
				vResp.Term = term
				vResp.Granted = false
				respCh <- vResp
				// Return here so the failed vote is not sent a second time by
				// the fall-through below.
				return
			}
			vResp, ok := resp.Response.(VoteResp)
			if !ok {
				return
			}
			respCh <- vResp
		})
	}

	// Vote for ourselves first
	e.vote.LastCandidate = e.self
	e.vote.LastTerm = e.currentTerm

	// Include our own vote
	respCh <- VoteResp{
		Candidate: e.self,
		Term:      e.currentTerm,
		Granted:   true,
	}

	// For each peer, request a vote
	for _, peer := range e.peers {
		if peer == e.self {
			continue
		}
		askPeer(peer, e.currentTerm, e.self)
	}
	return respCh
}

func (e *node) runLeader() {
	heartBeatTicker := time.NewTicker(randomDuration(e.conf.HeartBeatTimeout / 3))
	defer heartBeatTicker.Stop()
	quorumTicker := time.NewTicker(e.conf.LeaderQuorumTimeout)
	defer quorumTicker.Stop()
	peersLastContact := make(map[string]time.Time, len(e.peers))
	heartBeatReplyCh := make(chan HeartBeatResp, 5_000)

	for e.state == leaderState {
		select {
		case rpc := <-e.rpcCh:
			e.processRPC(rpc)
			// If the RPC was a set peers request, immediately send heart beats to all nodes
			if _, ok := rpc.Request.(SetPeersReq); ok {
				for _, peer := range e.peers {
					e.sendHeartBeat(peer, heartBeatReplyCh)
				}
			}
		case reply := <-heartBeatReplyCh:
			// Is the reply from a peer we are familiar with?
			if !slice.ContainsString(reply.From, e.peers, nil) {
				e.log.WithField("peer", reply.From).
					Debug("leader received heartbeat reply from peer not in our peer list; ignoring")
				break
			}

			// Got a follower that says it is in a different election term. Update to the latest term,
			// which will propagate to all our followers when we send our next heartbeat. This ensures
			// all nodes are on the latest term.
			if reply.Term > e.currentTerm {
				e.log.Warnf("got follower heartbeat term '%d' newer than our own '%d'; updating term",
					reply.Term, e.currentTerm)
				e.currentTerm = reply.Term
			}

			peersLastContact[reply.From] = time.Now()
		case <-heartBeatTicker.C:
			for _, peer := range e.peers {
				e.sendHeartBeat(peer, heartBeatReplyCh)
			}
		case <-quorumTicker.C:
			// If the number of peers falls below our MinimumQuorum then we step down.
			if len(e.peers) < e.conf.MinimumQuorum {
				e.log.Warnf("peer count '%d' below minimum quorum of '%d'; stepping down",
					len(e.peers), e.conf.MinimumQuorum)
				e.state = followerState
				e.setLeader("")
				// Inform the other peers we are stepping down
				for _, peer := range e.peers {
					e.sendElectionReset(peer)
				}
				return
			}

			// Check if we have received contact from a quorum of nodes within the leader quorum timeout interval.
			// If not, we step down as we may have lost connectivity.
			contacted := 0
			now := time.Now()
			for _, peer := range e.peers {
				if peer == e.self {
					continue
				}

				lc, ok := peersLastContact[peer]
				if !ok {
					e.log.Debugf("quorum check - peer '%s' not found", peer)
					continue
				}
				diff := now.Sub(lc)
				e.log.Debugf("quorum check - peer '%s' diff '%f'", peer, diff.Seconds())
				if diff >= e.conf.HeartBeatTimeout {
					e.log.Debugf("no heartbeat response from '%s' for '%s'", peer, diff)
					continue
				}
				contacted++
			}

			// Verify we can contact a quorum (minus ourself)
			quorum := e.quorumSize()
			e.log.Debugf("quorum check - quorum='%d' contacted='%d'", quorum-1, contacted)
			if contacted < (quorum - 1) {
				e.log.Warn("failed to receive heart beats from a quorum of peers; stepping down")
				e.state = followerState
				e.setLeader("")
				// Inform the other peers we are stepping down
				for _, peer := range e.peers {
					e.sendElectionReset(peer)
				}
				return
			}
		case <-e.shutdownCh:
			e.state = shutdownState
			e.log.Debug("leader shutdown")
			if e.isLeader() {
				// Notify all followers we are no longer leader
				for _, peer := range e.peers {
					e.sendElectionReset(peer)
				}
			}
			return
		}
	}
	e.lastContact = time.Now()
	if e.isLeader() {
		e.setLeader("")
	}
}

func (e *node) sendHeartBeat(peer string, heartBeatReplyCh chan HeartBeatResp) {
	// Don't heartbeat ourself
	if peer == e.self {
		return
	}
	// Avoid race by localizing the current term
	term := e.currentTerm

	e.wg.Go(func() {
		var resp RPCResponse
		req := RPCRequest{
			RPC: HeartBeatRPC,
			Request: HeartBeatReq{
				From: e.self,
				Term: term,
			},
		}

		ctx, cancel := context.WithTimeout(context.Background(), e.conf.NetworkTimeout)
		defer cancel()
		if err := e.conf.SendRPC(ctx, peer, req, &resp); err != nil {
			e.log.WithFields(logrus.Fields{"err": err, "peer": peer}).
				Debug("error during heart beat rpc")
			return
		}
		hResp, ok := resp.Response.(HeartBeatResp)
		if !ok {
			return
		}
		heartBeatReplyCh <- hResp
	})
}

func (e *node) sendElectionReset(peer string) {
	// Don't send election reset to ourself
	if peer == e.self {
		return
	}

	e.wg.Go(func() {
		ctx, cancel := context.WithTimeout(context.Background(), e.conf.NetworkTimeout)
		defer cancel()
		req := RPCRequest{RPC: ResetElectionRPC, Request: ResetElectionReq{}}
		if err := e.conf.SendRPC(ctx, peer, req, &RPCResponse{}); err != nil {
			e.log.WithFields(logrus.Fields{"err": err, "peer": peer}).
				Debug("error during reset election rpc")
		}
	})
}

func (e *node) processRPC(rpc RPCRequest) {
	switch cmd := rpc.Request.(type) {
	case VoteReq:
		e.handleVote(rpc, cmd)
	case ResetElectionReq:
		e.handleResetElection(rpc)
	case HeartBeatReq:
		e.handleHeartBeat(rpc, cmd)
	case ResignReq:
		e.handleResign(rpc)
	case SetPeersReq:
		e.handleSetPeers(rpc, cmd)
	case GetStateReq:
		e.handleGetState(rpc)
	default:
		e.log.Errorf("got unexpected command %#v", rpc.Request)
		rpc.respond(rpc.RPC, nil, "unexpected command")
	}
}

// handleResign notifies all followers that we are stepping down as leader.
// If we are leader, responds with Success = true.
func (e *node) handleResign(rpc RPCRequest) {
	e.log.Debug("RPC: election.ResignReq{}")
	// If not leader, do nothing
	if !e.isLeader() {
		rpc.respond(rpc.RPC, ResignResp{}, "")
		return
	}

	e.setLeader("")
	e.state = followerState
	for _, peer := range e.peers {
		e.sendElectionReset(peer)
	}
	rpc.respond(rpc.RPC, ResignResp{Success: true}, "")
}

// handleResetElection resets our state and starts a new election
func (e *node) handleResetElection(rpc RPCRequest) {
	e.log.Debug("RPC: election.ResetElectionReq{}")
	e.setLeader("")
	e.state = candidateState
	rpc.respond(rpc.RPC, ResetElectionResp{}, "")
}

// handleHeartBeat handles heartbeat requests from the elected leader
func (e *node) handleHeartBeat(rpc RPCRequest, req HeartBeatReq) {
	e.log.Debugf("RPC: %#v", req)
	resp := HeartBeatResp{
		From: e.self,
		Term: e.currentTerm,
	}

	defer func() {
		rpc.respond(rpc.RPC, resp, "")
	}()

	// This might occur if 2 or more nodes think they are the elected leader. In this
	// case all leaders that emit heartbeats will fall back to follower; from
	// there the followers will timeout waiting for a heartbeat and the vote will
	// occur again, hopefully this time without electing 2 leaders.
	//
	// It's also possible that one leader sends its heartbeats before the other leader;
	// in that case the first leader to send a heartbeat becomes leader.
	//
	// This can also occur if a leader loses connectivity to the rest of the cluster.
	// In this case we become the follower of whoever sent us a heartbeat, regardless
	// of how our current term compares to the term of the one who sent us the heartbeat.
	if e.state != followerState {
		e.state = followerState
		resp.Term = req.Term
	}

	// Always update to the most current term. This way if a leader resigns, the new election
	// will not elect the same leader again.
	if req.Term > e.currentTerm {
		e.currentTerm = req.Term
	}

	// Only the node with the most votes is the leader and should report heartbeats
	e.setLeader(req.From)

	e.lastContact = time.Now()
}

// handleVote determines who we will vote for this term
func (e *node) handleVote(rpc RPCRequest, req VoteReq) {
	e.log.Debugf("RPC: %#v", req)
	resp := VoteResp{
		Term:      e.currentTerm,
		Candidate: e.self,
		Granted:   false,
	}

	defer func() {
		rpc.respond(rpc.RPC, resp, "")
	}()

	// Check if we have an existing leader (who is not the candidate). Votes are rejected
	// if there is a known leader. If a leader wants to step down, they notify followers
	// with the ResetElection RPC call.
	if e.leader != "" && e.leader != req.Candidate {
		e.log.Debugf("rejecting vote request from '%s' since we have leader '%s'", req.Candidate, e.leader)
		return
	}

	// Ignore an older term
	if req.Term < e.currentTerm {
		return
	}

	// Increase the term if we see a newer one
	if req.Term > e.currentTerm {
		// Ensure transition to follower
		e.log.Debugf("received a vote request with a newer term '%d'", req.Term)
		e.state = followerState
		e.currentTerm = req.Term
		resp.Term = req.Term
	}

	// Check if we've voted in this election before
	if e.vote.LastTerm == req.Term && e.vote.LastCandidate != "" {
		e.log.Debugf("ignoring vote request from '%s'; already voted for '%s' in election '%d'",
			req.Candidate, e.vote.LastCandidate, req.Term)
		if e.vote.LastCandidate == req.Candidate {
			e.log.Debugf("duplicate requestVote from candidate '%s'", req.Candidate)
			resp.Granted = true
		}
		return
	}

	// Always vote for the first candidate we receive a request from for this term
	e.vote.LastTerm = req.Term
	e.vote.LastCandidate = req.Candidate

	// Tell the requester we voted for them
	resp.Granted = true
	e.lastContact = time.Now()
}

func (e *node) handleSetPeers(rpc RPCRequest, req SetPeersReq) {
	e.log.Debugf("RPC: %#v", req)
	e.peers = req.Peers
	rpc.respond(rpc.RPC, SetPeersResp{}, "")
}

func (e *node) handleGetState(rpc RPCRequest) {
	e.log.Debug("RPC: election.GetStateReq{}")

	rpc.respond(rpc.RPC, GetStateResp{
		Peers:  e.peers,
		State:  e.state.String(),
		Leader: e.leader,
	}, "")
}
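
// quorumSize returns the number of votes required for a simple majority of
// the current peer list; an empty peer list needs only our own vote. For
// example, a quick sanity check of the formula below:
//
//	len(peers) == 0 -> 1 (our own vote suffices)
//	len(peers) == 3 -> 2
//	len(peers) == 4 -> 3
//	len(peers) == 5 -> 3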
func (e *node) quorumSize() int {
	size := len(e.peers)
	if size == 0 {
		return 1
	}
	return size/2 + 1
}

func (e *node) send(req interface{}) chan RPCResponse {
	respCh := make(chan RPCResponse, 1)

	select {
	case e.rpcCh <- RPCRequest{
		Request:  req,
		respChan: respCh,
	}:
	// Avoid blocking if the rpcCh is full
	default:
		e.conf.Log.Error("RPC send failed; rpc channel is full")
		respCh <- RPCResponse{}
	}
	return respCh
}

// randomDuration returns a value that is between minDur and 2x minDur.
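// For example, with minDur = 6s the result lies in the interval [6s, 12s).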
func randomDuration(minDur time.Duration) time.Duration {
	return minDur + time.Duration(rand.Int63())%minDur //nolint:gosec // Cryptographic security not required.
}

// WaitForConnect waits for the specified address to accept connections, then returns nil.
// Returns an error if all attempts have been exhausted.
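// For example, to wait up to ~5 seconds for a freshly started node (address
// and timings here are hypothetical):
//
//	if err := WaitForConnect("127.0.0.1:7000", 10, 500*time.Millisecond); err != nil {
//		log.Fatal(err)
//	}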
func WaitForConnect(address string, attempts int, interval time.Duration) error {
	var err error
	var conn net.Conn
	for i := 0; i < attempts; i++ {
		conn, err = net.Dial("tcp", address)
		if err != nil {
			// Wait between failed attempts, not after a success.
			time.Sleep(interval)
			continue
		}
		conn.Close()
		return nil
	}
	return fmt.Errorf("while connecting to '%s' - '%s' after %d attempts", address, err, attempts)
}