github.com/kaisenlinux/docker.io@v0.0.0-20230510090727-ea55db55fac7/swarmkit/manager/state/raft/raft_test.go (about)

     1  package raft_test
     2  
     3  import (
     4  	"context"
     5  	"errors"
     6  	"fmt"
     7  	"io/ioutil"
     8  	"math/rand"
     9  	"net"
    10  	"os"
    11  	"reflect"
    12  	"strconv"
    13  	"testing"
    14  	"time"
    15  
    16  	"google.golang.org/grpc"
    17  	"google.golang.org/grpc/codes"
    18  	"google.golang.org/grpc/grpclog"
    19  	"google.golang.org/grpc/status"
    20  
    21  	"code.cloudfoundry.org/clock/fakeclock"
    22  	"github.com/coreos/etcd/raft/raftpb"
    23  	"github.com/coreos/etcd/wal"
    24  	"github.com/docker/swarmkit/api"
    25  	cautils "github.com/docker/swarmkit/ca/testutils"
    26  	"github.com/docker/swarmkit/manager/state"
    27  	"github.com/docker/swarmkit/manager/state/raft"
    28  	raftutils "github.com/docker/swarmkit/manager/state/raft/testutils"
    29  	"github.com/docker/swarmkit/manager/state/raft/transport"
    30  	"github.com/docker/swarmkit/manager/state/store"
    31  	"github.com/docker/swarmkit/testutils"
    32  	"github.com/sirupsen/logrus"
    33  	"github.com/stretchr/testify/assert"
    34  	"github.com/stretchr/testify/require"
    35  )
    36  
    37  const (
    38  	DefaultProposalTime = 10 * time.Second
    39  	ShortProposalTime   = 1 * time.Second
    40  )
    41  
    42  func init() {
    43  	store.WedgeTimeout = 3 * time.Second
    44  	grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
    45  	logrus.SetOutput(ioutil.Discard)
    46  }
    47  
    48  var tc *cautils.TestCA
    49  
    50  func TestMain(m *testing.M) {
    51  	tc = cautils.NewTestCA(nil)
    52  
    53  	// Set a smaller segment size so we don't incur cost preallocating
    54  	// space on old filesystems like HFS+.
    55  	wal.SegmentSizeBytes = 64 * 1024
    56  
    57  	res := m.Run()
    58  	tc.Stop()
    59  	os.Exit(res)
    60  }
    61  
    62  func TestRaftBootstrap(t *testing.T) {
    63  	t.Parallel()
    64  
    65  	nodes, _ := raftutils.NewRaftCluster(t, tc)
    66  	defer raftutils.TeardownCluster(nodes)
    67  
    68  	assert.Len(t, nodes[1].GetMemberlist(), 3)
    69  	assert.Len(t, nodes[2].GetMemberlist(), 3)
    70  	assert.Len(t, nodes[3].GetMemberlist(), 3)
    71  }
    72  
    73  func dial(n *raftutils.TestNode, addr string) (*grpc.ClientConn, error) {
    74  	grpcOptions := []grpc.DialOption{
    75  		grpc.WithBackoffMaxDelay(2 * time.Second),
    76  		grpc.WithBlock(),
    77  	}
    78  	grpcOptions = append(grpcOptions, grpc.WithTransportCredentials(n.SecurityConfig.ClientTLSCreds))
    79  
    80  	grpcOptions = append(grpcOptions, grpc.WithTimeout(10*time.Second))
    81  
    82  	cc, err := grpc.Dial(addr, grpcOptions...)
    83  	if err != nil {
    84  		return nil, err
    85  	}
    86  	return cc, nil
    87  }
    88  
    89  func TestRaftJoinTwice(t *testing.T) {
    90  	t.Parallel()
    91  
    92  	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
    93  	defer raftutils.TeardownCluster(nodes)
    94  
    95  	// Node 3's address changes
    96  	nodes[3].Server.Stop()
    97  	nodes[3].ShutdownRaft()
    98  	nodes[3].Listener.CloseListener()
    99  
   100  	l, err := net.Listen("tcp", "127.0.0.1:0")
   101  	require.NoError(t, err, "can't bind to raft service port")
   102  	nodes[3].Listener = raftutils.NewWrappedListener(l)
   103  	nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
   104  
   105  	// Node 3 tries to join again
   106  	// Use gRPC instead of calling handler directly because of
   107  	// authorization check.
   108  	cc, err := dial(nodes[3], nodes[1].Address)
   109  	assert.NoError(t, err)
   110  	raftClient := api.NewRaftMembershipClient(cc)
   111  	defer cc.Close()
   112  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
   113  	_, err = raftClient.Join(ctx, &api.JoinRequest{Addr: l.Addr().String()})
   114  	cancel()
   115  	assert.NoError(t, err)
   116  
   117  	// Propose a value and wait for it to propagate
   118  	value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime)
   119  	assert.NoError(t, err, "failed to propose value")
   120  	raftutils.CheckValue(t, clockSource, nodes[2], value)
   121  
   122  	// Restart node 2
   123  	nodes[2].Server.Stop()
   124  	nodes[2].ShutdownRaft()
   125  	nodes[2] = raftutils.RestartNode(t, clockSource, nodes[2], false)
   126  	raftutils.WaitForCluster(t, clockSource, nodes)
   127  
   128  	// Node 2 should have the updated address for node 3 in its member list
   129  	require.NotNil(t, nodes[2].GetMemberlist()[nodes[3].Config.ID])
   130  	require.Equal(t, l.Addr().String(), nodes[2].GetMemberlist()[nodes[3].Config.ID].Addr)
   131  }
   132  
   133  func TestRaftLeader(t *testing.T) {
   134  	t.Parallel()
   135  
   136  	nodes, _ := raftutils.NewRaftCluster(t, tc)
   137  	defer raftutils.TeardownCluster(nodes)
   138  
   139  	assert.True(t, nodes[1].IsLeader(), "error: node 1 is not the Leader")
   140  
   141  	// nodes should all have the same leader
   142  	assert.Equal(t, nodes[1].Leader(), nodes[1].Config.ID)
   143  	assert.Equal(t, nodes[2].Leader(), nodes[1].Config.ID)
   144  	assert.Equal(t, nodes[3].Leader(), nodes[1].Config.ID)
   145  }
   146  
   147  func TestRaftLeaderDown(t *testing.T) {
   148  	t.Parallel()
   149  
   150  	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
   151  	defer raftutils.TeardownCluster(nodes)
   152  
   153  	// Stop node 1
   154  	nodes[1].ShutdownRaft()
   155  
   156  	newCluster := map[uint64]*raftutils.TestNode{
   157  		2: nodes[2],
   158  		3: nodes[3],
   159  	}
   160  	// Wait for the re-election to occur
   161  	raftutils.WaitForCluster(t, clockSource, newCluster)
   162  
   163  	// Leader should not be 1
   164  	assert.NotEqual(t, nodes[2].Leader(), nodes[1].Config.ID)
   165  
   166  	// Ensure that node 2 and node 3 have the same leader
   167  	assert.Equal(t, nodes[3].Leader(), nodes[2].Leader())
   168  
   169  	// Find the leader node and a follower node
   170  	var (
   171  		leaderNode   *raftutils.TestNode
   172  		followerNode *raftutils.TestNode
   173  	)
   174  	for i, n := range newCluster {
   175  		if n.Config.ID == n.Leader() {
   176  			leaderNode = n
   177  			if i == 2 {
   178  				followerNode = newCluster[3]
   179  			} else {
   180  				followerNode = newCluster[2]
   181  			}
   182  		}
   183  	}
   184  
   185  	require.NotNil(t, leaderNode)
   186  	require.NotNil(t, followerNode)
   187  
   188  	// Propose a value
   189  	value, err := raftutils.ProposeValue(t, leaderNode, DefaultProposalTime)
   190  	assert.NoError(t, err, "failed to propose value")
   191  
   192  	// The value should be replicated on all remaining nodes
   193  	raftutils.CheckValue(t, clockSource, leaderNode, value)
   194  	assert.Len(t, leaderNode.GetMemberlist(), 3)
   195  
   196  	raftutils.CheckValue(t, clockSource, followerNode, value)
   197  	assert.Len(t, followerNode.GetMemberlist(), 3)
   198  }
   199  
   200  func TestRaftFollowerDown(t *testing.T) {
   201  	t.Parallel()
   202  
   203  	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
   204  	defer raftutils.TeardownCluster(nodes)
   205  
   206  	// Stop node 3
   207  	nodes[3].ShutdownRaft()
   208  
   209  	// Leader should still be 1
   210  	assert.True(t, nodes[1].IsLeader(), "node 1 is not a leader anymore")
   211  	assert.Equal(t, nodes[2].Leader(), nodes[1].Config.ID)
   212  
   213  	// Propose a value
   214  	value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime)
   215  	assert.NoError(t, err, "failed to propose value")
   216  
   217  	// The value should be replicated on all remaining nodes
   218  	raftutils.CheckValue(t, clockSource, nodes[1], value)
   219  	assert.Len(t, nodes[1].GetMemberlist(), 3)
   220  
   221  	raftutils.CheckValue(t, clockSource, nodes[2], value)
   222  	assert.Len(t, nodes[2].GetMemberlist(), 3)
   223  }
   224  
   225  func TestRaftLogReplication(t *testing.T) {
   226  	t.Parallel()
   227  
   228  	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
   229  	defer raftutils.TeardownCluster(nodes)
   230  
   231  	// Propose a value
   232  	value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime)
   233  	assert.NoError(t, err, "failed to propose value")
   234  
   235  	// All nodes should have the value in the physical store
   236  	raftutils.CheckValue(t, clockSource, nodes[1], value)
   237  	raftutils.CheckValue(t, clockSource, nodes[2], value)
   238  	raftutils.CheckValue(t, clockSource, nodes[3], value)
   239  }
   240  
   241  func TestRaftWedgedManager(t *testing.T) {
   242  	t.Parallel()
   243  
   244  	nodeOpts := raft.NodeOptions{
   245  		DisableStackDump: true,
   246  	}
   247  
   248  	var clockSource *fakeclock.FakeClock
   249  	nodes := make(map[uint64]*raftutils.TestNode)
   250  	nodes[1], clockSource = raftutils.NewInitNode(t, tc, nil, nodeOpts)
   251  	raftutils.AddRaftNode(t, clockSource, nodes, tc, nodeOpts)
   252  	raftutils.AddRaftNode(t, clockSource, nodes, tc, nodeOpts)
   253  	defer raftutils.TeardownCluster(nodes)
   254  
   255  	// Propose a value
   256  	_, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime)
   257  	assert.NoError(t, err, "failed to propose value")
   258  
   259  	doneCh := make(chan struct{})
   260  	defer close(doneCh)
   261  
   262  	go func() {
   263  		// Hold the store lock indefinitely
   264  		nodes[1].MemoryStore().Update(func(store.Tx) error {
   265  			<-doneCh
   266  			return nil
   267  		})
   268  	}()
   269  
   270  	assert.NoError(t, testutils.PollFunc(clockSource, func() error {
   271  		if nodes[1].Config.ID == nodes[1].Leader() {
   272  			return errors.New("leader has not changed")
   273  		}
   274  		return nil
   275  	}))
   276  }
   277  
   278  func TestRaftLogReplicationWithoutLeader(t *testing.T) {
   279  	t.Parallel()
   280  	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
   281  	defer raftutils.TeardownCluster(nodes)
   282  
   283  	// Stop the leader
   284  	nodes[1].ShutdownRaft()
   285  
   286  	// Propose a value
   287  	_, err := raftutils.ProposeValue(t, nodes[2], DefaultProposalTime)
   288  	assert.Error(t, err)
   289  
   290  	// No value should be replicated in the store in the absence of the leader
   291  	raftutils.CheckNoValue(t, clockSource, nodes[2])
   292  	raftutils.CheckNoValue(t, clockSource, nodes[3])
   293  }
   294  
   295  func TestRaftQuorumFailure(t *testing.T) {
   296  	t.Parallel()
   297  
   298  	// Bring up a 5 nodes cluster
   299  	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
   300  	raftutils.AddRaftNode(t, clockSource, nodes, tc)
   301  	raftutils.AddRaftNode(t, clockSource, nodes, tc)
   302  	defer raftutils.TeardownCluster(nodes)
   303  
   304  	// Lose a majority
   305  	for i := uint64(3); i <= 5; i++ {
   306  		nodes[i].Server.Stop()
   307  		nodes[i].ShutdownRaft()
   308  	}
   309  
   310  	// Propose a value
   311  	_, err := raftutils.ProposeValue(t, nodes[1], ShortProposalTime)
   312  	assert.Error(t, err)
   313  
   314  	// The value should not be replicated, we have no majority
   315  	raftutils.CheckNoValue(t, clockSource, nodes[2])
   316  	raftutils.CheckNoValue(t, clockSource, nodes[1])
   317  }
   318  
   319  func TestRaftQuorumRecovery(t *testing.T) {
   320  	t.Parallel()
   321  
   322  	// Bring up a 5 nodes cluster
   323  	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
   324  	raftutils.AddRaftNode(t, clockSource, nodes, tc)
   325  	raftutils.AddRaftNode(t, clockSource, nodes, tc)
   326  	defer raftutils.TeardownCluster(nodes)
   327  
   328  	// Lose a majority
   329  	for i := uint64(1); i <= 3; i++ {
   330  		nodes[i].Server.Stop()
   331  		nodes[i].ShutdownRaft()
   332  	}
   333  
   334  	raftutils.AdvanceTicks(clockSource, 5)
   335  
   336  	// Restore the majority by restarting node 3
   337  	nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
   338  
   339  	raftutils.ShutdownNode(nodes[1])
   340  	delete(nodes, 1)
   341  	raftutils.ShutdownNode(nodes[2])
   342  	delete(nodes, 2)
   343  	raftutils.WaitForCluster(t, clockSource, nodes)
   344  
   345  	// Propose a value
   346  	value, err := raftutils.ProposeValue(t, raftutils.Leader(nodes), DefaultProposalTime)
   347  	assert.NoError(t, err)
   348  
   349  	for _, node := range nodes {
   350  		raftutils.CheckValue(t, clockSource, node, value)
   351  	}
   352  }
   353  
   354  func TestRaftFollowerLeave(t *testing.T) {
   355  	t.Parallel()
   356  
   357  	// Bring up a 5 nodes cluster
   358  	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
   359  	raftutils.AddRaftNode(t, clockSource, nodes, tc)
   360  	raftutils.AddRaftNode(t, clockSource, nodes, tc)
   361  	defer raftutils.TeardownCluster(nodes)
   362  
   363  	// Node 5 leaves the cluster
   364  	// Use gRPC instead of calling handler directly because of
   365  	// authorization check.
   366  	cc, err := dial(nodes[1], nodes[1].Address)
   367  	assert.NoError(t, err)
   368  	raftClient := api.NewRaftMembershipClient(cc)
   369  	defer cc.Close()
   370  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
   371  	resp, err := raftClient.Leave(ctx, &api.LeaveRequest{Node: &api.RaftMember{RaftID: nodes[5].Config.ID}})
   372  	cancel()
   373  	assert.NoError(t, err, "error sending message to leave the raft")
   374  	assert.NotNil(t, resp, "leave response message is nil")
   375  
   376  	raftutils.ShutdownNode(nodes[5])
   377  	delete(nodes, 5)
   378  
   379  	raftutils.WaitForPeerNumber(t, clockSource, nodes, 4)
   380  
   381  	// Propose a value
   382  	value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime)
   383  	assert.NoError(t, err, "failed to propose value")
   384  
   385  	// Value should be replicated on every node
   386  	raftutils.CheckValue(t, clockSource, nodes[1], value)
   387  	assert.Len(t, nodes[1].GetMemberlist(), 4)
   388  
   389  	raftutils.CheckValue(t, clockSource, nodes[2], value)
   390  	assert.Len(t, nodes[2].GetMemberlist(), 4)
   391  
   392  	raftutils.CheckValue(t, clockSource, nodes[3], value)
   393  	assert.Len(t, nodes[3].GetMemberlist(), 4)
   394  
   395  	raftutils.CheckValue(t, clockSource, nodes[4], value)
   396  	assert.Len(t, nodes[4].GetMemberlist(), 4)
   397  }
   398  
   399  func TestRaftLeaderLeave(t *testing.T) {
   400  	t.Parallel()
   401  
   402  	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
   403  	defer raftutils.TeardownCluster(nodes)
   404  
   405  	// node 1 is the leader
   406  	assert.Equal(t, nodes[1].Leader(), nodes[1].Config.ID)
   407  
   408  	// Try to leave the raft
   409  	// Use gRPC instead of calling handler directly because of
   410  	// authorization check.
   411  	cc, err := dial(nodes[1], nodes[1].Address)
   412  	assert.NoError(t, err)
   413  	raftClient := api.NewRaftMembershipClient(cc)
   414  	defer cc.Close()
   415  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
   416  	resp, err := raftClient.Leave(ctx, &api.LeaveRequest{Node: &api.RaftMember{RaftID: nodes[1].Config.ID}})
   417  	cancel()
   418  	assert.NoError(t, err, "error sending message to leave the raft")
   419  	assert.NotNil(t, resp, "leave response message is nil")
   420  
   421  	newCluster := map[uint64]*raftutils.TestNode{
   422  		2: nodes[2],
   423  		3: nodes[3],
   424  	}
   425  	// Wait for election tick
   426  	raftutils.WaitForCluster(t, clockSource, newCluster)
   427  
   428  	// Leader should not be 1
   429  	assert.NotEqual(t, nodes[2].Leader(), nodes[1].Config.ID)
   430  	assert.Equal(t, nodes[2].Leader(), nodes[3].Leader())
   431  
   432  	leader := nodes[2].Leader()
   433  
   434  	// Find the leader node and a follower node
   435  	var (
   436  		leaderNode   *raftutils.TestNode
   437  		followerNode *raftutils.TestNode
   438  	)
   439  	for i, n := range nodes {
   440  		if n.Config.ID == leader {
   441  			leaderNode = n
   442  			if i == 2 {
   443  				followerNode = nodes[3]
   444  			} else {
   445  				followerNode = nodes[2]
   446  			}
   447  		}
   448  	}
   449  
   450  	require.NotNil(t, leaderNode)
   451  	require.NotNil(t, followerNode)
   452  
   453  	// Propose a value
   454  	value, err := raftutils.ProposeValue(t, leaderNode, DefaultProposalTime)
   455  	assert.NoError(t, err, "failed to propose value")
   456  
   457  	// The value should be replicated on all remaining nodes
   458  	raftutils.CheckValue(t, clockSource, leaderNode, value)
   459  	assert.Len(t, leaderNode.GetMemberlist(), 2)
   460  
   461  	raftutils.CheckValue(t, clockSource, followerNode, value)
   462  	assert.Len(t, followerNode.GetMemberlist(), 2)
   463  
   464  	raftutils.TeardownCluster(newCluster)
   465  }
   466  
   467  func TestRaftNewNodeGetsData(t *testing.T) {
   468  	t.Parallel()
   469  
   470  	// Bring up a 3 node cluster
   471  	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
   472  	defer raftutils.TeardownCluster(nodes)
   473  
   474  	// Propose a value
   475  	value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime)
   476  	assert.NoError(t, err, "failed to propose value")
   477  
   478  	// Add a new node
   479  	raftutils.AddRaftNode(t, clockSource, nodes, tc)
   480  
   481  	time.Sleep(500 * time.Millisecond)
   482  
   483  	// Value should be replicated on every node
   484  	for _, node := range nodes {
   485  		raftutils.CheckValue(t, clockSource, node, value)
   486  		assert.Len(t, node.GetMemberlist(), 4)
   487  	}
   488  }
   489  
   490  func TestChangesBetween(t *testing.T) {
   491  	t.Parallel()
   492  
   493  	node, _ := raftutils.NewInitNode(t, tc, nil)
   494  	defer raftutils.ShutdownNode(node)
   495  
   496  	startVersion := node.GetVersion()
   497  
   498  	// Propose 10 values
   499  	nodeIDs := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8", "id9", "id10"}
   500  	values := make([]*api.Node, 10)
   501  	for i, nodeID := range nodeIDs {
   502  		value, err := raftutils.ProposeValue(t, node, DefaultProposalTime, nodeID)
   503  		assert.NoError(t, err, "failed to propose value")
   504  		values[i] = value
   505  	}
   506  
   507  	versionAdd := func(version *api.Version, offset int64) api.Version {
   508  		return api.Version{Index: uint64(int64(version.Index) + offset)}
   509  	}
   510  
   511  	expectedChanges := func(startVersion api.Version, values []*api.Node) []state.Change {
   512  		var changes []state.Change
   513  
   514  		for i, value := range values {
   515  			changes = append(changes,
   516  				state.Change{
   517  					Version: versionAdd(&startVersion, int64(i+1)),
   518  					StoreActions: []api.StoreAction{
   519  						{
   520  							Action: api.StoreActionKindCreate,
   521  							Target: &api.StoreAction_Node{
   522  								Node: value,
   523  							},
   524  						},
   525  					},
   526  				},
   527  			)
   528  		}
   529  
   530  		return changes
   531  	}
   532  
   533  	// Satisfiable requests
   534  	changes, err := node.ChangesBetween(versionAdd(startVersion, -1), *startVersion)
   535  	assert.NoError(t, err)
   536  	assert.Len(t, changes, 0)
   537  
   538  	changes, err = node.ChangesBetween(*startVersion, versionAdd(startVersion, 1))
   539  	assert.NoError(t, err)
   540  	require.Len(t, changes, 1)
   541  	assert.Equal(t, expectedChanges(*startVersion, values[:1]), changes)
   542  
   543  	changes, err = node.ChangesBetween(*startVersion, versionAdd(startVersion, 10))
   544  	assert.NoError(t, err)
   545  	require.Len(t, changes, 10)
   546  	assert.Equal(t, expectedChanges(*startVersion, values), changes)
   547  
   548  	changes, err = node.ChangesBetween(versionAdd(startVersion, 2), versionAdd(startVersion, 6))
   549  	assert.NoError(t, err)
   550  	require.Len(t, changes, 4)
   551  	assert.Equal(t, expectedChanges(versionAdd(startVersion, 2), values[2:6]), changes)
   552  
   553  	// Unsatisfiable requests
   554  	_, err = node.ChangesBetween(versionAdd(startVersion, -1), versionAdd(startVersion, 11))
   555  	assert.Error(t, err)
   556  	_, err = node.ChangesBetween(versionAdd(startVersion, 11), versionAdd(startVersion, 11))
   557  	assert.Error(t, err)
   558  	_, err = node.ChangesBetween(versionAdd(startVersion, 11), versionAdd(startVersion, 15))
   559  	assert.Error(t, err)
   560  }
   561  
   562  func TestRaftRejoin(t *testing.T) {
   563  	t.Parallel()
   564  
   565  	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
   566  	defer raftutils.TeardownCluster(nodes)
   567  
   568  	ids := []string{"id1", "id2"}
   569  
   570  	// Propose a value
   571  	values := make([]*api.Node, 2)
   572  	var err error
   573  	values[0], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, ids[0])
   574  	assert.NoError(t, err, "failed to propose value")
   575  
   576  	// The value should be replicated on node 3
   577  	raftutils.CheckValue(t, clockSource, nodes[3], values[0])
   578  	assert.Len(t, nodes[3].GetMemberlist(), 3)
   579  
   580  	// Stop node 3
   581  	nodes[3].Server.Stop()
   582  	nodes[3].ShutdownRaft()
   583  
   584  	// Propose another value
   585  	values[1], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, ids[1])
   586  	assert.NoError(t, err, "failed to propose value")
   587  
   588  	// Nodes 1 and 2 should have the new value
   589  	raftutils.CheckValuesOnNodes(t, clockSource, map[uint64]*raftutils.TestNode{1: nodes[1], 2: nodes[2]}, ids, values)
   590  
   591  	nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
   592  	raftutils.WaitForCluster(t, clockSource, nodes)
   593  
   594  	// Node 3 should have all values, including the one proposed while
   595  	// it was unavailable.
   596  	raftutils.CheckValuesOnNodes(t, clockSource, nodes, ids, values)
   597  }
   598  
   599  func testRaftRestartCluster(t *testing.T, stagger bool) {
   600  	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
   601  	defer raftutils.TeardownCluster(nodes)
   602  
   603  	// Propose a value
   604  	values := make([]*api.Node, 2)
   605  	var err error
   606  	values[0], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, "id1")
   607  	assert.NoError(t, err, "failed to propose value")
   608  
   609  	// Stop all nodes
   610  	for _, node := range nodes {
   611  		node.Server.Stop()
   612  		node.ShutdownRaft()
   613  	}
   614  
   615  	raftutils.AdvanceTicks(clockSource, 5)
   616  
   617  	// Restart all nodes
   618  	i := 0
   619  	for k, node := range nodes {
   620  		if stagger && i != 0 {
   621  			raftutils.AdvanceTicks(clockSource, 1)
   622  		}
   623  		nodes[k] = raftutils.RestartNode(t, clockSource, node, false)
   624  		i++
   625  	}
   626  	raftutils.WaitForCluster(t, clockSource, nodes)
   627  
   628  	// Propose another value
   629  	values[1], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), DefaultProposalTime, "id2")
   630  	assert.NoError(t, err, "failed to propose value")
   631  
   632  	for _, node := range nodes {
   633  		assert.NoError(t, testutils.PollFunc(clockSource, func() error {
   634  			var err error
   635  			node.MemoryStore().View(func(tx store.ReadTx) {
   636  				var allNodes []*api.Node
   637  				allNodes, err = store.FindNodes(tx, store.All)
   638  				if err != nil {
   639  					return
   640  				}
   641  				if len(allNodes) != 2 {
   642  					err = fmt.Errorf("expected 2 nodes, got %d", len(allNodes))
   643  					return
   644  				}
   645  
   646  				for i, nodeID := range []string{"id1", "id2"} {
   647  					n := store.GetNode(tx, nodeID)
   648  					if !reflect.DeepEqual(n, values[i]) {
   649  						err = fmt.Errorf("node %s did not match expected value", nodeID)
   650  						return
   651  					}
   652  				}
   653  			})
   654  			return err
   655  		}))
   656  	}
   657  }
   658  
   659  func TestRaftRestartClusterSimultaneously(t *testing.T) {
   660  	t.Parallel()
   661  
   662  	// Establish a cluster, stop all nodes (simulating a total outage), and
   663  	// restart them simultaneously.
   664  	testRaftRestartCluster(t, false)
   665  }
   666  
   667  func TestRaftRestartClusterStaggered(t *testing.T) {
   668  	t.Parallel()
   669  
   670  	// Establish a cluster, stop all nodes (simulating a total outage), and
   671  	// restart them one at a time.
   672  	testRaftRestartCluster(t, true)
   673  }
   674  
   675  func TestRaftWipedState(t *testing.T) {
   676  	t.Parallel()
   677  
   678  	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
   679  	defer raftutils.TeardownCluster(nodes)
   680  
   681  	// Stop node 3
   682  	nodes[3].Server.Stop()
   683  	nodes[3].ShutdownRaft()
   684  
   685  	// Remove its state
   686  	os.RemoveAll(nodes[3].StateDir)
   687  
   688  	raftutils.AdvanceTicks(clockSource, 5)
   689  
   690  	// Restart node 3
   691  	nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false)
   692  
   693  	// Make sure this doesn't panic.
   694  	testutils.PollFuncWithTimeout(clockSource, func() error { return errors.New("keep the poll going") }, time.Second)
   695  }
   696  
   697  func TestRaftForceNewCluster(t *testing.T) {
   698  	t.Parallel()
   699  
   700  	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
   701  	defer raftutils.TeardownCluster(nodes)
   702  
   703  	// Propose a value
   704  	values := make([]*api.Node, 2)
   705  	var err error
   706  	values[0], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, "id1")
   707  	assert.NoError(t, err, "failed to propose value")
   708  
   709  	// The memberlist should contain 3 members on each node
   710  	for i := 1; i <= 3; i++ {
   711  		assert.Len(t, nodes[uint64(i)].GetMemberlist(), 3)
   712  	}
   713  
   714  	// Stop the first node, and remove the second and third one.
   715  	nodes[1].Server.Stop()
   716  	nodes[1].ShutdownRaft()
   717  
   718  	raftutils.AdvanceTicks(clockSource, 5)
   719  
   720  	raftutils.ShutdownNode(nodes[2])
   721  	delete(nodes, 2)
   722  	raftutils.ShutdownNode(nodes[3])
   723  	delete(nodes, 3)
   724  
   725  	// Only restart the first node with force-new-cluster option
   726  	nodes[1] = raftutils.RestartNode(t, clockSource, nodes[1], true)
   727  	raftutils.WaitForCluster(t, clockSource, nodes)
   728  
   729  	// The memberlist should contain only one node (self)
   730  	assert.Len(t, nodes[1].GetMemberlist(), 1)
   731  
   732  	// Replace the other 2 members
   733  	raftutils.AddRaftNode(t, clockSource, nodes, tc)
   734  	raftutils.AddRaftNode(t, clockSource, nodes, tc)
   735  
   736  	// The memberlist should contain 3 members on each node
   737  	for i := 1; i <= 3; i++ {
   738  		assert.Len(t, nodes[uint64(i)].GetMemberlist(), 3)
   739  	}
   740  
   741  	// Propose another value
   742  	values[1], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), DefaultProposalTime, "id2")
   743  	assert.NoError(t, err, "failed to propose value")
   744  
   745  	for _, node := range nodes {
   746  		assert.NoError(t, testutils.PollFunc(clockSource, func() error {
   747  			var err error
   748  			node.MemoryStore().View(func(tx store.ReadTx) {
   749  				var allNodes []*api.Node
   750  				allNodes, err = store.FindNodes(tx, store.All)
   751  				if err != nil {
   752  					return
   753  				}
   754  				if len(allNodes) != 2 {
   755  					err = fmt.Errorf("expected 2 nodes, got %d", len(allNodes))
   756  					return
   757  				}
   758  
   759  				for i, nodeID := range []string{"id1", "id2"} {
   760  					n := store.GetNode(tx, nodeID)
   761  					if !reflect.DeepEqual(n, values[i]) {
   762  						err = fmt.Errorf("node %s did not match expected value", nodeID)
   763  						return
   764  					}
   765  				}
   766  			})
   767  			return err
   768  		}))
   769  	}
   770  }
   771  
   772  func TestRaftUnreachableNode(t *testing.T) {
   773  	t.Parallel()
   774  
   775  	nodes := make(map[uint64]*raftutils.TestNode)
   776  	defer raftutils.TeardownCluster(nodes)
   777  	var clockSource *fakeclock.FakeClock
   778  	nodes[1], clockSource = raftutils.NewInitNode(t, tc, nil)
   779  
   780  	// Add a new node
   781  	raftutils.AddRaftNode(t, clockSource, nodes, tc, raft.NodeOptions{JoinAddr: nodes[1].Address})
   782  
   783  	// Stop the Raft server of second node on purpose after joining
   784  	nodes[2].Server.Stop()
   785  	nodes[2].Listener.Close()
   786  
   787  	raftutils.AdvanceTicks(clockSource, 5)
   788  	time.Sleep(100 * time.Millisecond)
   789  
   790  	wrappedListener := raftutils.RecycleWrappedListener(nodes[2].Listener)
   791  	securityConfig := nodes[2].SecurityConfig
   792  	serverOpts := []grpc.ServerOption{grpc.Creds(securityConfig.ServerTLSCreds)}
   793  	s := grpc.NewServer(serverOpts...)
   794  
   795  	nodes[2].Server = s
   796  	raft.Register(s, nodes[2].Node)
   797  
   798  	go s.Serve(wrappedListener)
   799  
   800  	raftutils.WaitForCluster(t, clockSource, nodes)
   801  	defer raftutils.TeardownCluster(nodes)
   802  
   803  	// Propose a value
   804  	value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime)
   805  	assert.NoError(t, err, "failed to propose value")
   806  
   807  	// All nodes should have the value in the physical store
   808  	raftutils.CheckValue(t, clockSource, nodes[1], value)
   809  	raftutils.CheckValue(t, clockSource, nodes[2], value)
   810  }
   811  
   812  func TestRaftJoinWithIncorrectAddress(t *testing.T) {
   813  	t.Parallel()
   814  
   815  	nodes := make(map[uint64]*raftutils.TestNode)
   816  	var clockSource *fakeclock.FakeClock
   817  	nodes[1], clockSource = raftutils.NewInitNode(t, tc, nil)
   818  	defer raftutils.ShutdownNode(nodes[1])
   819  
   820  	// Try joining a new node with an incorrect address
   821  	n := raftutils.NewNode(t, clockSource, tc, raft.NodeOptions{JoinAddr: nodes[1].Address, Addr: "1.2.3.4:1234"})
   822  	defer raftutils.CleanupNonRunningNode(n)
   823  
   824  	err := n.JoinAndStart(context.Background())
   825  	assert.NotNil(t, err)
   826  	assert.Contains(t, testutils.ErrorDesc(err), "could not connect to prospective new cluster member using its advertised address")
   827  
   828  	// Check if first node still has only itself registered in the memberlist
   829  	assert.Len(t, nodes[1].GetMemberlist(), 1)
   830  }
   831  
   832  func TestStress(t *testing.T) {
   833  	t.Parallel()
   834  
   835  	// Bring up a 5 nodes cluster
   836  	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
   837  	raftutils.AddRaftNode(t, clockSource, nodes, tc)
   838  	raftutils.AddRaftNode(t, clockSource, nodes, tc)
   839  	defer raftutils.TeardownCluster(nodes)
   840  
   841  	// number of nodes that are running
   842  	nup := len(nodes)
   843  	// record of nodes that are down
   844  	idleNodes := map[int]struct{}{}
   845  	// record of ids that proposed successfully or time-out
   846  	pIDs := []string{}
   847  
   848  	leader := -1
   849  	for iters := 0; iters < 1000; iters++ {
   850  		// keep proposing new values and killing leader
   851  		for i := 1; i <= 5; i++ {
   852  			if nodes[uint64(i)] != nil {
   853  				id := strconv.Itoa(iters)
   854  				_, err := raftutils.ProposeValue(t, nodes[uint64(i)], ShortProposalTime, id)
   855  
   856  				if err == nil {
   857  					pIDs = append(pIDs, id)
   858  					// if propose successfully, at least there are 3 running nodes
   859  					assert.True(t, nup >= 3)
   860  					// only leader can propose value
   861  					assert.True(t, leader == i || leader == -1)
   862  					// update leader
   863  					leader = i
   864  					break
   865  				} else {
   866  					// though ProposeValue returned an error, we still record this value,
   867  					// for it may be proposed successfully and stored in Raft some time later
   868  					pIDs = append(pIDs, id)
   869  				}
   870  			}
   871  		}
   872  
   873  		if rand.Intn(100) < 10 {
   874  			// increase clock to make potential election finish quickly
   875  			clockSource.Increment(200 * time.Millisecond)
   876  			time.Sleep(10 * time.Millisecond)
   877  		} else {
   878  			ms := rand.Intn(10)
   879  			clockSource.Increment(time.Duration(ms) * time.Millisecond)
   880  		}
   881  
   882  		if leader != -1 {
   883  			// if propose successfully, try to kill a node in random
   884  			s := rand.Intn(5) + 1
   885  			if _, ok := idleNodes[s]; !ok {
   886  				id := uint64(s)
   887  				nodes[id].Server.Stop()
   888  				nodes[id].ShutdownRaft()
   889  				idleNodes[s] = struct{}{}
   890  				nup -= 1
   891  				if s == leader {
   892  					// leader is killed
   893  					leader = -1
   894  				}
   895  			}
   896  		}
   897  
   898  		if nup < 3 {
   899  			// if quorum is lost, try to bring back a node
   900  			s := rand.Intn(5) + 1
   901  			if _, ok := idleNodes[s]; ok {
   902  				id := uint64(s)
   903  				nodes[id] = raftutils.RestartNode(t, clockSource, nodes[id], false)
   904  				delete(idleNodes, s)
   905  				nup++
   906  			}
   907  		}
   908  	}
   909  
   910  	// bring back all nodes and propose the final value
   911  	for i := range idleNodes {
   912  		id := uint64(i)
   913  		nodes[id] = raftutils.RestartNode(t, clockSource, nodes[id], false)
   914  	}
   915  	raftutils.WaitForCluster(t, clockSource, nodes)
   916  	id := strconv.Itoa(1000)
   917  	val, err := raftutils.ProposeValue(t, raftutils.Leader(nodes), DefaultProposalTime, id)
   918  	assert.NoError(t, err, "failed to propose value")
   919  	pIDs = append(pIDs, id)
   920  
   921  	// increase clock to make cluster stable
   922  	time.Sleep(500 * time.Millisecond)
   923  	clockSource.Increment(500 * time.Millisecond)
   924  
   925  	ids, values := raftutils.GetAllValuesOnNode(t, clockSource, nodes[1])
   926  
   927  	// since cluster is stable, final value must be in the raft store
   928  	find := false
   929  	for _, value := range values {
   930  		if reflect.DeepEqual(value, val) {
   931  			find = true
   932  			break
   933  		}
   934  	}
   935  	assert.True(t, find)
   936  
   937  	// all nodes must have the same value
   938  	raftutils.CheckValuesOnNodes(t, clockSource, nodes, ids, values)
   939  
   940  	// ids should be a subset of pIDs
   941  	for _, id := range ids {
   942  		find = false
   943  		for _, pid := range pIDs {
   944  			if id == pid {
   945  				find = true
   946  				break
   947  			}
   948  		}
   949  		assert.True(t, find)
   950  	}
   951  }
   952  
   953  // Test the server side code for raft snapshot streaming.
   954  func TestStreamRaftMessage(t *testing.T) {
   955  	ctx, cancel := context.WithCancel(context.Background())
   956  	defer cancel()
   957  
   958  	nodes, _ := raftutils.NewRaftCluster(t, tc)
   959  	defer raftutils.TeardownCluster(nodes)
   960  
   961  	cc, err := dial(nodes[1], nodes[1].Address)
   962  	assert.NoError(t, err)
   963  
   964  	stream, err := api.NewRaftClient(cc).StreamRaftMessage(ctx)
   965  	assert.NoError(t, err)
   966  
   967  	err = stream.Send(&api.StreamRaftMessageRequest{Message: raftutils.NewSnapshotMessage(2, 1, transport.GRPCMaxMsgSize/2)})
   968  	assert.NoError(t, err)
   969  	_, err = stream.CloseAndRecv()
   970  	assert.NoError(t, err)
   971  
   972  	stream, err = api.NewRaftClient(cc).StreamRaftMessage(ctx)
   973  	assert.NoError(t, err)
   974  
   975  	msg := raftutils.NewSnapshotMessage(2, 1, transport.GRPCMaxMsgSize)
   976  
   977  	raftMsg := &api.StreamRaftMessageRequest{Message: msg}
   978  	err = stream.Send(raftMsg)
   979  	assert.NoError(t, err)
   980  
   981  	_, err = stream.CloseAndRecv()
   982  	errStr := fmt.Sprintf("grpc: received message larger than max (%d vs. %d)", raftMsg.Size(), transport.GRPCMaxMsgSize)
   983  	s, _ := status.FromError(err)
   984  	assert.Equal(t, codes.ResourceExhausted, s.Code())
   985  	assert.Equal(t, errStr, s.Message())
   986  
   987  	// Sending multiple snap messages with different indexes
   988  	// should return an error.
   989  	stream, err = api.NewRaftClient(cc).StreamRaftMessage(ctx)
   990  	assert.NoError(t, err)
   991  	msg = raftutils.NewSnapshotMessage(2, 1, 10)
   992  	raftMsg = &api.StreamRaftMessageRequest{Message: msg}
   993  	err = stream.Send(raftMsg)
   994  	assert.NoError(t, err)
   995  	msg = raftutils.NewSnapshotMessage(2, 1, 10)
   996  	msg.Index++
   997  	raftMsg = &api.StreamRaftMessageRequest{Message: msg}
   998  	err = stream.Send(raftMsg)
   999  	assert.NoError(t, err)
  1000  	_, err = stream.CloseAndRecv()
  1001  	s, _ = status.FromError(err)
  1002  	assert.Equal(t, codes.InvalidArgument, s.Code())
  1003  	errStr = "Raft message chunk with index 1 is different from the previously received raft message index 0"
  1004  	assert.Equal(t, errStr, s.Message())
  1005  
  1006  	// Sending multiple of type != MsgSnap should return an error.
  1007  	stream, err = api.NewRaftClient(cc).StreamRaftMessage(ctx)
  1008  	assert.NoError(t, err)
  1009  	msg = raftutils.NewSnapshotMessage(2, 1, 10)
  1010  	msg.Type = raftpb.MsgApp
  1011  	raftMsg = &api.StreamRaftMessageRequest{Message: msg}
  1012  	err = stream.Send(raftMsg)
  1013  	assert.NoError(t, err)
  1014  	// Send same message again.
  1015  	err = stream.Send(raftMsg)
  1016  	assert.NoError(t, err)
  1017  	_, err = stream.CloseAndRecv()
  1018  	s, _ = status.FromError(err)
  1019  	assert.Equal(t, codes.InvalidArgument, s.Code())
  1020  	errStr = fmt.Sprintf("Raft message chunk is not of type %d", raftpb.MsgSnap)
  1021  	assert.Equal(t, errStr, s.Message())
  1022  }
  1023  
  1024  // TestGetNodeIDByRaftID tests the GetNodeIDByRaftID function. It's a very
  1025  // simple test but those are the kind that make a difference over time
  1026  func TestGetNodeIDByRaftID(t *testing.T) {
  1027  	t.Parallel()
  1028  
  1029  	nodes, _ := raftutils.NewRaftCluster(t, tc)
  1030  	defer raftutils.TeardownCluster(nodes)
  1031  
  1032  	// get the member list
  1033  	members := nodes[1].GetMemberlist()
  1034  	// get all of the raft ids
  1035  	raftIDs := make([]uint64, 0, len(members))
  1036  	for _, member := range members {
  1037  		raftIDs = append(raftIDs, member.RaftID)
  1038  	}
  1039  
  1040  	// now go and get the nodeID of every raftID
  1041  	for _, id := range raftIDs {
  1042  		nodeid, err := nodes[1].GetNodeIDByRaftID(id)
  1043  		assert.NoError(t, err, "raft ID %v should give us a node ID", id)
  1044  		// now go through the member manually list and make sure this is
  1045  		// correct
  1046  		for _, member := range members {
  1047  			assert.True(t,
  1048  				// either both should match, or both should not match. if they
  1049  				// are different, then there is an error
  1050  				(member.RaftID == id) == (member.NodeID == nodeid),
  1051  				"member with id %v has node id %v, but we expected member with id %v to have node id %v",
  1052  				member.RaftID, member.NodeID, id, nodeid,
  1053  			)
  1054  		}
  1055  	}
  1056  
  1057  	// now expect a nonexistent raft member to return ErrNoMember
  1058  	id, err := nodes[1].GetNodeIDByRaftID(8675309)
  1059  	assert.Equal(t, err, raft.ErrMemberUnknown)
  1060  	assert.Empty(t, id)
  1061  }