github.com/koko1123/flow-go-1@v0.29.6/consensus/hotstuff/integration/slow_test.go

//go:build timesensitivetest
// +build timesensitivetest

// This file contains a few time-sensitive tests. They might pass on a powerful local machine
// but fail on slower CI machines.
// For now, these tests are intended to be run locally only. Run them with the build tags enabled:
// > go test --tags=relic,timesensitivetest ./...

package integration

import (
	"errors"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/koko1123/flow-go-1/consensus/hotstuff/pacemaker/timeout"
	"github.com/koko1123/flow-go-1/utils/unittest"
)

// pacemaker timeout
// if your machine is fast enough, 10 ms is sufficient
const pmTimeout = 10 * time.Millisecond

// If 2 nodes are down in a 7-node cluster, the remaining 5 nodes can
// still make progress and reach consensus.
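// (Assuming equal weights and HotStuff's > 2/3 super-majority rule, a QC for n = 7
// requires 5 votes, so the 5 healthy replicas can just reach quorum on their own.)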
func Test2TimeoutOutof7Instances(t *testing.T) {

	// test parameters
	// NOTE: block finalization seems to be rather slow on CI at the moment,
	// needing around 1 minute on Travis for 1000 blocks and 10 minutes on
	// TeamCity for 1000 blocks; in order to avoid test timeouts, we keep the
	// number low here
	numPass := 5
	numFail := 2
	finalView := uint64(30)

	// generate the seven hotstuff participants
	participants := unittest.IdentityListFixture(numPass + numFail)
	instances := make([]*Instance, 0, numPass+numFail)
	root := DefaultRoot()
	timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 0.5, 1.5, 0.85, 0)
	require.NoError(t, err)
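	// Note: the numeric arguments above tune the pacemaker (presumably the vote-aggregation
	// timeout fraction and the multiplicative timeout increase/decrease factors, with a zero
	// block-rate delay; see timeout.NewConfig for the exact meanings). The aggressive 10 ms
	// base timeout is what makes this test time sensitive.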

	// set up five instances that work fully
	for n := 0; n < numPass; n++ {
		in := NewInstance(t,
			WithRoot(root),
			WithParticipants(participants),
			WithLocalID(participants[n].NodeID),
			WithTimeouts(timeouts),
			WithStopCondition(ViewReached(finalView)),
		)
		instances = append(instances, in)
	}

	// set up two instances which can neither vote nor propose
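	// (these instances stay in the committee and, presumably, the leader rotation; they
	// simply drop all outgoing votes and proposals as well as all incoming proposals)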
	for n := numPass; n < numPass+numFail; n++ {
		in := NewInstance(t,
			WithRoot(root),
			WithParticipants(participants),
			WithLocalID(participants[n].NodeID),
			WithTimeouts(timeouts),
			WithStopCondition(ViewReached(finalView)),
			WithOutgoingVotes(BlockAllVotes),
			WithOutgoingProposals(BlockAllProposals),
			WithIncomingProposals(BlockAllProposals),
		)
		instances = append(instances, in)
	}

	// connect the communicators of the instances together
	Connect(instances)

	// start all seven instances and wait for them to wrap up
	var wg sync.WaitGroup
	for _, in := range instances {
		wg.Add(1)
		go func(in *Instance) {
			err := in.Run()
			require.True(t, errors.Is(err, errStopCondition))
			wg.Done()
		}(in)
	}
	wg.Wait()

	// check that instance 0 made enough progress and that all healthy instances finalized the same blocks
	ref := instances[0]
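	// sanity bound: the finalized view may lag the stop view by at most 2*numPass+numFail
	// views, a heuristic slack for finalization lag and timed-out leader rounds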
	assert.Less(t, finalView-uint64(2*numPass+numFail), ref.forks.FinalizedBlock().View, "expected instance 0 to make enough progress, but it didn't")
	finalizedViews := FinalizedViews(ref)
	for i := 1; i < numPass; i++ {
		assert.Equal(t, ref.forks.FinalizedBlock(), instances[i].forks.FinalizedBlock(), "instance %d should have the same finalized block as the first instance", i)
		assert.Equal(t, finalizedViews, FinalizedViews(instances[i]), "instance %d should have the same finalized views as the first instance", i)
	}
}

// If 1 node is down in a 4-node cluster, the remaining 3 nodes can
// still make progress, but no block will be finalized, because
// finalization requires a direct 2-chain and a QC.
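// (With n = 4 and the > 2/3 super-majority rule, a QC needs 3 votes, so the 3 healthy
// replicas can still build QCs and advance views; the test asserts below that the
// direct-2-chain pattern needed for finalization nevertheless never forms.)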
func Test1TimeoutOutof4Instances(t *testing.T) {

	// test parameters
	// NOTE: block finalization seems to be rather slow on CI at the moment,
	// needing around 1 minute on Travis for 1000 blocks and 10 minutes on
	// TeamCity for 1000 blocks; in order to avoid test timeouts, we keep the
	// number low here
	numPass := 3
	numFail := 1
	finalView := uint64(30)

	// generate the 4 hotstuff participants
	participants := unittest.IdentityListFixture(numPass + numFail)
	instances := make([]*Instance, 0, numPass+numFail)
	root := DefaultRoot()
	timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 0.5, 1.5, 0.85, 0)
	require.NoError(t, err)

	// set up three instances that work fully
	for n := 0; n < numPass; n++ {
		in := NewInstance(t,
			WithRoot(root),
			WithParticipants(participants),
			WithLocalID(participants[n].NodeID),
			WithTimeouts(timeouts),
			WithStopCondition(ViewReached(finalView)),
		)
		instances = append(instances, in)
	}

	// set up one instance which can neither vote nor propose
	for n := numPass; n < numPass+numFail; n++ {
		in := NewInstance(t,
			WithRoot(root),
			WithParticipants(participants),
			WithLocalID(participants[n].NodeID),
			WithTimeouts(timeouts),
			WithStopCondition(ViewReached(finalView)),
			WithOutgoingVotes(BlockAllVotes),
			WithOutgoingProposals(BlockAllProposals),
			WithIncomingProposals(BlockAllProposals),
		)
		instances = append(instances, in)
	}

	// connect the communicators of the instances together
	Connect(instances)

	// start the instances and wait for them to finish
	var wg sync.WaitGroup
	for _, in := range instances {
		wg.Add(1)
		go func(in *Instance) {
			err := in.Run()
			require.True(t, errors.Is(err, errStopCondition), "should run until stop condition")
			wg.Done()
		}(in)
	}
	wg.Wait()

	// check that no block was finalized and that all healthy instances agree
	ref := instances[0]
	finalizedViews := FinalizedViews(ref)
	assert.Equal(t, []uint64{0}, finalizedViews, "no view should be finalized, because finalization requires a direct 2-chain plus a QC, which never happens in this case")
	for i := 1; i < numPass; i++ {
		assert.Equal(t, ref.forks.FinalizedBlock(), instances[i].forks.FinalizedBlock(), "instance %d should have the same finalized block as the first instance", i)
		assert.Equal(t, finalizedViews, FinalizedViews(instances[i]), "instance %d should have the same finalized views as the first instance", i)
	}
}

// If 1 node is down in a 5-node cluster, the remaining 4 nodes can
// still make progress and reach consensus.
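// (With n = 5 and the > 2/3 super-majority rule, a QC needs 4 votes, so the 4 healthy
// replicas meet quorum exactly.)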
func Test1TimeoutOutof5Instances(t *testing.T) {

	// test parameters
	// NOTE: block finalization seems to be rather slow on CI at the moment,
	// needing around 1 minute on Travis for 1000 blocks and 10 minutes on
	// TeamCity for 1000 blocks; in order to avoid test timeouts, we keep the
	// number low here
	numPass := 4
	numFail := 1
	finalView := uint64(30)

	// generate the five hotstuff participants
	participants := unittest.IdentityListFixture(numPass + numFail)
	instances := make([]*Instance, 0, numPass+numFail)
	root := DefaultRoot()
	timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 0.5, 1.5, 0.85, 0)
	require.NoError(t, err)

	// set up four instances that work fully
	for n := 0; n < numPass; n++ {
		in := NewInstance(t,
			WithRoot(root),
			WithParticipants(participants),
			WithLocalID(participants[n].NodeID),
			WithTimeouts(timeouts),
			WithStopCondition(ViewReached(finalView)),
		)
		instances = append(instances, in)
	}

	// set up one instance which can neither vote nor propose
	for n := numPass; n < numPass+numFail; n++ {
		in := NewInstance(t,
			WithRoot(root),
			WithParticipants(participants),
			WithLocalID(participants[n].NodeID),
			WithTimeouts(timeouts),
			WithStopCondition(ViewReached(finalView)),
			WithOutgoingVotes(BlockAllVotes),
			WithOutgoingProposals(BlockAllProposals),
			WithIncomingProposals(BlockAllProposals),
		)
		instances = append(instances, in)
	}

	// connect the communicators of the instances together
	Connect(instances)

	// start all five instances and wait for them to wrap up
	var wg sync.WaitGroup
	for _, in := range instances {
		wg.Add(1)
		go func(in *Instance) {
			err := in.Run()
			require.True(t, errors.Is(err, errStopCondition))
			wg.Done()
		}(in)
	}
	wg.Wait()

	// check that instance 0 made enough progress and that all healthy instances finalized the same blocks
	ref := instances[0]
	finalizedViews := FinalizedViews(ref)
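	// sanity bound: as in Test2TimeoutOutof7Instances, the finalized view may lag the
	// stop view by at most 2*numPass+numFail views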
	assert.Less(t, finalView-uint64(2*numPass+numFail), ref.forks.FinalizedBlock().View, "expected instance 0 to make enough progress, but it didn't")
	for i := 1; i < numPass; i++ {
		assert.Equal(t, ref.forks.FinalizedBlock(), instances[i].forks.FinalizedBlock(), "instance %d should have the same finalized block as the first instance", i)
		assert.Equal(t, finalizedViews, FinalizedViews(instances[i]), "instance %d should have the same finalized views as the first instance", i)
	}
}