github.com/NebulousLabs/Sia@v1.3.7/modules/consensus/synchronize_test.go

     1  package consensus
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"io"
     7  	"net"
     8  	"path/filepath"
     9  	"strconv"
    10  	"sync"
    11  	"testing"
    12  	"time"
    13  
    14  	"github.com/NebulousLabs/Sia/build"
    15  	"github.com/NebulousLabs/Sia/crypto"
    16  	"github.com/NebulousLabs/Sia/encoding"
    17  	"github.com/NebulousLabs/Sia/modules"
    18  	"github.com/NebulousLabs/Sia/modules/gateway"
    19  	"github.com/NebulousLabs/Sia/types"
    20  
    21  	"github.com/coreos/bbolt"
    22  )
    23  
    24  // TestSynchronize tests that the consensus set can successfully synchronize
    25  // to a peer.
    26  func TestSynchronize(t *testing.T) {
    27  	if testing.Short() {
    28  		t.SkipNow()
    29  	}
    30  	cst1, err := createConsensusSetTester(t.Name() + "1")
    31  	if err != nil {
    32  		t.Fatal(err)
    33  	}
    34  	defer cst1.Close()
    35  	cst2, err := createConsensusSetTester(t.Name() + "2")
    36  	if err != nil {
    37  		t.Fatal(err)
    38  	}
    39  	defer cst2.Close()
    40  
    41  	// mine on cst2 until it is above cst1
    42  	for cst1.cs.dbBlockHeight() >= cst2.cs.dbBlockHeight() {
    43  		b, _ := cst2.miner.FindBlock()
    44  		err = cst2.cs.AcceptBlock(b)
    45  		if err != nil {
    46  			t.Fatal(err)
    47  		}
    48  	}
    49  
    50  	// connect gateways, triggering a Synchronize
    51  	err = cst1.gateway.Connect(cst2.gateway.Address())
    52  	if err != nil {
    53  		t.Fatal(err)
    54  	}
    55  
    56  	// blockchains should now match
    57  	for i := 0; i < 50; i++ {
    58  		if cst1.cs.dbCurrentBlockID() != cst2.cs.dbCurrentBlockID() {
    59  			time.Sleep(250 * time.Millisecond)
    60  		}
    61  	}
    62  	if cst1.cs.dbCurrentBlockID() != cst2.cs.dbCurrentBlockID() {
    63  		t.Fatal("Synchronize failed")
    64  	}
    65  
    66  	// Mine on cst2 until it is more than 'MaxCatchUpBlocks' ahead of cst1.
    67  	// NOTE: we have to disconnect prior to this, otherwise cst2 will relay
    68  	// blocks to cst1.
    69  	cst1.gateway.Disconnect(cst2.gateway.Address())
    70  	cst2.gateway.Disconnect(cst1.gateway.Address())
    71  	for cst2.cs.dbBlockHeight() < cst1.cs.dbBlockHeight()+3+MaxCatchUpBlocks {
    72  		_, err := cst2.miner.AddBlock()
    73  		if err != nil {
    74  			t.Fatal(err)
    75  		}
    76  	}
    77  	// reconnect
    78  	err = cst1.gateway.Connect(cst2.gateway.Address())
    79  	if err != nil {
    80  		t.Fatal(err)
    81  	}
    82  
    83  	// block heights should now match
    84  	for i := 0; i < 50; i++ {
    85  		if cst1.cs.dbBlockHeight() != cst2.cs.dbBlockHeight() {
    86  			time.Sleep(250 * time.Millisecond)
    87  		}
    88  	}
    89  	if cst1.cs.dbBlockHeight() != cst2.cs.dbBlockHeight() {
    90  		t.Fatal("synchronize failed")
    91  	}
    92  
    93  	// extend cst2 with a "bad" (old) block, and synchronize. cst1 should
    94  	// reject the bad block.
    95  	cst2.cs.mu.Lock()
    96  	id, err := cst2.cs.dbGetPath(0)
    97  	if err != nil {
    98  		t.Fatal(err)
    99  	}
   100  	cst2.cs.dbPushPath(id)
   101  	cst2.cs.mu.Unlock()
   102  
   103  	// Sleep for a few seconds to allow the network call between the two
   104  	// peers to occur.
   105  	time.Sleep(5 * time.Second)
   106  	if cst1.cs.dbBlockHeight() == cst2.cs.dbBlockHeight() {
   107  		t.Fatal("cst1 did not reject bad block")
   108  	}
   109  }
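
// waitUntil is a hypothetical helper (a sketch, not part of the original
// file) that captures the polling pattern used above: poll cond every 250ms
// for up to 50 iterations and report whether it ever became true. With it,
// the blockchain-match check in TestSynchronize could read:
//
//	if !waitUntil(func() bool {
//		return cst1.cs.dbCurrentBlockID() == cst2.cs.dbCurrentBlockID()
//	}) {
//		t.Fatal("Synchronize failed")
//	}
func waitUntil(cond func() bool) bool {
	for i := 0; i < 50; i++ {
		if cond() {
			return true
		}
		time.Sleep(250 * time.Millisecond)
	}
	return false
}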
   110  
   111  // TestBlockHistory tests that blockHistory returns the expected sequence of
   112  // block IDs.
   113  func TestBlockHistory(t *testing.T) {
   114  	if testing.Short() {
   115  		t.SkipNow()
   116  	}
   117  
   118  	cst, err := createConsensusSetTester(t.Name())
   119  	if err != nil {
   120  		t.Fatal(err)
   121  	}
   122  	defer cst.Close()
   123  
   124  	// mine until we have enough blocks to test blockHistory
   125  	for cst.cs.dbBlockHeight() < 50 {
   126  		b, _ := cst.miner.FindBlock()
   127  		err = cst.cs.AcceptBlock(b)
   128  		if err != nil {
   129  			t.Fatal(err)
   130  		}
   131  	}
   132  
   133  	var history [32]types.BlockID
   134  	_ = cst.cs.db.View(func(tx *bolt.Tx) error {
   135  		history = blockHistory(tx)
   136  		return nil
   137  	})
   138  
   139  	// validate history
   140  	cst.cs.mu.Lock()
   141  	// first 10 IDs are linear
   142  	for i := types.BlockHeight(0); i < 10; i++ {
   143  		id, err := cst.cs.dbGetPath(cst.cs.dbBlockHeight() - i)
   144  		if err != nil {
   145  			t.Fatal(err)
   146  		}
   147  		if history[i] != id {
   148  			t.Errorf("Wrong ID in history: expected %v, got %v", id, history[i])
   149  		}
   150  	}
   151  	// next 4 IDs are exponentially spaced (see the historyOffsets sketch below)
   152  	heights := []types.BlockHeight{11, 15, 23, 39}
   153  	for i, height := range heights {
   154  		id, err := cst.cs.dbGetPath(cst.cs.dbBlockHeight() - height)
   155  		if err != nil {
   156  			t.Fatal(err)
   157  		}
   158  		if history[10+i] != id {
   159  			t.Errorf("Wrong ID in history: expected %v, got %v", id, history[10+i])
   160  		}
   161  	}
   162  	// finally, the genesis ID
   163  	genesisID, err := cst.cs.dbGetPath(0)
   164  	if err != nil {
   165  		t.Fatal(err)
   166  	}
   167  	if history[31] != genesisID {
   168  		t.Errorf("Wrong ID in history: expected %v, got %v", genesisID, history[31])
   169  	}
   170  
   171  	cst.cs.mu.Unlock()
   172  
   173  	// remaining IDs should be empty
   174  	var emptyID types.BlockID
   175  	for i, id := range history[14:31] {
   176  		if id != emptyID {
   177  			t.Errorf("Expected empty ID at index %v, got %v", i+14, id)
   178  		}
   179  	}
   180  }
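
// historyOffsets is a hypothetical sketch (not part of the original file) of
// the sampling layout the assertions above rely on: the first 10 offsets step
// back one block at a time, subsequent offsets double the step each time
// (9+2=11, 11+4=15, 15+8=23, 23+16=39, ...), and the final sample is always
// the genesis block. In the real 32-slot history array the genesis ID
// occupies slot 31 and any slots in between stay as the zero BlockID; this
// sketch just lists the sampled offsets in order. It assumes height >= 10.
func historyOffsets(height types.BlockHeight) []types.BlockHeight {
	var offsets []types.BlockHeight
	// First 10 offsets are linear: 0 through 9.
	for i := types.BlockHeight(0); i < 10; i++ {
		offsets = append(offsets, i)
	}
	// Then the step size doubles until the chain is exhausted.
	offset, step := types.BlockHeight(9), types.BlockHeight(2)
	for offset+step < height && len(offsets) < 31 {
		offset += step
		step *= 2
		offsets = append(offsets, offset)
	}
	// An offset equal to height denotes the genesis block (path index 0).
	return append(offsets, height)
}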
   181  
   182  // mockGatewayCountBroadcasts implements modules.Gateway to mock the Broadcast
   183  // method.
   184  type mockGatewayCountBroadcasts struct {
   185  	modules.Gateway
   186  	numBroadcasts int
   187  	mu            sync.RWMutex
   188  }
   189  
   190  // Broadcast is a mock implementation of modules.Gateway.Broadcast that
   191  // increments a counter denoting the number of times it's been called.
   192  func (g *mockGatewayCountBroadcasts) Broadcast(name string, obj interface{}, peers []modules.Peer) {
   193  	g.mu.Lock()
   194  	g.numBroadcasts++
   195  	g.mu.Unlock()
   196  	g.Gateway.Broadcast(name, obj, peers)
   197  }
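
// The mock relies on Go interface embedding: the embedded modules.Gateway
// supplies every Gateway method, and the struct overrides only Broadcast.
// numBroadcasts is guarded by its own mutex because the consensus set may
// call Broadcast from several goroutines at once.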
   198  
   199  // TestSendBlocksBroadcastsOnce tests that the SendBlocks RPC call broadcasts
   200  // only one block, no matter how many blocks are sent. In the case where 0
   201  // blocks are sent, it tests that Broadcast is never called.
   202  func TestSendBlocksBroadcastsOnce(t *testing.T) {
   203  	if testing.Short() {
   204  		t.SkipNow()
   205  	}
   206  
   207  	// Setup consensus sets.
   208  	cst1, err := blankConsensusSetTester(t.Name()+"1", modules.ProdDependencies)
   209  	if err != nil {
   210  		t.Fatal(err)
   211  	}
   212  	defer cst1.Close()
   213  	cst2, err := blankConsensusSetTester(t.Name()+"2", modules.ProdDependencies)
   214  	if err != nil {
   215  		t.Fatal(err)
   216  	}
   217  	defer cst2.Close()
   218  	// Setup mock gateway.
   219  	mg := mockGatewayCountBroadcasts{Gateway: cst1.cs.gateway}
   220  	cst1.cs.gateway = &mg
   221  	err = cst1.cs.gateway.Connect(cst2.cs.gateway.Address())
   222  	if err != nil {
   223  		t.Fatal(err)
   224  	}
   225  
   226  	tests := []struct {
   227  		blocksToMine          int
   228  		expectedNumBroadcasts int
   229  		synced                bool
   230  	}{
   231  		// Test that no blocks are broadcast during IBD.
   232  		{
   233  			blocksToMine:          0,
   234  			expectedNumBroadcasts: 0,
   235  			synced:                false,
   236  		},
   237  		{
   238  			blocksToMine:          1,
   239  			expectedNumBroadcasts: 0,
   240  			synced:                false,
   241  		},
   242  		{
   243  			blocksToMine:          2,
   244  			expectedNumBroadcasts: 0,
   245  			synced:                false,
   246  		},
   247  		// Test that only one block is broadcast when IBD is done.
   248  		{
   249  			blocksToMine:          0,
   250  			expectedNumBroadcasts: 0,
   251  			synced:                true,
   252  		},
   253  		{
   254  			blocksToMine:          1,
   255  			expectedNumBroadcasts: 1,
   256  			synced:                true,
   257  		},
   258  		{
   259  			blocksToMine:          2,
   260  			expectedNumBroadcasts: 1,
   261  			synced:                true,
   262  		},
   263  		{
   264  			blocksToMine:          int(MaxCatchUpBlocks),
   265  			expectedNumBroadcasts: 1,
   266  			synced:                true,
   267  		},
   268  		{
   269  			blocksToMine:          2 * int(MaxCatchUpBlocks),
   270  			expectedNumBroadcasts: 1,
   271  			synced:                true,
   272  		},
   273  		{
   274  			blocksToMine:          2*int(MaxCatchUpBlocks) + 1,
   275  			expectedNumBroadcasts: 1,
   276  			synced:                true,
   277  		},
   278  	}
   279  	for j, test := range tests {
   280  		cst1.cs.mu.Lock()
   281  		cst1.cs.synced = test.synced
   282  		cst1.cs.mu.Unlock()
   283  		mg.mu.Lock()
   284  		mg.numBroadcasts = 0
   285  		mg.mu.Unlock()
   286  		for i := 0; i < test.blocksToMine; i++ {
   287  			b, minerErr := cst2.miner.FindBlock()
   288  			if minerErr != nil {
   289  				t.Fatal(minerErr)
   290  			}
   291  			// managedAcceptBlocks is used here instead of AcceptBlock so as not
   292  			// to call Broadcast outside of the SendBlocks RPC.
   293  			_, err = cst2.cs.managedAcceptBlocks([]types.Block{b})
   294  			if err != nil {
   295  				t.Fatal(err)
   296  			}
   297  		}
   298  		err = cst1.cs.gateway.RPC(cst2.cs.gateway.Address(), "SendBlocks", cst1.cs.threadedReceiveBlocks)
   299  		if err != nil {
   300  			t.Fatal(err)
   301  		}
   302  		// Sleep to wait for possible calls to Broadcast to complete. We cannot
   303  		// wait on a channel because we don't know how many times broadcast has
   304  		// been called.
   305  		time.Sleep(10 * time.Millisecond)
   306  		mg.mu.RLock()
   307  		numBroadcasts := mg.numBroadcasts
   308  		mg.mu.RUnlock()
   309  		if numBroadcasts != test.expectedNumBroadcasts {
   310  			t.Errorf("test #%d: expected %d number of broadcasts, got %d", j, test.expectedNumBroadcasts, numBroadcasts)
   311  		}
   312  	}
   313  }
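
// The 10ms sleep above exists because this mock only counts calls. A mock
// that signals on a channel lets a test block until a broadcast happens;
// mockGatewayDoesBroadcast, used later in TestIntegrationBroadcastRelayHeader,
// is defined in another file of this package and presumably follows this
// shape (a sketch under that assumption, not the actual definition):
type mockGatewayBroadcastSignal struct {
	modules.Gateway
	broadcastCalled chan struct{}
}

// Broadcast forwards to the embedded Gateway and then signals the test.
func (g *mockGatewayBroadcastSignal) Broadcast(name string, obj interface{}, peers []modules.Peer) {
	g.Gateway.Broadcast(name, obj, peers)
	g.broadcastCalled <- struct{}{}
}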
   314  
   315  // TestIntegrationRPCSendBlocks tests that the SendBlocks RPC adds blocks to
   316  // the consensus set, and that the consensus set catches up with the remote
   317  // peer, possibly reorging.
   318  func TestIntegrationRPCSendBlocks(t *testing.T) {
   319  	if testing.Short() || !build.VLONG {
   320  		t.SkipNow()
   321  	}
   322  
   323  	type sendBlocksTest struct {
   324  		commonBlocksToMine types.BlockHeight
   325  		localBlocksToMine  types.BlockHeight
   326  		remoteBlocksToMine types.BlockHeight
   327  		msg                string
   328  	}
   329  	tests := []sendBlocksTest{
   330  		{
   331  			msg: "SendBlocks shouldn't do anything when both CSs are at the genesis block",
   332  		},
   333  		{
   334  			commonBlocksToMine: 10,
   335  			msg:                "SendBlocks shouldn't do anything when both CSs are at the same block",
   336  		},
   337  		{
   338  			commonBlocksToMine: 10,
   339  			localBlocksToMine:  5,
   340  			msg:                "SendBlocks shouldn't do anything when the remote CS is behind the local CS",
   341  		},
   342  		{
   343  			commonBlocksToMine: 10,
   344  			remoteBlocksToMine: 5,
   345  			msg:                "SendBlocks should catch up the local CS to the remote CS when it is behind",
   346  		},
   347  		{
   348  			remoteBlocksToMine: 10,
   349  			localBlocksToMine:  5,
   350  			msg:                "SendBlocks should reorg the local CS when the remote CS's chain is longer",
   351  		},
   352  		{
   353  			commonBlocksToMine: 10,
   354  			remoteBlocksToMine: 10,
   355  			localBlocksToMine:  5,
   356  			msg:                "SendBlocks should reorg the local CS when the remote CS's chain is longer",
   357  		},
   358  		{
   359  			remoteBlocksToMine: MaxCatchUpBlocks - 1,
   360  			msg:                "SendBlocks should catch up when the remote CS is ahead",
   361  		},
   362  		{
   363  			remoteBlocksToMine: MaxCatchUpBlocks - 1,
   364  			localBlocksToMine:  MaxCatchUpBlocks - 2,
   365  			msg:                "SendBlocks should reorg the local CS when the remote CS's chain is longer",
   366  		},
   367  		{
   368  			remoteBlocksToMine: MaxCatchUpBlocks,
   369  			msg:                "SendBlocks should catch up when the remote CS is ahead",
   370  		},
   371  		{
   372  			remoteBlocksToMine: MaxCatchUpBlocks,
   373  			localBlocksToMine:  MaxCatchUpBlocks - 2,
   374  			msg:                "SendBlocks should reorg the local CS when the remote CS's chain is longer",
   375  		},
   376  		{
   377  			remoteBlocksToMine: MaxCatchUpBlocks + 1, // There was a bug that caused SendBlocks to be one block behind when its peer was ahead by (k * MaxCatchUpBlocks) + 1 blocks.
   378  			msg:                "SendBlocks should catch up when the remote CS is ahead",
   379  		},
   380  		{
   381  			remoteBlocksToMine: MaxCatchUpBlocks + 1, // There was a bug that caused SendBlocks to be one block behind when its peer was ahead by (k * MaxCatchUpBlocks) + 1 blocks.
   382  			localBlocksToMine:  MaxCatchUpBlocks - 2,
   383  			msg:                "SendBlocks should reorg the local CS when the remote CS's chain is longer",
   384  		},
   385  		{
   386  			remoteBlocksToMine: 2*MaxCatchUpBlocks + 1,
   387  			msg:                "SendBlocks should catch up when the remote CS is ahead",
   388  		},
   389  		{
   390  			remoteBlocksToMine: 2*MaxCatchUpBlocks + 1,
   391  			localBlocksToMine:  2*MaxCatchUpBlocks - 2,
   392  			msg:                "SendBlocks should reorg the local CS when the remote CS's chain is longer",
   393  		},
   394  		{
   395  			remoteBlocksToMine: 12,
   396  			msg:                "SendBlocks should catch up when the remote CS is ahead",
   397  		},
   398  		{
   399  			remoteBlocksToMine: 15,
   400  			msg:                "SendBlocks should catch up when the remote CS is ahead",
   401  		},
   402  		{
   403  			remoteBlocksToMine: 16,
   404  			msg:                "SendBlocks should catch up when the remote CS is ahead",
   405  		},
   406  		{
   407  			remoteBlocksToMine: 17,
   408  			msg:                "SendBlocks should catch up when the remote CS is ahead",
   409  		},
   410  		{
   411  			remoteBlocksToMine: 23,
   412  			msg:                "SendBlocks should catch up when the remote CS is ahead",
   413  		},
   414  		{
   415  			remoteBlocksToMine: 31,
   416  			msg:                "SendBlocks should catch up when the remote CS is ahead",
   417  		},
   418  		{
   419  			remoteBlocksToMine: 32,
   420  			msg:                "SendBlocks should catch up when the remote CS is ahead",
   421  		},
   422  		{
   423  			remoteBlocksToMine: 33,
   424  			msg:                "SendBlocks should catch up when the remote CS is ahead",
   425  		},
   426  	}
   427  	for i := 1; i < 10; i++ {
   428  		tests = append(tests, sendBlocksTest{
   429  			remoteBlocksToMine: types.BlockHeight(i),
   430  			msg:                "SendBlocks should catch up when the remote CS is ahead",
   431  		})
   432  	}
   433  
   434  	for i, tt := range tests {
   435  		// Create the "remote" peer.
   436  		remoteCST, err := blankConsensusSetTester(filepath.Join(t.Name()+" - remote", strconv.Itoa(i)), modules.ProdDependencies)
   437  		if err != nil {
   438  			t.Fatalf("test #%d, %v: %v", i, tt.msg, err)
   439  		}
   440  		// Create the "local" peer.
   441  		localCST, err := blankConsensusSetTester(filepath.Join(t.Name()+" - local", strconv.Itoa(i)), modules.ProdDependencies)
   442  		if err != nil {
   443  			t.Fatalf("test #%d, %v: %v", i, tt.msg, err)
   444  		}
   445  
   446  		localCST.cs.gateway.Connect(remoteCST.cs.gateway.Address())
   447  		// Wait a moment to let the on-connect RPCs finish.
   448  		time.Sleep(100 * time.Millisecond)
   449  
   450  		// Mine blocks.
   451  		for j := types.BlockHeight(0); j < tt.commonBlocksToMine; j++ {
   452  			b, err := remoteCST.miner.FindBlock()
   453  			if err != nil {
   454  				t.Fatalf("test #%d, %v: %v", i, tt.msg, err)
   455  			}
   456  			_, err = remoteCST.cs.managedAcceptBlocks([]types.Block{b})
   457  			if err != nil {
   458  				t.Fatalf("test #%d, %v: %v", i, tt.msg, err)
   459  			}
   460  			_, err = localCST.cs.managedAcceptBlocks([]types.Block{b})
   461  			if err != nil {
   462  				t.Fatalf("test #%d, %v: %v", i, tt.msg, err)
   463  			}
   464  		}
   465  		for j := types.BlockHeight(0); j < tt.remoteBlocksToMine; j++ {
   466  			b, err := remoteCST.miner.FindBlock()
   467  			if err != nil {
   468  				t.Fatalf("test #%d, %v: %v", i, tt.msg, err)
   469  			}
   470  			_, err = remoteCST.cs.managedAcceptBlocks([]types.Block{b})
   471  			if err != nil {
   472  				t.Fatalf("test #%d, %v: %v", i, tt.msg, err)
   473  			}
   474  		}
   475  		for j := types.BlockHeight(0); j < tt.localBlocksToMine; j++ {
   476  			b, err := localCST.miner.FindBlock()
   477  			if err != nil {
   478  				t.Fatalf("test #%d, %v: %v", i, tt.msg, err)
   479  			}
   480  			_, err = localCST.cs.managedAcceptBlocks([]types.Block{b})
   481  			if err != nil {
   482  				t.Fatalf("test #%d, %v: %v", i, tt.msg, err)
   483  			}
   484  		}
   485  
   486  		localCurrentBlockID := localCST.cs.CurrentBlock().ID()
   487  		remoteCurrentBlockID := remoteCST.cs.CurrentBlock().ID()
   488  
   489  		err = localCST.cs.gateway.RPC(remoteCST.cs.gateway.Address(), "SendBlocks", localCST.cs.threadedReceiveBlocks)
   490  		if err != nil {
   491  			t.Errorf("test #%d, %v: %v", i, tt.msg, err)
   492  		}
   493  
   494  		// Assume that if remoteBlocksToMine is greater than localBlocksToMine, then
   495  		// the local CS must have received the new blocks (and reorged).
   496  		if tt.remoteBlocksToMine > tt.localBlocksToMine {
   497  			// Verify that the remote cs did not change.
   498  			if remoteCST.cs.CurrentBlock().ID() != remoteCurrentBlockID {
   499  				t.Errorf("test #%d, %v: the remote CS is at a different current block than before SendBlocks", i, tt.msg)
   500  			}
   501  			// Verify that the local cs got the new blocks.
   502  			if localCST.cs.Height() != remoteCST.cs.Height() {
   503  				t.Errorf("test #%d, %v: expected height %v, got %v", i, tt.msg, remoteCST.cs.Height(), localCST.cs.Height())
   504  			}
   505  			if localCST.cs.CurrentBlock().ID() != remoteCST.cs.CurrentBlock().ID() {
   506  				t.Errorf("test #%d, %v: remote and local CSTs have different current blocks", i, tt.msg)
   507  			}
   508  		} else {
   509  			// Verify that the local cs did not change.
   510  			if localCST.cs.CurrentBlock().ID() != localCurrentBlockID {
   511  				t.Errorf("test #%d, %v: the local CS is at a different current block than before SendBlocks", i, tt.msg)
   512  			}
   513  		}
   514  
   515  		// Cleanup.
   516  		err = localCST.Close()
   517  		if err != nil {
   518  			t.Fatalf("test #%d, %v: %v", i, tt.msg, err)
   519  		}
   520  		err = remoteCST.Close()
   521  		if err != nil {
   522  			t.Fatalf("test #%d, %v: %v", i, tt.msg, err)
   523  		}
   524  	}
   525  }
   526  
   527  // TestRPCSendBlockSendsOnlyNecessaryBlocks tests that the SendBlocks RPC only
   528  // sends blocks that the caller does not have and that are part of the longest
   529  // chain.
   530  func TestRPCSendBlockSendsOnlyNecessaryBlocks(t *testing.T) {
   531  	if testing.Short() {
   532  		t.SkipNow()
   533  	}
   534  
   535  	// Create the "remote" peer.
   536  	cst, err := blankConsensusSetTester(t.Name()+"- remote", modules.ProdDependencies)
   537  	if err != nil {
   538  		t.Fatal(err)
   539  	}
   540  	defer cst.Close()
   541  	// Create the "local" peer.
   542  	//
   543  	// We create this peer manually (not using blankConsensusSetTester) so that
   544  	// we can connect it to the remote peer before calling consensus.New, which
   545  	// prevents SendBlocks from triggering on Connect.
   546  	testdir := build.TempDir(modules.ConsensusDir, t.Name()+" - local")
   547  	g, err := gateway.New("localhost:0", false, filepath.Join(testdir, modules.GatewayDir))
   548  	if err != nil {
   549  		t.Fatal(err)
   550  	}
   551  	defer g.Close()
   552  	err = g.Connect(cst.cs.gateway.Address())
   553  	if err != nil {
   554  		t.Fatal(err)
   555  	}
   556  	cs, err := New(g, false, filepath.Join(testdir, modules.ConsensusDir))
   557  	if err != nil {
   558  		t.Fatal(err)
   559  	}
   560  	defer cs.Close()
   561  
   562  	// Add a few initial blocks to both consensus sets. These are the blocks we
   563  	// want to make sure SendBlocks is not sending unnecessarily as both parties
   564  	// already have them.
   565  	knownBlocks := make(map[types.BlockID]struct{})
   566  	for i := 0; i < 20; i++ {
   567  		b, err := cst.miner.FindBlock()
   568  		if err != nil {
   569  			t.Fatal(err)
   570  		}
   571  		_, err = cst.cs.managedAcceptBlocks([]types.Block{b})
   572  		if err != nil {
   573  			t.Fatal(err)
   574  		}
   575  		_, err = cs.managedAcceptBlocks([]types.Block{b})
   576  		if err != nil {
   577  			t.Fatal(err)
   578  		}
   579  		knownBlocks[b.ID()] = struct{}{}
   580  	}
   581  
   582  	// Add a few blocks to only the remote peer and store which blocks we add.
   583  	addedBlocks := make(map[types.BlockID]struct{})
   584  	for i := 0; i < 20; i++ {
   585  		b, err := cst.miner.FindBlock()
   586  		if err != nil {
   587  			t.Fatal(err)
   588  		}
   589  		_, err = cst.cs.managedAcceptBlocks([]types.Block{b})
   590  		if err != nil {
   591  			t.Fatal(err)
   592  		}
   593  		addedBlocks[b.ID()] = struct{}{}
   594  	}
   595  
   596  	err = cs.gateway.RPC(cst.cs.gateway.Address(), "SendBlocks", func(conn modules.PeerConn) error {
   597  		// Get blockIDs to send.
   598  		var history [32]types.BlockID
   599  		cs.mu.RLock()
   600  		err := cs.db.View(func(tx *bolt.Tx) error {
   601  			history = blockHistory(tx)
   602  			return nil
   603  		})
   604  		cs.mu.RUnlock()
   605  		if err != nil {
   606  			return err
   607  		}
   608  
   609  		// Send the block ids.
   610  		if err := encoding.WriteObject(conn, history); err != nil {
   611  			return err
   612  		}
   613  
   614  		moreAvailable := true
   615  		for moreAvailable {
   616  			// Read a slice of blocks from the wire.
   617  			var newBlocks []types.Block
   618  			if err := encoding.ReadObject(conn, &newBlocks, uint64(MaxCatchUpBlocks)*types.BlockSizeLimit); err != nil {
   619  				return err
   620  			}
   621  			if err := encoding.ReadObject(conn, &moreAvailable, 1); err != nil {
   622  				return err
   623  			}
   624  
   625  			// Check if the block needed to be sent.
   626  			for _, newB := range newBlocks {
   627  				_, ok := knownBlocks[newB.ID()]
   628  				if ok {
   629  					t.Error("SendBlocks sent an unnecessary block that the caller already had")
   630  					continue
   631  				}
   632  				_, ok = addedBlocks[newB.ID()]
   633  				if !ok {
   634  					t.Error("SendBlocks sent an unnecessary block that the caller did not have")
   635  				}
   636  			}
   637  		}
   638  		return nil
   639  	})
   640  	if err != nil {
   641  		t.Fatal(err)
   642  	}
   643  }
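
// For reference, the server side of the exchange driven above mirrors the
// client loop: read the caller's 32-ID history, then repeatedly send a batch
// of blocks followed by a moreAvailable flag. sendBlocksSketch is a
// condensed, hypothetical illustration of that wire protocol (the block
// lookup is elided; this is not the actual rpcSendBlocks implementation):
func sendBlocksSketch(conn modules.PeerConn, batches [][]types.Block) error {
	// Read which blocks the caller already knows about.
	var knownBlocks [32]types.BlockID
	if err := encoding.ReadObject(conn, &knownBlocks, 32*crypto.HashSize); err != nil {
		return err
	}
	// Send each batch, flagging whether more batches follow.
	for i, batch := range batches {
		if err := encoding.WriteObject(conn, batch); err != nil {
			return err
		}
		moreAvailable := i != len(batches)-1
		if err := encoding.WriteObject(conn, moreAvailable); err != nil {
			return err
		}
	}
	return nil
}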
   644  
   645  // Mock PeerConns for testing conns that fail reading or writing.
   646  type (
   647  	mockPeerConn struct {
   648  		net.Conn
   649  	}
   650  	mockPeerConnFailingReader struct {
   651  		mockPeerConn
   652  	}
   653  	mockPeerConnFailingWriter struct {
   654  		mockPeerConn
   655  	}
   656  )
   657  
   658  var (
   659  	errFailingReader = errors.New("failing reader")
   660  	errFailingWriter = errors.New("failing writer")
   661  )
   662  
   663  // Close returns 'nil', and does nothing behind the scenes. This is because the
   664  // tests reuse pipes, while the consensus code now correctly closes conns after
   665  // handling them.
   666  func (pc mockPeerConn) Close() error {
   667  	return nil
   668  }
   669  
   670  // RPCAddr implements this method of the modules.PeerConn interface.
   671  func (pc mockPeerConn) RPCAddr() modules.NetAddress {
   672  	return "mockPeerConn dialback addr"
   673  }
   674  
   675  // SetDeadline returns 'nil', and does nothing behind the scenes.
   676  func (pc mockPeerConn) SetDeadline(time.Time) error {
   677  	return nil
   678  }
   679  
   680  // Read is a mock implementation of modules.PeerConn.Read that always returns
   681  // an error.
   682  func (mockPeerConnFailingReader) Read([]byte) (int, error) {
   683  	return 0, errFailingReader
   684  }
   685  
   686  // Write is a mock implementation of modules.PeerConn.Write that always returns
   687  // an error.
   688  func (mockPeerConnFailingWriter) Write([]byte) (int, error) {
   689  	return 0, errFailingWriter
   690  }
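
// Both failing mocks embed mockPeerConn, inheriting its no-op Close, RPCAddr,
// and SetDeadline methods and overriding only the call they need to break;
// the net.Conn half of a pipe supplies the remaining behavior.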
   691  
   692  // TestSendBlk probes the ConsensusSet.rpcSendBlk method and tests that it
   693  // correctly receives block ids and writes out the corresponding blocks.
   694  func TestSendBlk(t *testing.T) {
   695  	if testing.Short() {
   696  		t.SkipNow()
   697  	}
   698  	cst, err := blankConsensusSetTester(t.Name(), modules.ProdDependencies)
   699  	if err != nil {
   700  		t.Fatal(err)
   701  	}
   702  	defer cst.Close()
   703  
   704  	p1, p2 := net.Pipe()
   705  	mockP1 := mockPeerConn{p1}
   706  	fnErr := make(chan error)
   707  
   708  	tests := []struct {
   709  		id      types.BlockID
   710  		conn    modules.PeerConn
   711  		fn      func() // handle reading and writing over the pipe to the mock conn.
   712  		errWant error
   713  		msg     string
   714  	}{
   715  		// TODO: Test with a failing database.
   716  		// Test with a failing reader.
   717  		{
   718  			conn:    mockPeerConnFailingReader{mockP1},
   719  			fn:      func() { fnErr <- nil },
   720  			errWant: errFailingReader,
   721  			msg:     "expected rpcSendBlk to error with a failing reader conn",
   722  		},
   723  		// Test with a block id not found in the blockmap.
   724  		{
   725  			conn: mockP1,
   726  			fn: func() {
   727  				// Write a block id to the conn.
   728  				fnErr <- encoding.WriteObject(p2, types.BlockID{})
   729  			},
   730  			errWant: errNilItem,
   731  			msg:     "expected rpcSendBlk to error with a nonexistent block id",
   732  		},
   733  		// Test with a failing writer.
   734  		{
   735  			conn: mockPeerConnFailingWriter{mockP1},
   736  			fn: func() {
   737  				// Write a valid block id to the conn.
   738  				fnErr <- encoding.WriteObject(p2, types.GenesisID)
   739  			},
   740  			errWant: errFailingWriter,
   741  			msg:     "expected rpcSendBlk to error with a failing writer conn",
   742  		},
   743  		// Test with a valid conn and valid block.
   744  		{
   745  			conn: mockP1,
   746  			fn: func() {
   747  				// Write a valid block id to the conn.
   748  				if err := encoding.WriteObject(p2, types.GenesisID); err != nil {
   749  					fnErr <- err
   750  				}
   751  
   752  				// Read the block written to the conn.
   753  				var block types.Block
   754  				if err := encoding.ReadObject(p2, &block, types.BlockSizeLimit); err != nil {
   755  					fnErr <- err
   756  				}
   757  				// Verify the block is the expected block.
   758  				if block.ID() != types.GenesisID {
   759  					fnErr <- fmt.Errorf("rpcSendBlk wrote a different block to conn than the block requested. requested block id: %v, received block id: %v", types.GenesisID, block.ID())
   760  				}
   761  
   762  				fnErr <- nil
   763  			},
   764  			errWant: nil,
   765  			msg:     "expected rpcSendBlk to succeed with a valid conn and valid block",
   766  		},
   767  	}
   768  	for _, tt := range tests {
   769  		go tt.fn()
   770  		err := cst.cs.rpcSendBlk(tt.conn)
   771  		if err != tt.errWant {
   772  			t.Errorf("%s: expected to fail with `%v', got: `%v'", tt.msg, tt.errWant, err)
   773  		}
   774  		err = <-fnErr
   775  		if err != nil {
   776  			t.Fatal(err)
   777  		}
   778  	}
   779  }
   780  
   781  // TestThreadedReceiveBlock probes the RPCFunc returned by
   782  // cs.managedReceiveBlock and tests that it correctly requests a block id and
   783  // receives a block. Also tests that the block is correctly (not) accepted into
   784  // the consensus set.
   785  func TestThreadedReceiveBlock(t *testing.T) {
   786  	if testing.Short() {
   787  		t.SkipNow()
   788  	}
   789  	cst, err := blankConsensusSetTester(t.Name(), modules.ProdDependencies)
   790  	if err != nil {
   791  		t.Fatal(err)
   792  	}
   793  	defer cst.Close()
   794  
   795  	p1, p2 := net.Pipe()
   796  	mockP1 := mockPeerConn{p1}
   797  	fnErr := make(chan error)
   798  
   799  	tests := []struct {
   800  		id      types.BlockID
   801  		conn    modules.PeerConn
   802  		fn      func() // handle reading and writing over the pipe to the mock conn.
   803  		errWant error
   804  		msg     string
   805  	}{
   806  		// Test with failing writer.
   807  		{
   808  			conn:    mockPeerConnFailingWriter{mockP1},
   809  			fn:      func() { fnErr <- nil },
   810  			errWant: errFailingWriter,
   811  			msg:     "the function returned from threadedReceiveBlock should fail with a PeerConn with a failing writer",
   812  		},
   813  		// Test with failing reader.
   814  		{
   815  			conn: mockPeerConnFailingReader{mockP1},
   816  			fn: func() {
   817  				// Read the id written to conn.
   818  				var id types.BlockID
   819  				if err := encoding.ReadObject(p2, &id, crypto.HashSize); err != nil {
   820  					fnErr <- err
   821  				}
   822  				// Verify the id is the expected id.
   823  				expectedID := types.BlockID{}
   824  				if id != expectedID {
   825  					fnErr <- fmt.Errorf("id written to conn was %v, but id received was %v", expectedID, id)
   826  				}
   827  				fnErr <- nil
   828  			},
   829  			errWant: errFailingReader,
   830  			msg:     "the function returned from threadedReceiveBlock should fail with a PeerConn with a failing reader",
   831  		},
   832  		// Test with a valid conn, but an invalid block.
   833  		{
   834  			id:   types.BlockID{1},
   835  			conn: mockP1,
   836  			fn: func() {
   837  				// Read the id written to conn.
   838  				var id types.BlockID
   839  				if err := encoding.ReadObject(p2, &id, crypto.HashSize); err != nil {
   840  					fnErr <- err
   841  				}
   842  				// Verify the id is the expected id.
   843  				expectedID := types.BlockID{1}
   844  				if id != expectedID {
   845  					fnErr <- fmt.Errorf("id written to conn was %v, but id received was %v", expectedID, id)
   846  				}
   847  
   848  				// Write an invalid block to conn.
   849  				block := types.Block{}
   850  				if err := encoding.WriteObject(p2, block); err != nil {
   851  					fnErr <- err
   852  				}
   853  
   854  				fnErr <- nil
   855  			},
   856  			errWant: errOrphan,
   857  			msg:     "the function returned from threadedReceiveBlock should not accept an invalid block",
   858  		},
   859  		// Test with a valid conn and a valid block.
   860  		{
   861  			id:   types.BlockID{2},
   862  			conn: mockP1,
   863  			fn: func() {
   864  				// Read the id written to conn.
   865  				var id types.BlockID
   866  				if err := encoding.ReadObject(p2, &id, crypto.HashSize); err != nil {
   867  					fnErr <- err
   868  				}
   869  				// Verify the id is the expected id.
   870  				expectedID := types.BlockID{2}
   871  				if id != expectedID {
   872  					fnErr <- fmt.Errorf("id written to conn was %v, but id received was %v", expectedID, id)
   873  				}
   874  
   875  				// Write a valid block to conn.
   876  				block, err := cst.miner.FindBlock()
   877  				if err != nil {
   878  					fnErr <- err
   879  				}
   880  				if err := encoding.WriteObject(p2, block); err != nil {
   881  					fnErr <- err
   882  				}
   883  
   884  				fnErr <- nil
   885  			},
   886  			errWant: nil,
   887  			msg:     "the function returned from managedReceiveBlock should accept a valid block",
   888  		},
   889  	}
   890  	for _, tt := range tests {
   891  		managedReceiveFN := cst.cs.managedReceiveBlock(tt.id)
   892  		go tt.fn()
   893  		err := managedReceiveFN(tt.conn)
   894  		if err != tt.errWant {
   895  			t.Errorf("%s: expected to fail with `%v', got: `%v'", tt.msg, tt.errWant, err)
   896  		}
   897  		err = <-fnErr
   898  		if err != nil {
   899  			t.Fatal(err)
   900  		}
   901  	}
   902  }
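
// The cases above pin down the contract of the RPCFunc that
// managedReceiveBlock returns: write the requested ID, read back a single
// block, and hand it to the consensus set. receiveBlockSketch is a
// hypothetical condensation of that contract as the tests exercise it (the
// real method also handles locking and shutdown):
func receiveBlockSketch(cs *ConsensusSet, id types.BlockID) modules.RPCFunc {
	return func(conn modules.PeerConn) error {
		// Request the block by ID.
		if err := encoding.WriteObject(conn, id); err != nil {
			return err
		}
		// Read the block back and submit it to the consensus set.
		var block types.Block
		if err := encoding.ReadObject(conn, &block, types.BlockSizeLimit); err != nil {
			return err
		}
		_, err := cs.managedAcceptBlocks([]types.Block{block})
		return err
	}
}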
   903  
   904  // TestIntegrationSendBlkRPC probes the SendBlk RPC and tests that blocks are
   905  // correctly requested, received, and accepted into the consensus set.
   906  func TestIntegrationSendBlkRPC(t *testing.T) {
   907  	if testing.Short() {
   908  		t.SkipNow()
   909  	}
   910  	cst1, err := blankConsensusSetTester(t.Name()+"1", modules.ProdDependencies)
   911  	if err != nil {
   912  		t.Fatal(err)
   913  	}
   914  	defer cst1.Close()
   915  	cst2, err := blankConsensusSetTester(t.Name()+"2", modules.ProdDependencies)
   916  	if err != nil {
   917  		t.Fatal(err)
   918  	}
   919  	defer cst2.Close()
   920  
   921  	err = cst1.cs.gateway.Connect(cst2.cs.gateway.Address())
   922  	if err != nil {
   923  		t.Fatal(err)
   924  	}
   925  	// Sleep to give the consensus sets time to finish the background startup
   926  	// routines - if the block mined below is mined before the sets finish
   927  	// synchronizing to each other, it screws up the test.
   928  	time.Sleep(500 * time.Millisecond)
   929  
   930  	// Test that cst1 doesn't accept a block it's already seen (the genesis block).
   931  	err = cst1.cs.gateway.RPC(cst2.cs.gateway.Address(), "SendBlk", cst1.cs.managedReceiveBlock(types.GenesisID))
   932  	if err != modules.ErrBlockKnown && err != modules.ErrNonExtendingBlock {
   933  		t.Errorf("cst1 should reject known blocks: expected error '%v', got '%v'", modules.ErrBlockKnown, err)
   934  	}
   935  	// Test that cst2 errors when it doesn't recognize the requested block.
   936  	err = cst1.cs.gateway.RPC(cst2.cs.gateway.Address(), "SendBlk", cst1.cs.managedReceiveBlock(types.BlockID{}))
   937  	if err != io.EOF {
   938  		t.Errorf("cst2 shouldn't return a block it doesn't recognize: expected error '%v', got '%v'", io.EOF, err)
   939  	}
   940  
   941  	// Test that cst1 accepts a block that extends its longest chain.
   942  	block, err := cst2.miner.FindBlock()
   943  	if err != nil {
   944  		t.Fatal(err)
   945  	}
   946  	_, err = cst2.cs.managedAcceptBlocks([]types.Block{block}) // Call managedAcceptBlocks so that the block isn't broadcast.
   947  	if err != nil {
   948  		t.Fatal(err)
   949  	}
   950  	err = cst1.cs.gateway.RPC(cst2.cs.gateway.Address(), "SendBlk", cst1.cs.managedReceiveBlock(block.ID()))
   951  	if err != nil {
   952  		t.Errorf("cst1 should accept a block that extends its longest chain: expected nil error, got '%v'", err)
   953  	}
   954  
   955  	// Test that cst2 accepts a block that extends its longest chain.
   956  	block, err = cst1.miner.FindBlock()
   957  	if err != nil {
   958  		t.Fatal(err)
   959  	}
   960  	_, err = cst1.cs.managedAcceptBlocks([]types.Block{block}) // Call managedAcceptBlocks so that the block isn't broadcast.
   961  	if err != nil {
   962  		t.Fatal(err)
   963  	}
   964  	err = cst2.cs.gateway.RPC(cst1.cs.gateway.Address(), "SendBlk", cst2.cs.managedReceiveBlock(block.ID()))
   965  	if err != nil {
   966  		t.Errorf("cst2 should accept a block that extends its longest chain: expected nil error, got '%v'", err)
   967  	}
   968  
   969  	// Test that cst1 doesn't accept an orphan block.
   970  	block, err = cst2.miner.FindBlock()
   971  	if err != nil {
   972  		t.Fatal(err)
   973  	}
   974  	_, err = cst2.cs.managedAcceptBlocks([]types.Block{block}) // Call managedAcceptBlocks so that the block isn't broadcast.
   975  	if err != nil {
   976  		t.Fatal(err)
   977  	}
   978  	block, err = cst2.miner.FindBlock()
   979  	if err != nil {
   980  		t.Fatal(err)
   981  	}
   982  	_, err = cst2.cs.managedAcceptBlocks([]types.Block{block}) // Call managedAcceptBlocks so that the block isn't broadcast.
   983  	if err != nil {
   984  		t.Fatal(err)
   985  	}
   986  	err = cst1.cs.gateway.RPC(cst2.cs.gateway.Address(), "SendBlk", cst1.cs.managedReceiveBlock(block.ID()))
   987  	if err != errOrphan {
   988  		t.Errorf("cst1 should not accept an orphan block: expected error '%v', got '%v'", errOrphan, err)
   989  	}
   990  }
   991  
   992  type mockGatewayCallsRPC struct {
   993  	modules.Gateway
   994  	rpcCalled chan string
   995  }
   996  
   997  func (g *mockGatewayCallsRPC) RPC(addr modules.NetAddress, name string, fn modules.RPCFunc) error {
   998  	g.rpcCalled <- name
   999  	return nil
  1000  }
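
// Because rpcCalled is unbuffered, a call to the mocked RPC blocks until the
// test receives from the channel. This gives TestRelayHeader below a
// synchronization point: it can assert both that an RPC fired and which one,
// or time out when none was expected.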
  1001  
  1002  // TestRelayHeader tests that rpcRelayHeader requests the corresponding blocks
  1003  // for valid headers with known parents, and requests the block history for
  1004  // orphan headers.
  1005  func TestRelayHeader(t *testing.T) {
  1006  	if testing.Short() {
  1007  		t.SkipNow()
  1008  	}
  1009  	cst, err := blankConsensusSetTester(t.Name(), modules.ProdDependencies)
  1010  	if err != nil {
  1011  		t.Fatal(err)
  1012  	}
  1013  	defer cst.Close()
  1014  
  1015  	mg := &mockGatewayCallsRPC{
  1016  		Gateway:   cst.cs.gateway,
  1017  		rpcCalled: make(chan string),
  1018  	}
  1019  	cst.cs.gateway = mg
  1020  
  1021  	p1, p2 := net.Pipe()
  1022  	mockP2 := mockPeerConn{p2}
  1023  
  1024  	// Valid block that rpcRelayHeader should accept.
  1025  	validBlock, err := cst.miner.FindBlock()
  1026  	if err != nil {
  1027  		t.Fatal(err)
  1028  	}
  1029  
  1030  	// A block in the near future that rpcRelayHeader should not return an
  1031  	// error for, but should still request the corresponding block.
  1032  	block, target, err := cst.miner.BlockForWork()
  1033  	if err != nil {
  1034  		t.Fatal(err)
  1035  	}
  1036  	block.Timestamp = types.CurrentTimestamp() + 2 + types.FutureThreshold
  1037  	futureBlock, _ := cst.miner.SolveBlock(block, target)
  1038  
  1039  	tests := []struct {
  1040  		header  types.BlockHeader
  1041  		errWant error
  1042  		errMSG  string
  1043  		rpcWant string
  1044  		rpcMSG  string
  1045  	}{
  1046  		// Test that rpcRelayHeader rejects known blocks.
  1047  		{
  1048  			header:  types.GenesisBlock.Header(),
  1049  			errWant: modules.ErrBlockKnown,
  1050  			errMSG:  "rpcRelayHeader should reject headers to known blocks",
  1051  		},
  1052  		// Test that rpcRelayHeader requests the parent blocks of orphan headers.
  1053  		{
  1054  			header:  types.BlockHeader{},
  1055  			errWant: nil,
  1056  			errMSG:  "rpcRelayHeader should not return an error for orphan headers",
  1057  			rpcWant: "SendBlocks",
  1058  			rpcMSG:  "rpcRelayHeader should request blocks when the relayed header is an orphan",
  1059  		},
  1060  		// Test that rpcRelayHeader accepts a valid header that extends the longest chain.
  1061  		{
  1062  			header:  validBlock.Header(),
  1063  			errWant: nil,
  1064  			errMSG:  "rpcRelayHeader should accept a valid header",
  1065  			rpcWant: "SendBlk",
  1066  			rpcMSG:  "rpcRelayHeader should request the block of a valid header",
  1067  		},
  1068  		// Test that rpcRelayHeader requests a future, but otherwise valid block.
  1069  		{
  1070  			header:  futureBlock.Header(),
  1071  			errWant: nil,
  1072  			errMSG:  "rpcRelayHeader should not return an error for a future header",
  1073  			rpcWant: "SendBlk",
  1074  			rpcMSG:  "rpcRelayHeader should request the corresponding block to a future, but otherwise valid header",
  1075  		},
  1076  	}
  1077  	errChan := make(chan error)
  1078  	for _, tt := range tests {
  1079  		go func(header types.BlockHeader) {
  1080  			errChan <- encoding.WriteObject(p1, header)
  1081  		}(tt.header)
  1082  		err = cst.cs.threadedRPCRelayHeader(mockP2)
  1083  		if err != tt.errWant {
  1084  			t.Errorf("%s: expected '%v', got '%v'", tt.errMSG, tt.errWant, err)
  1085  		}
  1086  		err = <-errChan
  1087  		if err != nil {
  1088  			t.Fatal(err)
  1089  		}
  1090  		if tt.rpcWant == "" {
  1091  			select {
  1092  			case rpc := <-mg.rpcCalled:
  1093  				t.Errorf("no RPC call expected, but '%v' was called", rpc)
  1094  			case <-time.After(10 * time.Millisecond):
  1095  			}
  1096  		} else {
  1097  			select {
  1098  			case rpc := <-mg.rpcCalled:
  1099  				if rpc != tt.rpcWant {
  1100  					t.Errorf("%s: expected '%v', got '%v'", tt.rpcMSG, tt.rpcWant, rpc)
  1101  				}
  1102  			case <-time.After(10 * time.Millisecond):
  1103  				t.Errorf("%s: expected '%v', but no RPC was called", tt.rpcMSG, tt.rpcWant)
  1104  			}
  1105  		}
  1106  	}
  1107  }
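
// Taken together, the table above specifies rpcRelayHeader's dispatch: reject
// headers of known blocks, fall back to a full SendBlocks for orphan headers,
// and fetch just the one block for any header that could extend the chain,
// including future-timestamped ones. relayHeaderDispatchSketch is a
// hypothetical condensation of that decision; the header-classification step
// is assumed to have already produced classifyErr:
func relayHeaderDispatchSketch(cs *ConsensusSet, addr modules.NetAddress, classifyErr error, blockFetch modules.RPCFunc) error {
	switch classifyErr {
	case modules.ErrBlockKnown:
		// Known block: reject the relayed header outright.
		return modules.ErrBlockKnown
	case errOrphan:
		// Orphan header: we may be far behind, so request the peer's history.
		go cs.gateway.RPC(addr, "SendBlocks", cs.threadedReceiveBlocks)
		return nil
	default:
		// Plausible chain extension: request only the corresponding block.
		go cs.gateway.RPC(addr, "SendBlk", blockFetch)
		return nil
	}
}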
  1108  
  1109  // TestIntegrationBroadcastRelayHeader checks that broadcasting RelayHeader
  1110  // causes peers to also broadcast the header (if the block is valid).
  1111  func TestIntegrationBroadcastRelayHeader(t *testing.T) {
  1112  	if testing.Short() {
  1113  		t.SkipNow()
  1114  	}
  1115  	// Setup consensus sets.
  1116  	cst1, err := blankConsensusSetTester(t.Name()+"1", modules.ProdDependencies)
  1117  	if err != nil {
  1118  		t.Fatal(err)
  1119  	}
  1120  	defer cst1.Close()
  1121  	cst2, err := blankConsensusSetTester(t.Name()+"2", modules.ProdDependencies)
  1122  	if err != nil {
  1123  		t.Fatal(err)
  1124  	}
  1125  	defer cst2.Close()
  1126  	// Setup mock gateway.
  1127  	mg := &mockGatewayDoesBroadcast{
  1128  		Gateway:         cst2.cs.gateway,
  1129  		broadcastCalled: make(chan struct{}),
  1130  	}
  1131  	cst2.cs.gateway = mg
  1132  	err = cst1.cs.gateway.Connect(cst2.cs.gateway.Address())
  1133  	if err != nil {
  1134  		t.Fatal(err)
  1135  	}
  1136  	// Give time for the on-connect RPCs to finish.
  1137  	time.Sleep(500 * time.Millisecond)
  1138  
  1139  	// Test that broadcasting an invalid block header over RelayHeader on cst1.cs
  1140  	// does not result in cst2.cs.gateway receiving a broadcast.
  1141  	cst1.cs.gateway.Broadcast("RelayHeader", types.BlockHeader{}, cst1.cs.gateway.Peers())
  1142  	select {
  1143  	case <-mg.broadcastCalled:
  1144  		t.Fatal("RelayHeader broadcasted an invalid block header")
  1145  	case <-time.After(500 * time.Millisecond):
  1146  	}
  1147  
  1148  	// Test that broadcasting a valid block header over RelayHeader on cst1.cs
  1149  	// causes cst2.cs.gateway to receive a broadcast.
  1150  	validBlock, err := cst1.miner.FindBlock()
  1151  	if err != nil {
  1152  		t.Fatal(err)
  1153  	}
  1154  	_, err = cst1.cs.managedAcceptBlocks([]types.Block{validBlock})
  1155  	if err != nil {
  1156  		t.Fatal(err)
  1157  	}
  1158  	cst1.cs.gateway.Broadcast("RelayHeader", validBlock.Header(), cst1.cs.gateway.Peers())
  1159  	select {
  1160  	case <-mg.broadcastCalled:
  1161  	case <-time.After(1500 * time.Millisecond):
  1162  		t.Fatal("RelayHeader didn't broadcast a valid block header")
  1163  	}
  1164  }
  1165  
  1166  // TestIntegrationRelaySynchronize tests that blocks are relayed as they are
  1167  // accepted and that peers stay synchronized.
  1168  func TestIntegrationRelaySynchronize(t *testing.T) {
  1169  	if testing.Short() {
  1170  		t.SkipNow()
  1171  	}
  1172  	cst1, err := blankConsensusSetTester(t.Name()+"1", modules.ProdDependencies)
  1173  	if err != nil {
  1174  		t.Fatal(err)
  1175  	}
  1176  	defer cst1.Close()
  1177  	cst2, err := blankConsensusSetTester(t.Name()+"2", modules.ProdDependencies)
  1178  	if err != nil {
  1179  		t.Fatal(err)
  1180  	}
  1181  	defer cst2.Close()
  1182  	cst3, err := blankConsensusSetTester(t.Name()+"3", modules.ProdDependencies)
  1183  	if err != nil {
  1184  		t.Fatal(err)
  1185  	}
  1186  	defer cst3.Close()
  1187  
  1188  	// Connect them like so: cst1 <-> cst2 <-> cst3
  1189  	err = cst1.gateway.Connect(cst2.gateway.Address())
  1190  	if err != nil {
  1191  		t.Fatal(err)
  1192  	}
  1193  	err = cst2.gateway.Connect(cst3.gateway.Address())
  1194  	if err != nil {
  1195  		t.Fatal(err)
  1196  	}
  1197  	// Make sure cst1 is not connected to cst3.
  1198  	cst1.gateway.Disconnect(cst3.gateway.Address())
  1199  	cst3.gateway.Disconnect(cst1.gateway.Address())
  1200  
  1201  	// Spin until the connection calls have completed.
  1202  	for i := 0; i < 100; i++ {
  1203  		time.Sleep(150 * time.Millisecond)
  1204  		if len(cst1.gateway.Peers()) >= 1 && len(cst3.gateway.Peers()) >= 1 {
  1205  			break
  1206  		}
  1207  	}
  1208  	if len(cst1.gateway.Peers()) < 1 || len(cst3.gateway.Peers()) < 1 {
  1209  		t.Fatal("Peer connection has failed.")
  1210  	}
  1211  
  1212  	// Mine a block on cst1, expecting the block to propagate from cst1 to
  1213  	// cst2, and then to cst3.
  1214  	b1, err := cst1.miner.AddBlock()
  1215  	if err != nil {
  1216  		t.Log(b1.ID())
  1217  		t.Log(cst1.cs.CurrentBlock().ID())
  1218  		t.Log(cst2.cs.CurrentBlock().ID())
  1219  		t.Fatal(err)
  1220  	}
  1221  
  1222  	// Spin until the block has propagated to cst2.
  1223  	for i := 0; i < 100; i++ {
  1224  		time.Sleep(150 * time.Millisecond)
  1225  		if cst2.cs.CurrentBlock().ID() == b1.ID() {
  1226  			break
  1227  		}
  1228  	}
  1229  	if cst2.cs.CurrentBlock().ID() != b1.ID() {
  1230  		t.Fatal("Block propagation has failed")
  1231  	}
  1232  	// Spin until the block has propagated to cst3.
  1233  	for i := 0; i < 100; i++ {
  1234  		time.Sleep(150 * time.Millisecond)
  1235  		if cst3.cs.CurrentBlock().ID() == b1.ID() {
  1236  			break
  1237  		}
  1238  	}
  1239  	if cst3.cs.CurrentBlock().ID() != b1.ID() {
  1240  		t.Fatal("Block propagation has failed")
  1241  	}
  1242  
  1243  	// Mine a block on cst2.
  1244  	b2, err := cst2.miner.AddBlock()
  1245  	if err != nil {
  1246  		t.Log(b1.ID())
  1247  		t.Log(b2.ID())
  1248  		t.Log(cst2.cs.CurrentBlock().ID())
  1249  		t.Log(cst3.cs.CurrentBlock().ID())
  1250  		t.Fatal(err)
  1251  	}
  1252  	// Spin until the block has propagated to cst1.
  1253  	for i := 0; i < 100; i++ {
  1254  		time.Sleep(150 * time.Millisecond)
  1255  		if cst1.cs.CurrentBlock().ID() == b2.ID() {
  1256  			break
  1257  		}
  1258  	}
  1259  	if cst1.cs.CurrentBlock().ID() != b2.ID() {
  1260  		t.Fatal("block propagation has failed")
  1261  	}
  1262  	// Spin until the block has propagated to cst3.
  1263  	for i := 0; i < 100; i++ {
  1264  		time.Sleep(150 * time.Millisecond)
  1265  		if cst3.cs.CurrentBlock().ID() == b2.ID() {
  1266  			break
  1267  		}
  1268  	}
  1269  	if cst3.cs.CurrentBlock().ID() != b2.ID() {
  1270  		t.Fatal("block propagation has failed")
  1271  	}
  1272  
  1273  	// Mine a block on cst3.
  1274  	b3, err := cst3.miner.AddBlock()
  1275  	if err != nil {
  1276  		t.Log(b1.ID())
  1277  		t.Log(b2.ID())
  1278  		t.Log(b3.ID())
  1279  		t.Log(cst1.cs.CurrentBlock().ID())
  1280  		t.Log(cst2.cs.CurrentBlock().ID())
  1281  		t.Log(cst3.cs.CurrentBlock().ID())
  1282  		t.Fatal(err)
  1283  	}
  1284  	// Spin until the block has propagated to cst1.
  1285  	for i := 0; i < 100; i++ {
  1286  		time.Sleep(150 * time.Millisecond)
  1287  		if cst1.cs.CurrentBlock().ID() == b3.ID() {
  1288  			break
  1289  		}
  1290  	}
  1291  	if cst1.cs.CurrentBlock().ID() != b3.ID() {
  1292  		t.Fatal("block propagation has failed")
  1293  	}
  1294  	// Spin until the block has propagated to cst2.
  1295  	for i := 0; i < 100; i++ {
  1296  		time.Sleep(150 * time.Millisecond)
  1297  		if cst2.cs.CurrentBlock().ID() == b3.ID() {
  1298  			break
  1299  		}
  1300  	}
  1301  	if cst2.cs.CurrentBlock().ID() != b3.ID() {
  1302  		t.Fatal("block propagation has failed")
  1303  	}
  1304  
  1305  	// Check that cst1 and cst3 are not peers; if they are peers, then this test
  1306  	// is invalid because it has failed to be certain that blocks can make
  1307  	// multiple hops.
  1308  	if len(cst1.gateway.Peers()) != 1 || cst1.gateway.Peers()[0].NetAddress == cst3.gateway.Address() {
  1309  		t.Log("Test is invalid, cst1 and cst3 have connected to each other")
  1310  	}
  1311  	if len(cst3.gateway.Peers()) != 1 || cst3.gateway.Peers()[0].NetAddress == cst1.gateway.Address() {
  1312  		t.Log("Test is invalid, cst3 and cst1 have connected to each other")
  1313  	}
  1314  }
  1315  
  1316  // mockPeerConnMockReadWrite is a mock implementation of modules.PeerConn
  1317  // that fails reading or writing if readErr or writeErr is non-nil,
  1318  // respectively.
  1319  type mockPeerConnMockReadWrite struct {
  1320  	modules.PeerConn
  1321  	readErr  error
  1322  	writeErr error
  1323  }
  1324  
  1325  // Read is a mock implementation of conn.Read that fails with the mock error if
  1326  // readErr != nil.
  1327  func (conn mockPeerConnMockReadWrite) Read(b []byte) (n int, err error) {
  1328  	if conn.readErr != nil {
  1329  		return 0, conn.readErr
  1330  	}
  1331  	return conn.PeerConn.Read(b)
  1332  }
  1333  
  1334  // Write is a mock implementation of conn.Write that fails with the mock error
  1335  // if writeErr != nil.
  1336  func (conn mockPeerConnMockReadWrite) Write(b []byte) (n int, err error) {
  1337  	if conn.writeErr != nil {
  1338  		return 0, conn.writeErr
  1339  	}
  1340  	return conn.PeerConn.Write(b)
  1341  }
  1342  
  1343  // mockNetError is a mock net.Error.
  1344  type mockNetError struct {
  1345  	error
  1346  	timeout   bool
  1347  	temporary bool
  1348  }
  1349  
  1350  // Timeout is a mock implementation of net.Error.Timeout.
  1351  func (err mockNetError) Timeout() bool {
  1352  	return err.timeout
  1353  }
  1354  
  1355  // Temporary is a mock implementation of net.Error.Temporary.
  1356  func (err mockNetError) Temporary() bool {
  1357  	return err.temporary
  1358  }
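
// These mocks matter because the synchronize code distinguishes timeouts from
// other failures with the standard net.Error type assertion. classifyStallSketch
// is a generic sketch of the idiom the tests below exercise (not the exact
// source): a timeout before any block arrives is reported as a stall,
// anything else is passed through unchanged.
func classifyStallSketch(err error, blocksReceived int) error {
	if netErr, ok := err.(net.Error); ok && netErr.Timeout() && blocksReceived == 0 {
		return errSendBlocksStalled
	}
	return err
}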
  1359  
  1360  // TestThreadedReceiveBlocksStalls tests that threadedReceiveBlocks returns
  1361  // errSendBlocksStalled when the connection times out before a block is
  1362  // received.
  1363  func TestThreadedReceiveBlocksStalls(t *testing.T) {
  1364  	if testing.Short() {
  1365  		t.SkipNow()
  1366  	}
  1367  
  1368  	cst, err := blankConsensusSetTester(t.Name(), modules.ProdDependencies)
  1369  	if err != nil {
  1370  		t.Fatal(err)
  1371  	}
  1372  	defer cst.Close()
  1373  
  1374  	p1, p2 := net.Pipe()
  1375  	mockP2 := mockPeerConn{p2}
  1376  
  1377  	writeTimeoutConn := mockPeerConnMockReadWrite{
  1378  		PeerConn: mockP2,
  1379  		writeErr: mockNetError{
  1380  			error:   errors.New("Write timeout"),
  1381  			timeout: true,
  1382  		},
  1383  	}
  1384  	readTimeoutConn := mockPeerConnMockReadWrite{
  1385  		PeerConn: mockP2,
  1386  		readErr: mockNetError{
  1387  			error:   errors.New("Read timeout"),
  1388  			timeout: true,
  1389  		},
  1390  	}
  1391  
  1392  	readNetErrConn := mockPeerConnMockReadWrite{
  1393  		PeerConn: mockP2,
  1394  		readErr: mockNetError{
  1395  			error: errors.New("mock read net.Error"),
  1396  		},
  1397  	}
  1398  	writeNetErrConn := mockPeerConnMockReadWrite{
  1399  		PeerConn: mockP2,
  1400  		writeErr: mockNetError{
  1401  			error: errors.New("mock write net.Error"),
  1402  		},
  1403  	}
  1404  
  1405  	readErrConn := mockPeerConnMockReadWrite{
  1406  		PeerConn: mockP2,
  1407  		readErr:  errors.New("mock read err"),
  1408  	}
  1409  	writeErrConn := mockPeerConnMockReadWrite{
  1410  		PeerConn: mockP2,
  1411  		writeErr: errors.New("mock write err"),
  1412  	}
  1413  
  1414  	// Test that threadedReceiveBlocks errors with errSendBlocksStalled when 0
  1415  	// blocks have been sent and the conn times out.
  1416  	err = cst.cs.threadedReceiveBlocks(writeTimeoutConn)
  1417  	if err != errSendBlocksStalled {
  1418  		t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", errSendBlocksStalled, err)
  1419  	}
  1420  	errChan := make(chan error)
  1421  	go func() {
  1422  		var knownBlocks [32]types.BlockID
  1423  		errChan <- encoding.ReadObject(p1, &knownBlocks, 32*crypto.HashSize)
  1424  	}()
  1425  	err = cst.cs.threadedReceiveBlocks(readTimeoutConn)
  1426  	if err != errSendBlocksStalled {
  1427  		t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", errSendBlocksStalled, err)
  1428  	}
  1429  	err = <-errChan
  1430  	if err != nil {
  1431  		t.Fatal(err)
  1432  	}
  1433  
  1434  	// Test that threadedReceiveBlocks errors when writing the block history fails.
  1435  	// Test with an error of type net.Error.
  1436  	err = cst.cs.threadedReceiveBlocks(writeNetErrConn)
  1437  	if err != writeNetErrConn.writeErr {
  1438  		t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", writeNetErrConn.writeErr, err)
  1439  	}
  1440  	// Test with an error of type error.
  1441  	err = cst.cs.threadedReceiveBlocks(writeErrConn)
  1442  	if err != writeErrConn.writeErr {
  1443  		t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", writeErrConn.writeErr, err)
  1444  	}
  1445  
  1446  	// Test that threadedReceiveBlocks errors when reading blocks fails.
  1447  	// Test with an error of type net.Error.
  1448  	go func() {
  1449  		var knownBlocks [32]types.BlockID
  1450  		errChan <- encoding.ReadObject(p1, &knownBlocks, 32*crypto.HashSize)
  1451  	}()
  1452  	err = cst.cs.threadedReceiveBlocks(readNetErrConn)
  1453  	if err != readNetErrConn.readErr {
  1454  		t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", readNetErrConn.readErr, err)
  1455  	}
  1456  	err = <-errChan
  1457  	if err != nil {
  1458  		t.Fatal(err)
  1459  	}
  1460  	// Test with an error of type error.
  1461  	go func() {
  1462  		var knownBlocks [32]types.BlockID
  1463  		errChan <- encoding.ReadObject(p1, &knownBlocks, 32*crypto.HashSize)
  1464  	}()
  1465  	err = cst.cs.threadedReceiveBlocks(readErrConn)
  1466  	if err != readErrConn.readErr {
  1467  		t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", readErrConn.readErr, err)
  1468  	}
  1469  	err = <-errChan
  1470  	if err != nil {
  1471  		t.Fatal(err)
  1472  	}
  1473  
  1474  	// TODO: Test that threadedReceiveBlocks doesn't error with a timeout if it has received one block before this timed out read/write.
  1475  
  1476  	// TODO: Test that threadedReceiveBlocks doesn't error with errSendBlocksStalled if it successfully received one block.
  1477  }
  1478  
  1479  // TestIntegrationSendBlocksStalls tests that the SendBlocks RPC fails with
  1480  // errSendBlocksStalled when the RPC times out and the requesting end has
  1481  // received 0 blocks.
  1482  func TestIntegrationSendBlocksStalls(t *testing.T) {
  1483  	if testing.Short() {
  1484  		t.SkipNow()
  1485  	}
  1486  
  1487  	cstLocal, err := blankConsensusSetTester(t.Name()+"- local", modules.ProdDependencies)
  1488  	if err != nil {
  1489  		t.Fatal(err)
  1490  	}
  1491  	defer cstLocal.Close()
  1492  	cstRemote, err := blankConsensusSetTester(t.Name()+"- remote", modules.ProdDependencies)
  1493  	if err != nil {
  1494  		t.Fatal(err)
  1495  	}
  1496  	defer cstRemote.Close()
  1497  
  1498  	cstLocal.cs.gateway.Connect(cstRemote.cs.gateway.Address())
  1499  
  1500  	// Lock the remote CST so that SendBlocks blocks and times out.
  1501  	cstRemote.cs.mu.Lock()
  1502  	defer cstRemote.cs.mu.Unlock()
  1503  	err = cstLocal.cs.gateway.RPC(cstRemote.cs.gateway.Address(), "SendBlocks", cstLocal.cs.threadedReceiveBlocks)
  1504  	if err != errSendBlocksStalled {
  1505  		t.Fatal(err)
  1506  	}
  1507  }