github.com/johnathanhowell/sia@v0.5.1-beta.0.20160524050156-83dcc3d37c94/modules/consensus/synchronize_test.go

package consensus

import (
	"errors"
	"fmt"
	"io"
	"net"
	"path/filepath"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/crypto"
	"github.com/NebulousLabs/Sia/encoding"
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/modules/gateway"
	"github.com/NebulousLabs/Sia/types"

	"github.com/NebulousLabs/bolt"
)

// TestSynchronize tests that the consensus set can successfully synchronize
// to a peer.
func TestSynchronize(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	cst1, err := createConsensusSetTester("TestSynchronize1")
	if err != nil {
		t.Fatal(err)
	}
	defer cst1.Close()
	cst2, err := createConsensusSetTester("TestSynchronize2")
	if err != nil {
		t.Fatal(err)
	}
	defer cst2.Close()

	// mine on cst2 until it is above cst1
	for cst1.cs.dbBlockHeight() >= cst2.cs.dbBlockHeight() {
		b, _ := cst2.miner.FindBlock()
		err = cst2.cs.AcceptBlock(b)
		if err != nil {
			t.Fatal(err)
		}
	}

	// connect gateways, triggering a Synchronize
	err = cst1.gateway.Connect(cst2.gateway.Address())
	if err != nil {
		t.Fatal(err)
	}

	// blockchains should now match
	for cst1.cs.dbCurrentBlockID() != cst2.cs.dbCurrentBlockID() {
		time.Sleep(10 * time.Millisecond)
	}

	// Mine on cst2 until it is more than 'MaxCatchUpBlocks' ahead of cst1.
	// NOTE: we have to disconnect prior to this, otherwise cst2 will relay
	// blocks to cst1.
	err = cst1.gateway.Disconnect(cst2.gateway.Address())
	if err != nil {
		t.Fatal(err)
	}
	// TODO: more than 30 causes a race condition!
	for cst2.cs.dbBlockHeight() < cst1.cs.dbBlockHeight()+20 {
		b, _ := cst2.miner.FindBlock()
		err = cst2.cs.AcceptBlock(b)
		if err != nil {
			t.Fatal(err)
		}
	}
	// reconnect
	err = cst1.gateway.Connect(cst2.gateway.Address())
	if err != nil {
		t.Fatal(err)
	}

	// block heights should now match
	for cst1.cs.dbBlockHeight() != cst2.cs.dbBlockHeight() {
		time.Sleep(250 * time.Millisecond)
	}

	/*
		// extend cst2 with a "bad" (old) block, and synchronize. cst1 should
		// reject the bad block.
		lockID := cst2.cs.mu.Lock()
		cst2.cs.db.pushPath(cst2.cs.db.getPath(0))
		cst2.cs.mu.Unlock(lockID)
		if cst1.cs.db.pathHeight() == cst2.cs.db.pathHeight() {
			t.Fatal("cst1 did not reject bad block")
		}
	*/
}

// TestResynchronize tests that the consensus set can resynchronize to a peer
// after becoming desynchronized.
func TestResynchronize(t *testing.T) {
	t.Skip("takes way too long")

	cst1, err := createConsensusSetTester("TestResynchronize1")
	if err != nil {
		t.Fatal(err)
	}
	defer cst1.Close()
	cst2, err := createConsensusSetTester("TestResynchronize2")
	if err != nil {
		t.Fatal(err)
	}
	defer cst2.Close()

	// TODO: without this extra block, sync fails. Why?
	b, _ := cst2.miner.FindBlock()
	err = cst2.cs.AcceptBlock(b)
	if err != nil {
		t.Fatal(err)
	}

	// connect and disconnect, so that cst1 and cst2 are synchronized
	err = cst1.gateway.Connect(cst2.gateway.Address())
	if err != nil {
		t.Fatal(err)
	}
	err = cst1.gateway.Disconnect(cst2.gateway.Address())
	if err != nil {
		t.Fatal(err)
	}

	if cst1.cs.dbCurrentBlockID() != cst2.cs.dbCurrentBlockID() {
		t.Fatal("Consensus Sets did not synchronize")
	}

	// mine a block on cst2, but hide it from cst1 during reconnect
	/*
		b, _ = cst2.miner.FindBlock()
		err = cst2.cs.AcceptBlock(b)
		if err != nil {
			t.Fatal(err)
		}
		lockID := cst2.cs.mu.Lock()
		id := cst2.cs.currentBlockID()
		err = cst2.cs.db.popPath()
		if err != nil {
			t.Fatal(err)
		}
		cst2.cs.mu.Unlock(lockID)

		err = cst1.gateway.Connect(cst2.gateway.Address())
		if err != nil {
			t.Fatal(err)
		}

		// add id back to cst2's current path
		lockID = cst2.cs.mu.Lock()
		err = cst2.cs.db.pushPath(id)
		if err != nil {
			t.Fatal(err)
		}
		cst2.cs.mu.Unlock(lockID)

		// cst1 should not have the block
		if cst1.cs.dbBlockHeight() == cst2.cs.dbBlockHeight() {
			t.Fatal("Consensus Sets should not have the same height")
		}
	*/
}

// TestBlockHistory tests that blockHistory returns the expected sequence of
// block IDs.
func TestBlockHistory(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	cst, err := createConsensusSetTester("TestBlockHistory")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()

	// mine until we have enough blocks to test blockHistory
	for cst.cs.dbBlockHeight() < 50 {
		b, _ := cst.miner.FindBlock()
		err = cst.cs.AcceptBlock(b)
		if err != nil {
			t.Fatal(err)
		}
	}

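	// The assertions below imply the layout of blockHistory's 32 IDs: the
	// first 10 entries are the most recent blocks in order of decreasing
	// height, the next 4 sit at exponentially increasing depths (11, 15, 23,
	// and 39 blocks back, the gap doubling each step), and the final entry
	// is always the genesis ID. Slots in between are left as the zero
	// BlockID when the chain is short enough.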
	var history [32]types.BlockID
	_ = cst.cs.db.View(func(tx *bolt.Tx) error {
		history = blockHistory(tx)
		return nil
	})

	// validate history
	cst.cs.mu.Lock()
	// first 10 IDs are linear
	for i := types.BlockHeight(0); i < 10; i++ {
		id, err := cst.cs.dbGetPath(cst.cs.dbBlockHeight() - i)
		if err != nil {
			t.Fatal(err)
		}
		if history[i] != id {
			t.Errorf("Wrong ID in history: expected %v, got %v", id, history[i])
		}
	}
	// next 4 IDs are exponential
	heights := []types.BlockHeight{11, 15, 23, 39}
	for i, height := range heights {
		id, err := cst.cs.dbGetPath(cst.cs.dbBlockHeight() - height)
		if err != nil {
			t.Fatal(err)
		}
		if history[10+i] != id {
			t.Errorf("Wrong ID in history: expected %v, got %v", id, history[10+i])
		}
	}
	// finally, the genesis ID
	genesisID, err := cst.cs.dbGetPath(0)
	if err != nil {
		t.Fatal(err)
	}
	if history[31] != genesisID {
		t.Errorf("Wrong ID in history: expected %v, got %v", genesisID, history[31])
	}

	cst.cs.mu.Unlock()

	// remaining IDs should be empty
	var emptyID types.BlockID
	for i, id := range history[14:31] {
		if id != emptyID {
			t.Errorf("Expected empty ID at index %v, got %v", i+14, id)
		}
	}
}

// mockGatewayCountBroadcasts implements modules.Gateway to mock the Broadcast
// method.
type mockGatewayCountBroadcasts struct {
	modules.Gateway
	numBroadcasts int
	mu            sync.RWMutex
}
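
// Because the struct embeds the modules.Gateway interface, every other
// Gateway method is forwarded to the wrapped gateway unchanged; only
// Broadcast below is intercepted to count calls before passing them through.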

// Broadcast is a mock implementation of modules.Gateway.Broadcast that
// increments a counter denoting the number of times it's been called.
func (g *mockGatewayCountBroadcasts) Broadcast(name string, obj interface{}, peers []modules.Peer) {
	g.mu.Lock()
	g.numBroadcasts++
	g.mu.Unlock()
	g.Gateway.Broadcast(name, obj, peers)
}

// TestSendBlocksBroadcastsOnce tests that the SendBlocks RPC call only
// broadcasts one block, no matter how many blocks are sent. In the case that
// 0 blocks are sent, it tests that Broadcast is never called.
func TestSendBlocksBroadcastsOnce(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	// Setup consensus sets.
	cst1, err := blankConsensusSetTester("TestSendBlocksBroadcastsOnce1")
	if err != nil {
		t.Fatal(err)
	}
	defer cst1.Close()
	cst2, err := blankConsensusSetTester("TestSendBlocksBroadcastsOnce2")
	if err != nil {
		t.Fatal(err)
	}
	defer cst2.Close()
	// Setup mock gateway.
	mg := mockGatewayCountBroadcasts{Gateway: cst1.cs.gateway}
	cst1.cs.gateway = &mg
	err = cst1.cs.gateway.Connect(cst2.cs.gateway.Address())
	if err != nil {
		t.Fatal(err)
	}

	tests := []struct {
		blocksToMine          int
		expectedNumBroadcasts int
		synced                bool
	}{
		// Test that no blocks are broadcast during IBD.
		{
			blocksToMine:          0,
			expectedNumBroadcasts: 0,
			synced:                false,
		},
		{
			blocksToMine:          1,
			expectedNumBroadcasts: 0,
			synced:                false,
		},
		{
			blocksToMine:          2,
			expectedNumBroadcasts: 0,
			synced:                false,
		},
		// Test that only one block is broadcast when IBD is done.
		{
			blocksToMine:          0,
			expectedNumBroadcasts: 0,
			synced:                true,
		},
		{
			blocksToMine:          1,
			expectedNumBroadcasts: 2,
			synced:                true,
		},
		{
			blocksToMine:          2,
			expectedNumBroadcasts: 2,
			synced:                true,
		},
		{
			blocksToMine:          int(MaxCatchUpBlocks),
			expectedNumBroadcasts: 2,
			synced:                true,
		},
		{
			blocksToMine:          2 * int(MaxCatchUpBlocks),
			expectedNumBroadcasts: 2,
			synced:                true,
		},
		{
			blocksToMine:          2*int(MaxCatchUpBlocks) + 1,
			expectedNumBroadcasts: 2,
			synced:                true,
		},
	}
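	// Note: once synced, a single accepted block is expected to produce two
	// Broadcast calls rather than one - the consensus set relays the full
	// block to peers on v0.5.1 and earlier and relays just the header to
	// newer peers (see the comment in TestIntegrationBroadcastRelayHeader
	// below) - hence the expectedNumBroadcasts of 2.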
	for _, test := range tests {
		cst1.cs.mu.Lock()
		cst1.cs.synced = test.synced
		cst1.cs.mu.Unlock()
		mg.mu.Lock()
		mg.numBroadcasts = 0
		mg.mu.Unlock()
		for i := 0; i < test.blocksToMine; i++ {
			b, minerErr := cst2.miner.FindBlock()
			if minerErr != nil {
				t.Fatal(minerErr)
			}
			// managedAcceptBlock is used here instead of AcceptBlock so as not to
			// call Broadcast outside of the SendBlocks RPC.
			err = cst2.cs.managedAcceptBlock(b)
			if err != nil {
				t.Fatal(err)
			}
		}
		err = cst1.cs.gateway.RPC(cst2.cs.gateway.Address(), "SendBlocks", cst1.cs.threadedReceiveBlocks)
		if err != nil {
			t.Fatal(err)
		}
		// Sleep to wait for possible calls to Broadcast to complete. We cannot
		// wait on a channel because we don't know how many times broadcast has
		// been called.
		time.Sleep(10 * time.Millisecond)
		mg.mu.RLock()
		numBroadcasts := mg.numBroadcasts
		mg.mu.RUnlock()
		if numBroadcasts != test.expectedNumBroadcasts {
			t.Errorf("expected %d broadcasts, got %d", test.expectedNumBroadcasts, numBroadcasts)
		}
	}
}

// TestIntegrationRPCSendBlocks tests that the SendBlocks RPC adds blocks to
// the consensus set, and that the consensus set catches up with the remote
// peer and possibly reorgs.
func TestIntegrationRPCSendBlocks(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	type sendBlocksTest struct {
		commonBlocksToMine types.BlockHeight
		localBlocksToMine  types.BlockHeight
		remoteBlocksToMine types.BlockHeight
		msg                string
	}
	tests := []sendBlocksTest{
		{
			msg: "SendBlocks shouldn't do anything when both CSs are at the genesis block",
		},
		{
			commonBlocksToMine: 10,
			msg:                "SendBlocks shouldn't do anything when both CSs are at the same block",
		},
		{
			commonBlocksToMine: 10,
			localBlocksToMine:  5,
			msg:                "SendBlocks shouldn't do anything when the remote CS is behind the local CS",
		},
		{
			commonBlocksToMine: 10,
			remoteBlocksToMine: 5,
			msg:                "SendBlocks should catch up the local CS to the remote CS when it is behind",
		},
		{
			remoteBlocksToMine: 10,
			localBlocksToMine:  5,
			msg:                "SendBlocks should reorg the local CS when the remote CS's chain is longer",
		},
		{
			commonBlocksToMine: 10,
			remoteBlocksToMine: 10,
			localBlocksToMine:  5,
			msg:                "SendBlocks should reorg the local CS when the remote CS's chain is longer",
		},
		{
			remoteBlocksToMine: MaxCatchUpBlocks - 1,
			msg:                "SendBlocks should catch up when the remote CS is ahead",
		},
		{
			remoteBlocksToMine: MaxCatchUpBlocks - 1,
			localBlocksToMine:  MaxCatchUpBlocks - 2,
			msg:                "SendBlocks should reorg the local CS when the remote CS's chain is longer",
		},
		{
			remoteBlocksToMine: MaxCatchUpBlocks,
			msg:                "SendBlocks should catch up when the remote CS is ahead",
		},
		{
			remoteBlocksToMine: MaxCatchUpBlocks,
			localBlocksToMine:  MaxCatchUpBlocks - 2,
			msg:                "SendBlocks should reorg the local CS when the remote CS's chain is longer",
		},
		{
			remoteBlocksToMine: MaxCatchUpBlocks + 1, // There was a bug that caused SendBlocks to be one block behind when its peer was ahead by (k * MaxCatchUpBlocks) + 1 blocks.
			msg:                "SendBlocks should catch up when the remote CS is ahead",
		},
		{
			remoteBlocksToMine: MaxCatchUpBlocks + 1, // There was a bug that caused SendBlocks to be one block behind when its peer was ahead by (k * MaxCatchUpBlocks) + 1 blocks.
			localBlocksToMine:  MaxCatchUpBlocks - 2,
			msg:                "SendBlocks should reorg the local CS when the remote CS's chain is longer",
		},
		{
			remoteBlocksToMine: 2*MaxCatchUpBlocks + 1,
			msg:                "SendBlocks should catch up when the remote CS is ahead",
		},
		{
			remoteBlocksToMine: 2*MaxCatchUpBlocks + 1,
			localBlocksToMine:  2*MaxCatchUpBlocks - 2,
			msg:                "SendBlocks should reorg the local CS when the remote CS's chain is longer",
		},
		{
			remoteBlocksToMine: 12,
			msg:                "SendBlocks should catch up when the remote CS is ahead",
		},
		{
			remoteBlocksToMine: 15,
			msg:                "SendBlocks should catch up when the remote CS is ahead",
		},
		{
			remoteBlocksToMine: 16,
			msg:                "SendBlocks should catch up when the remote CS is ahead",
		},
		{
			remoteBlocksToMine: 17,
			msg:                "SendBlocks should catch up when the remote CS is ahead",
		},
		{
			remoteBlocksToMine: 23,
			msg:                "SendBlocks should catch up when the remote CS is ahead",
		},
		{
			remoteBlocksToMine: 31,
			msg:                "SendBlocks should catch up when the remote CS is ahead",
		},
		{
			remoteBlocksToMine: 32,
			msg:                "SendBlocks should catch up when the remote CS is ahead",
		},
		{
			remoteBlocksToMine: 33,
			msg:                "SendBlocks should catch up when the remote CS is ahead",
		},
	}
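	// Additionally test every remote lead from 1 through 9 blocks, so that
	// each small catch-up height is covered individually.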
	for i := 1; i < 10; i++ {
		tests = append(tests, sendBlocksTest{
			remoteBlocksToMine: types.BlockHeight(i),
			msg:                "SendBlocks should catch up when the remote CS is ahead",
		})
	}

	for i, tt := range tests {
		// Create the "remote" peer.
		remoteCST, err := blankConsensusSetTester(filepath.Join("TestRPCSendBlocks - remote", strconv.Itoa(i)))
		if err != nil {
			t.Fatal(err)
		}
		// Create the "local" peer.
		localCST, err := blankConsensusSetTester(filepath.Join("TestRPCSendBlocks - local", strconv.Itoa(i)))
		if err != nil {
			t.Fatal(err)
		}

		localCST.cs.gateway.Connect(remoteCST.cs.gateway.Address())
		// Wait a moment to let the OnConnect RPCs finish.
		time.Sleep(100 * time.Millisecond)

		// Mine blocks.
		for i := types.BlockHeight(0); i < tt.commonBlocksToMine; i++ {
			b, err := remoteCST.miner.FindBlock()
			if err != nil {
				t.Fatal(err)
			}
			err = remoteCST.cs.managedAcceptBlock(b)
			if err != nil {
				t.Fatal(err)
			}
			err = localCST.cs.managedAcceptBlock(b)
			if err != nil {
				t.Fatal(err)
			}
		}
		for i := types.BlockHeight(0); i < tt.remoteBlocksToMine; i++ {
			b, err := remoteCST.miner.FindBlock()
			if err != nil {
				t.Fatal(err)
			}
			err = remoteCST.cs.managedAcceptBlock(b)
			if err != nil {
				t.Fatal(err)
			}
		}
		for i := types.BlockHeight(0); i < tt.localBlocksToMine; i++ {
			b, err := localCST.miner.FindBlock()
			if err != nil {
				t.Fatal(err)
			}
			err = localCST.cs.managedAcceptBlock(b)
			if err != nil {
				t.Fatal(err)
			}
		}

		localCurrentBlockID := localCST.cs.CurrentBlock().ID()
		remoteCurrentBlockID := remoteCST.cs.CurrentBlock().ID()

		err = localCST.cs.gateway.RPC(remoteCST.cs.gateway.Address(), "SendBlocks", localCST.cs.threadedReceiveBlocks)
		if err != nil {
			t.Error(err)
		}

		// Assume that if remoteBlocksToMine is greater than localBlocksToMine, then
		// the local CS must have received the new blocks (and reorged).
		if tt.remoteBlocksToMine > tt.localBlocksToMine {
			// Verify that the remote cs did not change.
			if remoteCST.cs.CurrentBlock().ID() != remoteCurrentBlockID {
				t.Errorf("%v: the remote CS is at a different current block than before SendBlocks", tt.msg)
			}
			// Verify that the local cs got the new blocks.
			if localCST.cs.Height() != remoteCST.cs.Height() {
				t.Errorf("%v: expected height %v, got %v", tt.msg, remoteCST.cs.Height(), localCST.cs.Height())
			}
			if localCST.cs.CurrentBlock().ID() != remoteCST.cs.CurrentBlock().ID() {
				t.Errorf("%v: remote and local CSTs have different current blocks", tt.msg)
			}
		} else {
			// Verify that the local cs did not change.
			if localCST.cs.CurrentBlock().ID() != localCurrentBlockID {
				t.Errorf("%v: the local CS is at a different current block than before SendBlocks", tt.msg)
			}
		}

		// Cleanup.
		localCST.cs.gateway.Disconnect(remoteCST.cs.gateway.Address())
		remoteCST.cs.gateway.Disconnect(localCST.cs.gateway.Address())
		err = localCST.Close()
		if err != nil {
			t.Fatal(err)
		}
		err = remoteCST.Close()
		if err != nil {
			t.Fatal(err)
		}
	}
}

// TestRPCSendBlockSendsOnlyNecessaryBlocks tests that the SendBlocks RPC only
// sends blocks that the caller does not have and that are part of the longest
// chain.
func TestRPCSendBlockSendsOnlyNecessaryBlocks(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	// Create the "remote" peer.
	cst, err := blankConsensusSetTester("TestRPCSendBlockSendsOnlyNecessaryBlocks - remote")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()
	// Create the "local" peer.
	//
	// We create this peer manually (not using blankConsensusSetTester) so that we
	// can connect it to the remote peer before calling consensus.New so as to
	// prevent SendBlocks from triggering on Connect.
	testdir := build.TempDir(modules.ConsensusDir, "TestRPCSendBlockSendsOnlyNecessaryBlocks - local")
	g, err := gateway.New("localhost:0", filepath.Join(testdir, modules.GatewayDir))
	if err != nil {
		t.Fatal(err)
	}
	defer g.Close()
	err = g.Connect(cst.cs.gateway.Address())
	if err != nil {
		t.Fatal(err)
	}
	cs, err := New(g, filepath.Join(testdir, modules.ConsensusDir))
	if err != nil {
		t.Fatal(err)
	}
	defer cs.Close()

	// Add a few initial blocks to both consensus sets. These are the blocks we
	// want to make sure SendBlocks is not sending unnecessarily as both parties
	// already have them.
	knownBlocks := make(map[types.BlockID]struct{})
	for i := 0; i < 20; i++ {
		b, err := cst.miner.FindBlock()
		if err != nil {
			t.Fatal(err)
		}
		err = cst.cs.managedAcceptBlock(b)
		if err != nil {
			t.Fatal(err)
		}
		err = cs.managedAcceptBlock(b)
		if err != nil {
			t.Fatal(err)
		}
		knownBlocks[b.ID()] = struct{}{}
	}

	// Add a few blocks to only the remote peer and store which blocks we add.
	addedBlocks := make(map[types.BlockID]struct{})
	for i := 0; i < 20; i++ {
		b, err := cst.miner.FindBlock()
		if err != nil {
			t.Fatal(err)
		}
		err = cst.cs.managedAcceptBlock(b)
		if err != nil {
			t.Fatal(err)
		}
		addedBlocks[b.ID()] = struct{}{}
	}

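	// The RPCFunc below hand-implements the requesting side of the SendBlocks
	// protocol: write the 32-ID block history, then repeatedly read a batch
	// of blocks followed by a boolean that reports whether more batches are
	// available. Doing it manually lets the test inspect every block the
	// remote peer sends.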
	err = cs.gateway.RPC(cst.cs.gateway.Address(), "SendBlocks", func(conn modules.PeerConn) error {
		// Get blockIDs to send.
		var history [32]types.BlockID
		cs.mu.RLock()
		err := cs.db.View(func(tx *bolt.Tx) error {
			history = blockHistory(tx)
			return nil
		})
		cs.mu.RUnlock()
		if err != nil {
			return err
		}

		// Send the block ids.
		if err := encoding.WriteObject(conn, history); err != nil {
			return err
		}

		moreAvailable := true
		for moreAvailable {
			// Read a slice of blocks from the wire.
			var newBlocks []types.Block
			if err := encoding.ReadObject(conn, &newBlocks, uint64(MaxCatchUpBlocks)*types.BlockSizeLimit); err != nil {
				return err
			}
			if err := encoding.ReadObject(conn, &moreAvailable, 1); err != nil {
				return err
			}

			// Check if the block needed to be sent.
			for _, newB := range newBlocks {
				_, ok := knownBlocks[newB.ID()]
				if ok {
					t.Error("SendBlocks sent an unnecessary block that the caller already had")
					continue
				}
				_, ok = addedBlocks[newB.ID()]
				if !ok {
					t.Error("SendBlocks sent an unexpected block that is not in the remote peer's longest chain")
				}
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// mock PeerConns for testing peer conns that fail reading or writing.
type (
	mockPeerConnFailingReader struct {
		modules.PeerConn
	}
	mockPeerConnFailingWriter struct {
		modules.PeerConn
	}
)
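
// Note that only Read and Write are overridden below; all other PeerConn
// methods are served by the embedded connection.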

var (
	errFailingReader = errors.New("failing reader")
	errFailingWriter = errors.New("failing writer")
)

// Read is a mock implementation of modules.PeerConn.Read that always returns
// an error.
func (mockPeerConnFailingReader) Read([]byte) (int, error) {
	return 0, errFailingReader
}

// Write is a mock implementation of modules.PeerConn.Write that always returns
// an error.
func (mockPeerConnFailingWriter) Write([]byte) (int, error) {
	return 0, errFailingWriter
}

// TestSendBlk probes the ConsensusSet.rpcSendBlk method and tests that it
// correctly receives block ids and writes out the corresponding blocks.
func TestSendBlk(t *testing.T) {
	cst, err := blankConsensusSetTester("TestSendBlk")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()

	p1, p2 := net.Pipe()
	fnErr := make(chan error)

	tests := []struct {
		id      types.BlockID
		conn    modules.PeerConn
		fn      func() // handle reading and writing over the pipe to the mock conn.
		errWant error
		msg     string
	}{
		// TODO: Test with a failing database.
		// Test with a failing reader.
		{
			conn:    mockPeerConnFailingReader{PeerConn: p1},
			fn:      func() { fnErr <- nil },
			errWant: errFailingReader,
			msg:     "expected rpcSendBlk to error with a failing reader conn",
		},
		// Test with a block id not found in the blockmap.
		{
			conn: p1,
			fn: func() {
				// Write a block id to the conn.
				fnErr <- encoding.WriteObject(p2, types.BlockID{})
			},
			errWant: errNilItem,
			msg:     "expected rpcSendBlk to error with a nonexistent block id",
		},
		// Test with a failing writer.
		{
			conn: mockPeerConnFailingWriter{PeerConn: p1},
			fn: func() {
				// Write a valid block id to the conn.
				fnErr <- encoding.WriteObject(p2, types.GenesisID)
			},
			errWant: errFailingWriter,
			msg:     "expected rpcSendBlk to error with a failing writer conn",
		},
		// Test with a valid conn and valid block.
		{
			conn: p1,
			fn: func() {
				// Write a valid block id to the conn.
				if err := encoding.WriteObject(p2, types.GenesisID); err != nil {
					fnErr <- err
				}

				// Read the block written to the conn.
				var block types.Block
				if err := encoding.ReadObject(p2, &block, types.BlockSizeLimit); err != nil {
					fnErr <- err
				}
				// Verify the block is the expected block.
				if block.ID() != types.GenesisID {
					fnErr <- fmt.Errorf("rpcSendBlk wrote a different block to conn than the block requested. requested block id: %v, received block id: %v", types.GenesisID, block.ID())
				}

				fnErr <- nil
			},
			errWant: nil,
			msg:     "expected rpcSendBlk to succeed with a valid conn and valid block",
		},
	}
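	// Each case runs tt.fn on one end of the pipe while rpcSendBlk reads from
	// and writes to the other end; tt.fn reports its result over fnErr.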
	for _, tt := range tests {
		go tt.fn()
		err := cst.cs.rpcSendBlk(tt.conn)
		if err != tt.errWant {
			t.Errorf("%s: expected to fail with `%v', got: `%v'", tt.msg, tt.errWant, err)
		}
		err = <-fnErr
		if err != nil {
			t.Fatal(err)
		}
	}
}

// TestThreadedReceiveBlock probes the RPCFunc returned by
// cs.threadedReceiveBlock and tests that it correctly requests a block id and
// receives a block. Also tests that the block is correctly (not) accepted into
// the consensus set.
func TestThreadedReceiveBlock(t *testing.T) {
	cst, err := blankConsensusSetTester("TestThreadedReceiveBlock")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()

	p1, p2 := net.Pipe()
	fnErr := make(chan error)

	tests := []struct {
		id      types.BlockID
		conn    modules.PeerConn
		fn      func() // handle reading and writing over the pipe to the mock conn.
		errWant error
		msg     string
	}{
		// Test with failing writer.
		{
			conn:    mockPeerConnFailingWriter{PeerConn: p1},
			fn:      func() { fnErr <- nil },
			errWant: errFailingWriter,
			msg:     "the function returned from threadedReceiveBlock should fail with a PeerConn with a failing writer",
		},
		// Test with failing reader.
		{
			conn: mockPeerConnFailingReader{PeerConn: p1},
			fn: func() {
				// Read the id written to conn.
				var id types.BlockID
				if err := encoding.ReadObject(p2, &id, crypto.HashSize); err != nil {
					fnErr <- err
				}
				// Verify the id is the expected id.
				expectedID := types.BlockID{}
				if id != expectedID {
					fnErr <- fmt.Errorf("id written to conn was %v, but id received was %v", expectedID, id)
				}
				fnErr <- nil
			},
			errWant: errFailingReader,
			msg:     "the function returned from threadedReceiveBlock should fail with a PeerConn with a failing reader",
		},
		// Test with a valid conn, but an invalid block.
		{
			id:   types.BlockID{1},
			conn: p1,
			fn: func() {
				// Read the id written to conn.
				var id types.BlockID
				if err := encoding.ReadObject(p2, &id, crypto.HashSize); err != nil {
					fnErr <- err
				}
				// Verify the id is the expected id.
				expectedID := types.BlockID{1}
				if id != expectedID {
					fnErr <- fmt.Errorf("id written to conn was %v, but id received was %v", expectedID, id)
				}

				// Write an invalid block to conn.
				block := types.Block{}
				if err := encoding.WriteObject(p2, block); err != nil {
					fnErr <- err
				}

				fnErr <- nil
			},
			errWant: errOrphan,
			msg:     "the function returned from threadedReceiveBlock should not accept an invalid block",
		},
		// Test with a valid conn and a valid block.
		{
			id:   types.BlockID{2},
			conn: p1,
			fn: func() {
				// Read the id written to conn.
				var id types.BlockID
				if err := encoding.ReadObject(p2, &id, crypto.HashSize); err != nil {
					fnErr <- err
				}
				// Verify the id is the expected id.
				expectedID := types.BlockID{2}
				if id != expectedID {
					fnErr <- fmt.Errorf("id written to conn was %v, but id received was %v", expectedID, id)
				}

				// Write a valid block to conn.
				block, err := cst.miner.FindBlock()
				if err != nil {
					fnErr <- err
				}
				if err := encoding.WriteObject(p2, block); err != nil {
					fnErr <- err
				}

				fnErr <- nil
			},
			errWant: nil,
			msg:     "the function returned from threadedReceiveBlock should accept a valid block",
		},
	}
	for _, tt := range tests {
		managedReceiveFN := cst.cs.threadedReceiveBlock(tt.id)
		go tt.fn()
		err := managedReceiveFN(tt.conn)
		if err != tt.errWant {
			t.Errorf("%s: expected to fail with `%v', got: `%v'", tt.msg, tt.errWant, err)
		}
		err = <-fnErr
		if err != nil {
			t.Fatal(err)
		}
	}
}

// TestIntegrationSendBlkRPC probes the SendBlk RPC and tests that blocks are
// correctly requested, received, and accepted into the consensus set.
func TestIntegrationSendBlkRPC(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	cst1, err := blankConsensusSetTester("TestIntegrationSendBlkRPC1")
	if err != nil {
		t.Fatal(err)
	}
	defer cst1.Close()
	cst2, err := blankConsensusSetTester("TestIntegrationSendBlkRPC2")
	if err != nil {
		t.Fatal(err)
	}
	defer cst2.Close()

	err = cst1.cs.gateway.Connect(cst2.cs.gateway.Address())
	if err != nil {
		t.Fatal(err)
	}
	err = cst2.cs.gateway.Connect(cst1.cs.gateway.Address())
	if err != nil {
		t.Fatal(err)
	}
	// Sleep to give the consensus sets time to finish the background startup
	// routines - if the block mined below is mined before the sets finish
	// synchronizing to each other, it screws up the test.
	time.Sleep(500 * time.Millisecond)

	// Test that cst1 doesn't accept a block it's already seen (the genesis block).
	err = cst1.cs.gateway.RPC(cst2.cs.gateway.Address(), "SendBlk", cst1.cs.threadedReceiveBlock(types.GenesisID))
	if err != modules.ErrBlockKnown {
		t.Errorf("cst1 should reject known blocks: expected error '%v', got '%v'", modules.ErrBlockKnown, err)
	}
	// Test that cst2 errors when it doesn't recognize the requested block.
	err = cst1.cs.gateway.RPC(cst2.cs.gateway.Address(), "SendBlk", cst1.cs.threadedReceiveBlock(types.BlockID{}))
	if err != io.EOF {
		t.Errorf("cst2 shouldn't return a block it doesn't recognize: expected error '%v', got '%v'", io.EOF, err)
	}

	// Test that cst1 accepts a block that extends its longest chain.
	block, err := cst2.miner.FindBlock()
	if err != nil {
		t.Fatal(err)
	}
	err = cst2.cs.managedAcceptBlock(block) // Call managedAcceptBlock so that the block isn't broadcast.
	if err != nil {
		t.Fatal(err)
	}
	err = cst1.cs.gateway.RPC(cst2.cs.gateway.Address(), "SendBlk", cst1.cs.threadedReceiveBlock(block.ID()))
	if err != nil {
		t.Errorf("cst1 should accept a block that extends its longest chain: expected nil error, got '%v'", err)
	}

	// Test that cst2 accepts a block that extends its longest chain.
	block, err = cst1.miner.FindBlock()
	if err != nil {
		t.Fatal(err)
	}
	err = cst1.cs.managedAcceptBlock(block) // Call managedAcceptBlock so that the block isn't broadcast.
	if err != nil {
		t.Fatal(err)
	}
	err = cst2.cs.gateway.RPC(cst1.cs.gateway.Address(), "SendBlk", cst2.cs.threadedReceiveBlock(block.ID()))
	if err != nil {
		t.Errorf("cst2 should accept a block that extends its longest chain: expected nil error, got '%v'", err)
	}

	// Test that cst1 doesn't accept an orphan block.
	block, err = cst2.miner.FindBlock()
	if err != nil {
		t.Fatal(err)
	}
	err = cst2.cs.managedAcceptBlock(block) // Call managedAcceptBlock so that the block isn't broadcast.
	if err != nil {
		t.Fatal(err)
	}
	block, err = cst2.miner.FindBlock()
	if err != nil {
		t.Fatal(err)
	}
	err = cst2.cs.managedAcceptBlock(block) // Call managedAcceptBlock so that the block isn't broadcast.
	if err != nil {
		t.Fatal(err)
	}
	err = cst1.cs.gateway.RPC(cst2.cs.gateway.Address(), "SendBlk", cst1.cs.threadedReceiveBlock(block.ID()))
	if err != errOrphan {
		t.Errorf("cst1 should not accept an orphan block: expected error '%v', got '%v'", errOrphan, err)
	}
}

type mockGatewayCallsRPC struct {
	modules.Gateway
	rpcCalled chan string
}

func (g *mockGatewayCallsRPC) RPC(addr modules.NetAddress, name string, fn modules.RPCFunc) error {
	g.rpcCalled <- name
	return nil
}
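
// Note that this mock only records each RPC's name and never runs fn, so
// tests using it observe which RPC was requested without any network
// activity taking place.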

// TestRelayHeader tests that rpcRelayHeader requests the blocks corresponding
// to valid headers with known parents, and requests the block history for
// orphan headers.
func TestRelayHeader(t *testing.T) {
	cst, err := blankConsensusSetTester("TestRelayHeader")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()

	mg := &mockGatewayCallsRPC{
		rpcCalled: make(chan string),
	}
	cst.cs.gateway = mg

	p1, p2 := net.Pipe()

	// Valid block that rpcRelayHeader should accept.
	validBlock, err := cst.miner.FindBlock()
	if err != nil {
		t.Fatal(err)
	}

	// A block in the near future that rpcRelayHeader should not return an
	// error for, but should still request the corresponding block.
	block, target, err := cst.miner.BlockForWork()
	if err != nil {
		t.Fatal(err)
	}
	block.Timestamp = types.CurrentTimestamp() + 2 + types.FutureThreshold
	futureBlock, _ := cst.miner.SolveBlock(block, target)
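	// SolveBlock searches nonces until the block meets the target, so the
	// result should be valid aside from its future timestamp.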

	tests := []struct {
		header  types.BlockHeader
		errWant error
		errMSG  string
		rpcWant string
		rpcMSG  string
	}{
		// Test that rpcRelayHeader rejects known blocks.
		{
			header:  types.GenesisBlock.Header(),
			errWant: modules.ErrBlockKnown,
			errMSG:  "rpcRelayHeader should reject headers to known blocks",
		},
		// Test that rpcRelayHeader requests the parent blocks of orphan headers.
		{
			header:  types.BlockHeader{},
			errWant: nil,
			errMSG:  "rpcRelayHeader should not return an error for orphan headers",
			rpcWant: "SendBlocks",
			rpcMSG:  "rpcRelayHeader should request blocks when the relayed header is an orphan",
		},
		// Test that rpcRelayHeader accepts a valid header that extends the longest chain.
		{
			header:  validBlock.Header(),
			errWant: nil,
			errMSG:  "rpcRelayHeader should accept a valid header",
			rpcWant: "SendBlk",
			rpcMSG:  "rpcRelayHeader should request the block of a valid header",
		},
		// Test that rpcRelayHeader requests a future, but otherwise valid block.
		{
			header:  futureBlock.Header(),
			errWant: nil,
			errMSG:  "rpcRelayHeader should not return an error for a future header",
			rpcWant: "SendBlk",
			rpcMSG:  "rpcRelayHeader should request the corresponding block to a future, but otherwise valid header",
		},
	}
	errChan := make(chan error)
	for _, tt := range tests {
		go func() {
			errChan <- encoding.WriteObject(p1, tt.header)
		}()
		err = cst.cs.rpcRelayHeader(p2)
		if err != tt.errWant {
			t.Errorf("%s: expected '%v', got '%v'", tt.errMSG, tt.errWant, err)
		}
		err = <-errChan
		if err != nil {
			t.Fatal(err)
		}
		if tt.rpcWant == "" {
			select {
			case rpc := <-mg.rpcCalled:
				t.Errorf("no RPC call expected, but '%v' was called", rpc)
			case <-time.After(10 * time.Millisecond):
			}
		} else {
			select {
			case rpc := <-mg.rpcCalled:
				if rpc != tt.rpcWant {
					t.Errorf("%s: expected '%v', got '%v'", tt.rpcMSG, tt.rpcWant, rpc)
				}
			case <-time.After(10 * time.Millisecond):
				t.Errorf("%s: expected '%v', but no RPC was called", tt.rpcMSG, tt.rpcWant)
			}
		}
	}
}

// TestIntegrationBroadcastRelayHeader checks that broadcasting RelayHeader
// causes peers to also broadcast the header (if the block is valid).
func TestIntegrationBroadcastRelayHeader(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	// Setup consensus sets.
	cst1, err := blankConsensusSetTester("TestIntegrationBroadcastRelayHeader1")
	if err != nil {
		t.Fatal(err)
	}
	defer cst1.Close()
	cst2, err := blankConsensusSetTester("TestIntegrationBroadcastRelayHeader2")
	if err != nil {
		t.Fatal(err)
	}
	defer cst2.Close()
	// Setup mock gateway.
	mg := &mockGatewayDoesBroadcast{
		Gateway:         cst2.cs.gateway,
		broadcastCalled: make(chan struct{}),
	}
	cst2.cs.gateway = mg
	err = cst1.cs.gateway.Connect(cst2.cs.gateway.Address())
	if err != nil {
		t.Fatal(err)
	}

	// Test that broadcasting an invalid block header over RelayHeader on cst1.cs
	// does not result in cst2.cs.gateway receiving a broadcast.
	cst1.cs.gateway.Broadcast("RelayHeader", types.BlockHeader{}, cst1.cs.gateway.Peers())
	select {
	case <-mg.broadcastCalled:
		t.Fatal("RelayHeader broadcasted an invalid block header")
	case <-time.After(200 * time.Millisecond):
	}

	// Test that broadcasting a valid block header over RelayHeader on cst1.cs
	// causes cst2.cs.gateway to receive a broadcast.
	validBlock, err := cst1.miner.FindBlock()
	if err != nil {
		t.Fatal(err)
	}
	err = cst1.cs.managedAcceptBlock(validBlock)
	if err != nil {
		t.Fatal(err)
	}
	cst1.cs.gateway.Broadcast("RelayHeader", validBlock.Header(), cst1.cs.gateway.Peers())
	select {
	case <-mg.broadcastCalled:
		// Broadcast is called twice, once to broadcast blocks to peers <= v0.5.1
		// and once to broadcast block headers to peers > v0.5.1.
		<-mg.broadcastCalled
	case <-time.After(200 * time.Millisecond):
		t.Fatal("RelayHeader didn't broadcast a valid block header")
	}
}

// TestIntegrationRelaySynchronize tests that blocks are relayed as they are
// accepted and that peers stay synchronized. This test is header/block
// broadcast agnostic. When build.Version <= 0.5.1 block relaying will be
// tested. When build.Version > 0.5.1 header relaying will be tested.
func TestIntegrationRelaySynchronize(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	cst1, err := blankConsensusSetTester("TestRelaySynchronize1")
	if err != nil {
		t.Fatal(err)
	}
	defer cst1.Close()
	cst2, err := blankConsensusSetTester("TestRelaySynchronize2")
	if err != nil {
		t.Fatal(err)
	}
	defer cst2.Close()
	cst3, err := blankConsensusSetTester("TestRelaySynchronize3")
	if err != nil {
		t.Fatal(err)
	}
	defer cst3.Close()

	// Connect them like so: cst1 <-> cst2 <-> cst3
	err = cst1.gateway.Connect(cst2.gateway.Address())
	if err != nil {
		t.Fatal(err)
	}
	err = cst2.gateway.Connect(cst3.gateway.Address())
	if err != nil {
		t.Fatal(err)
	}
	// Make sure cst1 is not connected to cst3.
	cst1.gateway.Disconnect(cst3.gateway.Address())
	cst3.gateway.Disconnect(cst1.gateway.Address())

	// Spin until the connection calls have completed.
	for i := 0; i < 100; i++ {
		time.Sleep(50 * time.Millisecond)
		if len(cst1.gateway.Peers()) >= 1 && len(cst3.gateway.Peers()) >= 1 {
			break
		}
	}
	if len(cst1.gateway.Peers()) < 1 || len(cst3.gateway.Peers()) < 1 {
		t.Fatal("Peer connection has failed.")
	}

	// Mine a block on cst1, expecting the block to propagate from cst1 to
	// cst2, and then to cst3.
	b, err := cst1.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}

	// Spin until the block has propagated to cst2.
	for i := 0; i < 100; i++ {
		time.Sleep(50 * time.Millisecond)
		if cst2.cs.CurrentBlock().ID() == b.ID() {
			break
		}
	}
	if cst2.cs.CurrentBlock().ID() != b.ID() {
		t.Fatal("Block propagation has failed")
	}

	// Spin until the block has propagated to cst3.
	for i := 0; i < 100; i++ {
		time.Sleep(50 * time.Millisecond)
		if cst3.cs.CurrentBlock().ID() == b.ID() {
			break
		}
	}
	if cst3.cs.CurrentBlock().ID() != b.ID() {
		t.Fatal("Block propagation has failed")
	}

	// Mine a block on cst2.
	b, err = cst2.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}
	// Spin until the block has propagated.
	for i := 0; i < 100; i++ {
		time.Sleep(50 * time.Millisecond)
		if cst1.cs.CurrentBlock().ID() == b.ID() {
			break
		}
	}
	if cst1.cs.CurrentBlock().ID() != b.ID() {
		t.Fatal("block propagation has failed")
	}
	for i := 0; i < 100; i++ {
		time.Sleep(50 * time.Millisecond)
		if cst3.cs.CurrentBlock().ID() == b.ID() {
			break
		}
	}
	if cst3.cs.CurrentBlock().ID() != b.ID() {
		t.Fatal("block propagation has failed")
	}

	// Mine a block on cst3.
	b, err = cst3.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}
	// Spin until the block has propagated.
	for i := 0; i < 100; i++ {
		time.Sleep(50 * time.Millisecond)
		if cst1.cs.CurrentBlock().ID() == b.ID() {
			break
		}
	}
	if cst1.cs.CurrentBlock().ID() != b.ID() {
		t.Fatal("block propagation has failed")
	}
	for i := 0; i < 100; i++ {
		time.Sleep(50 * time.Millisecond)
		if cst2.cs.CurrentBlock().ID() == b.ID() {
			break
		}
	}
	if cst2.cs.CurrentBlock().ID() != b.ID() {
		t.Fatal("block propagation has failed")
	}

	// Check that cst1 and cst3 are not peers; if they are peers then this test
	// is invalid because it cannot be certain that blocks made multiple hops.
	if len(cst1.gateway.Peers()) != 1 || cst1.gateway.Peers()[0].NetAddress == cst3.gateway.Address() {
		t.Fatal("Test is invalid, cst1 and cst3 have connected to each other")
	}
	if len(cst3.gateway.Peers()) != 1 || cst3.gateway.Peers()[0].NetAddress == cst1.gateway.Address() {
		t.Fatal("Test is invalid, cst3 and cst1 have connected to each other")
	}
}

// mockConnMockReadWrite is a mock implementation of net.Conn that fails
// reading or writing if readErr or writeErr is non-nil, respectively.
type mockConnMockReadWrite struct {
	net.Conn
	readErr  error
	writeErr error
}

// Read is a mock implementation of conn.Read that fails with the mock error if
// readErr != nil.
func (conn mockConnMockReadWrite) Read(b []byte) (n int, err error) {
	if conn.readErr != nil {
		return 0, conn.readErr
	}
	return conn.Conn.Read(b)
}

// Write is a mock implementation of conn.Write that fails with the mock error
// if writeErr != nil.
func (conn mockConnMockReadWrite) Write(b []byte) (n int, err error) {
	if conn.writeErr != nil {
		return 0, conn.writeErr
	}
	return conn.Conn.Write(b)
}

// mockNetError is a mock net.Error.
type mockNetError struct {
	error
	timeout   bool
	temporary bool
}

// Timeout is a mock implementation of net.Error.Timeout.
func (err mockNetError) Timeout() bool {
	return err.timeout
}

// Temporary is a mock implementation of net.Error.Temporary.
func (err mockNetError) Temporary() bool {
	return err.temporary
}

// TestThreadedReceiveBlocksStalls tests that threadedReceiveBlocks returns
// errSendBlocksStalled when the connection times out before a block is
// received.
func TestThreadedReceiveBlocksStalls(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	cst, err := blankConsensusSetTester("TestThreadedReceiveBlocksStalls")
	if err != nil {
		t.Fatal(err)
	}
	defer cst.Close()

	p1, p2 := net.Pipe()
	writeTimeoutConn := mockConnMockReadWrite{
		Conn: p2,
		writeErr: mockNetError{
			error:   errors.New("Write timeout"),
			timeout: true,
		},
	}
	readTimeoutConn := mockConnMockReadWrite{
		Conn: p2,
		readErr: mockNetError{
			error:   errors.New("Read timeout"),
			timeout: true,
		},
	}

	readNetErrConn := mockConnMockReadWrite{
		Conn: p2,
		readErr: mockNetError{
			error: errors.New("mock read net.Error"),
		},
	}
	writeNetErrConn := mockConnMockReadWrite{
		Conn: p2,
		writeErr: mockNetError{
			error: errors.New("mock write net.Error"),
		},
	}

	readErrConn := mockConnMockReadWrite{
		Conn:    p2,
		readErr: errors.New("mock read err"),
	}
	writeErrConn := mockConnMockReadWrite{
		Conn:     p2,
		writeErr: errors.New("mock write err"),
	}

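	// The mocks above exercise the two failure modes asserted below: a
	// net.Error timeout with zero blocks received should be reported as
	// errSendBlocksStalled, while any other read or write error should be
	// returned unchanged.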
	// Test that threadedReceiveBlocks errors with errSendBlocksStalled when 0
	// blocks have been sent and the conn times out.
	err = cst.cs.threadedReceiveBlocks(writeTimeoutConn)
	if err != errSendBlocksStalled {
		t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", errSendBlocksStalled, err)
	}
	errChan := make(chan error)
	go func() {
		var knownBlocks [32]types.BlockID
		errChan <- encoding.ReadObject(p1, &knownBlocks, 32*crypto.HashSize)
	}()
	err = cst.cs.threadedReceiveBlocks(readTimeoutConn)
	if err != errSendBlocksStalled {
		t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", errSendBlocksStalled, err)
	}
	err = <-errChan
	if err != nil {
		t.Fatal(err)
	}

	// Test that threadedReceiveBlocks errors when writing the block history fails.
	// Test with an error of type net.Error.
	err = cst.cs.threadedReceiveBlocks(writeNetErrConn)
	if err != writeNetErrConn.writeErr {
		t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", writeNetErrConn.writeErr, err)
	}
	// Test with an error of type error.
	err = cst.cs.threadedReceiveBlocks(writeErrConn)
	if err != writeErrConn.writeErr {
		t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", writeErrConn.writeErr, err)
	}

	// Test that threadedReceiveBlocks errors when reading blocks fails.
	// Test with an error of type net.Error.
	go func() {
		var knownBlocks [32]types.BlockID
		errChan <- encoding.ReadObject(p1, &knownBlocks, 32*crypto.HashSize)
	}()
	err = cst.cs.threadedReceiveBlocks(readNetErrConn)
	if err != readNetErrConn.readErr {
		t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", readNetErrConn.readErr, err)
	}
	err = <-errChan
	if err != nil {
		t.Fatal(err)
	}
	// Test with an error of type error.
	go func() {
		var knownBlocks [32]types.BlockID
		errChan <- encoding.ReadObject(p1, &knownBlocks, 32*crypto.HashSize)
	}()
	err = cst.cs.threadedReceiveBlocks(readErrConn)
	if err != readErrConn.readErr {
		t.Errorf("expected threadedReceiveBlocks to err with \"%v\", got \"%v\"", readErrConn.readErr, err)
	}
	err = <-errChan
	if err != nil {
		t.Fatal(err)
	}

	// TODO: Test that threadedReceiveBlocks doesn't error with a timeout if it has received one block before this timed out read/write.

	// TODO: Test that threadedReceiveBlocks doesn't error with errSendBlocksStalled if it successfully received one block.
}

// TestIntegrationSendBlocksStalls tests that the SendBlocks RPC fails with
// errSendBlocksStalled when the RPC times out and the requesting end has
// received 0 blocks.
func TestIntegrationSendBlocksStalls(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	cstLocal, err := blankConsensusSetTester("TestThreadedReceiveBlocksTimesout - local")
	if err != nil {
		t.Fatal(err)
	}
	defer cstLocal.Close()
	cstRemote, err := blankConsensusSetTester("TestThreadedReceiveBlocksTimesout - remote")
	if err != nil {
		t.Fatal(err)
	}
	defer cstRemote.Close()

	cstLocal.cs.gateway.Connect(cstRemote.cs.gateway.Address())

	// Lock the remote CST so that SendBlocks blocks and times out.
	cstRemote.cs.mu.Lock()
	defer cstRemote.cs.mu.Unlock()
	err = cstLocal.cs.gateway.RPC(cstRemote.cs.gateway.Address(), "SendBlocks", cstLocal.cs.threadedReceiveBlocks)
	if err != errSendBlocksStalled {
		t.Fatal(err)
	}
}