github.com/hechain20/hechain@v0.0.0-20220316014945-b544036ba106/orderer/common/cluster/replication_test.go

     1  /*
     2  Copyright hechain. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package cluster_test
     8  
     9  import (
    10  	"io/ioutil"
    11  	"path/filepath"
    12  	"strings"
    13  	"testing"
    14  	"time"
    15  
    16  	"github.com/golang/protobuf/proto"
    17  	"github.com/hechain20/hechain/bccsp/sw"
    18  	"github.com/hechain20/hechain/common/channelconfig"
    19  	"github.com/hechain20/hechain/common/configtx"
    20  	"github.com/hechain20/hechain/common/flogging"
    21  	"github.com/hechain20/hechain/internal/pkg/comm"
    22  	"github.com/hechain20/hechain/orderer/common/cluster"
    23  	"github.com/hechain20/hechain/orderer/common/cluster/mocks"
    24  	"github.com/hechain20/hechain/orderer/common/localconfig"
    25  	"github.com/hechain20/hechain/protoutil"
    26  	"github.com/hyperledger/fabric-protos-go/common"
    27  	"github.com/hyperledger/fabric-protos-go/msp"
    28  	"github.com/hyperledger/fabric-protos-go/orderer"
    29  	"github.com/pkg/errors"
    30  	"github.com/stretchr/testify/mock"
    31  	"github.com/stretchr/testify/require"
    32  	"go.uber.org/zap"
    33  	"go.uber.org/zap/zapcore"
    34  )
    35  
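         // TestIsReplicationNeeded checks the decision rule: replication is needed
         // iff the local system channel height is at most the boot block's sequence.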
    36  func TestIsReplicationNeeded(t *testing.T) {
    37  	for _, testCase := range []struct {
    38  		name                string
    39  		bootBlock           *common.Block
    40  		systemChannelHeight uint64
    41  		systemChannelError  error
    42  		expectedError       string
    43  		replicationNeeded   bool
    44  	}{
    45  		{
    46  			name:                "no replication needed",
    47  			systemChannelHeight: 100,
    48  			bootBlock:           &common.Block{Header: &common.BlockHeader{Number: 99}},
    49  		},
    50  		{
    51  			name:                "replication is needed - bootstrap block's index equal to height",
    52  			systemChannelHeight: 99,
    53  			bootBlock:           &common.Block{Header: &common.BlockHeader{Number: 99}},
    54  			replicationNeeded:   true,
    55  		},
    56  		{
    57  			name:                "replication is needed - no ledger",
    58  			systemChannelHeight: 0,
    59  			bootBlock:           &common.Block{Header: &common.BlockHeader{Number: 99}},
    60  			replicationNeeded:   true,
    61  		},
    62  		{
    63  			name:               "IO error",
    64  			systemChannelError: errors.New("IO error"),
    65  			expectedError:      "IO error",
    66  		},
    67  	} {
    68  		t.Run(testCase.name, func(t *testing.T) {
    69  			ledgerWriter := &mocks.LedgerWriter{}
    70  			ledgerWriter.On("Height").Return(testCase.systemChannelHeight)
    71  
    72  			ledgerFactory := &mocks.LedgerFactory{}
    73  			ledgerFactory.On("GetOrCreate", "system").Return(ledgerWriter, testCase.systemChannelError)
    74  
    75  			r := cluster.Replicator{
    76  				Filter:        cluster.AnyChannel,
    77  				Logger:        flogging.MustGetLogger("test"),
    78  				BootBlock:     testCase.bootBlock,
    79  				SystemChannel: "system",
    80  				LedgerFactory: ledgerFactory,
    81  			}
    82  
    83  			ok, err := r.IsReplicationNeeded()
    84  			if testCase.expectedError != "" {
    85  				require.EqualError(t, err, testCase.expectedError)
    86  			} else {
    87  				require.NoError(t, err)
    88  				require.Equal(t, testCase.replicationNeeded, ok)
    89  			}
    90  		})
    91  	}
    92  }
    93  
    94  func TestReplicateChainsFailures(t *testing.T) {
    95  	for _, testCase := range []struct {
    96  		name                    string
    97  		isProbeResponseDelayed  bool
    98  		latestBlockSeqInOrderer uint64
    99  		ledgerFactoryError      error
   100  		appendBlockError        error
   101  		expectedPanic           string
   102  		mutateBlocks            func([]*common.Block)
   103  		channelsReturns         []cluster.ChannelGenesisBlock
   104  		badResponse             *orderer.DeliverResponse
   105  	}{
   106  		{
   107  			name: "no block received",
   108  			expectedPanic: "Failed pulling system channel: " +
   109  				"failed obtaining the latest block for channel system",
   110  			isProbeResponseDelayed: true,
   111  		},
   112  		{
   113  			name: "received service unavailable",
   114  			expectedPanic: "Failed pulling system channel: " +
   115  				"failed obtaining the latest block for channel system",
   116  			badResponse: &orderer.DeliverResponse{
   117  				Type: &orderer.DeliverResponse_Status{
   118  					Status: common.Status_SERVICE_UNAVAILABLE,
   119  				},
   120  			},
   121  		},
   122  		{
   123  			name: "latest block seq is less than boot block seq",
   124  			expectedPanic: "Failed pulling system channel: " +
   125  				"latest height found among system channel(system) orderers is 19," +
   126  				" but the boot block's sequence is 21",
   127  			latestBlockSeqInOrderer: 18,
   128  		},
   129  		{
   130  			name: "hash chain mismatch",
   131  			expectedPanic: "Failed pulling system channel: " +
   132  				"block header mismatch on sequence 11, " +
   133  				"expected 9cd61b7e9a5ea2d128cc877e5304e7205888175a8032d40b97db7412dca41d9e, got 010203",
   134  			latestBlockSeqInOrderer: 21,
   135  			mutateBlocks: func(systemChannelBlocks []*common.Block) {
   136  				systemChannelBlocks[len(systemChannelBlocks)/2].Header.PreviousHash = []byte{1, 2, 3}
   137  			},
   138  		},
   139  		{
   140  			name: "last pulled block doesn't match the boot block",
   141  			expectedPanic: "Block header mismatch on last system channel block," +
   142  				" expected 8ec93b2ef5ffdc302f0c0e24611be04ad2b17b099a1aeafd7cfb76a95923f146," +
   143  				" got e428decfc78f8e4c97b26da9c16f9d0b73f886dafa80477a0dd9bac7eb14fe7a",
   144  			latestBlockSeqInOrderer: 21,
   145  			mutateBlocks: func(systemChannelBlocks []*common.Block) {
   146  				systemChannelBlocks[21].Header.DataHash = nil
   147  			},
   148  		},
   149  		{
   150  			name:                    "failure in creating ledger",
   151  			latestBlockSeqInOrderer: 21,
   152  			ledgerFactoryError:      errors.New("IO error"),
   153  			expectedPanic:           "Failed to create a ledger for channel system: IO error",
   154  		},
   155  		{
   156  			name:                    "failure in appending a block to the ledger",
   157  			latestBlockSeqInOrderer: 21,
   158  			appendBlockError:        errors.New("IO error"),
   159  			expectedPanic:           "Failed to write block [0]: IO error",
   160  		},
   161  		{
   162  			name:                    "failure pulling the system chain",
   163  			latestBlockSeqInOrderer: 21,
   164  			expectedPanic: "Failed pulling system channel: " +
   165  				"failed obtaining the latest block for channel system",
   166  			isProbeResponseDelayed: true,
   167  		},
   168  		{
   169  			name:                    "failure obtaining a ledger for a non participating channel",
   170  			latestBlockSeqInOrderer: 21,
   171  			channelsReturns: []cluster.ChannelGenesisBlock{
   172  				{ChannelName: "channelWeAreNotPartOf"},
   173  			},
   174  			ledgerFactoryError: errors.New("IO error"),
   175  			expectedPanic:      "Failed to create a ledger for channel channelWeAreNotPartOf: IO error",
   176  		},
   177  	} {
   178  		t.Run(testCase.name, func(t *testing.T) {
   179  			systemChannelBlocks := createBlockChain(0, 21)
   180  			if testCase.mutateBlocks != nil {
   181  				testCase.mutateBlocks(systemChannelBlocks)
   182  			}
   183  
   184  			lw := &mocks.LedgerWriter{}
   185  			lw.On("Append", mock.Anything).Return(testCase.appendBlockError)
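         			// A zero height simulates an empty local ledger, so the first block
         			// appended is block [0], which is where any append error surfaces.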
   186  			lw.On("Height").Return(uint64(0))
   187  
   188  			lf := &mocks.LedgerFactory{}
   189  			lf.On("GetOrCreate", "system").Return(lw, testCase.ledgerFactoryError)
   190  			lf.On("GetOrCreate", "channelWeAreNotPartOf").Return(lw, testCase.ledgerFactoryError)
   191  
   192  			osn := newClusterNode(t)
   193  			defer osn.stop()
   194  
   195  			dialer := newCountingDialer()
   196  			bp := newBlockPuller(dialer, osn.srv.Address())
    197  			// Use a large timeout to reduce the chance of flakes when the server
    198  			// gets stuck and we would otherwise hit a spurious timeout.
   199  			bp.FetchTimeout = time.Hour
   200  
   201  			cl := &mocks.ChannelLister{}
   202  			cl.On("Channels").Return(testCase.channelsReturns)
   203  			cl.On("Close")
   204  
   205  			r := cluster.Replicator{
   206  				Filter: cluster.AnyChannel,
   207  				AmIPartOfChannel: func(configBlock *common.Block) error {
   208  					return cluster.ErrNotInChannel
   209  				},
   210  				Logger:        flogging.MustGetLogger("test"),
   211  				BootBlock:     systemChannelBlocks[21],
   212  				SystemChannel: "system",
   213  				LedgerFactory: lf,
   214  				Puller:        bp,
   215  				ChannelLister: cl,
   216  			}
   217  
   218  			if len(testCase.channelsReturns) > 0 {
   219  				simulateNonParticipantChannelPull(osn)
   220  			}
   221  
   222  			if testCase.badResponse != nil {
   223  				osn.blockResponses <- testCase.badResponse
   224  			}
   225  
   226  			if !testCase.isProbeResponseDelayed {
   227  				osn.enqueueResponse(testCase.latestBlockSeqInOrderer)
   228  				osn.enqueueResponse(testCase.latestBlockSeqInOrderer)
   229  			} else {
   230  				// Send a nil to force an EOF to the client
   231  				osn.blockResponses <- nil
   232  			}
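         			// Two probes are expected before the pull: one to discover the latest
         			// height among the orderers, and another when the puller (re)connects,
         			// after which blocks are pulled starting from sequence 0.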
   233  			osn.addExpectProbeAssert()
   234  			osn.addExpectProbeAssert()
   235  			osn.addExpectPullAssert(0)
   236  
   237  			if !testCase.isProbeResponseDelayed {
   238  				for _, block := range systemChannelBlocks {
   239  					osn.blockResponses <- &orderer.DeliverResponse{
   240  						Type: &orderer.DeliverResponse_Block{Block: block},
   241  					}
   242  				}
   243  			} else {
   244  				// Send a nil to force an EOF to the client
   245  				osn.blockResponses <- nil
   246  			}
   247  
   248  			require.PanicsWithValue(t, testCase.expectedPanic, func() { r.ReplicateChains() })
   249  			bp.Close()
   250  			dialer.assertAllConnectionsClosed(t)
   251  		})
   252  	}
   253  }
   254  
   255  func TestPullChannelFailure(t *testing.T) {
   256  	blockchain := createBlockChain(0, 5)
   257  	for _, testcase := range []struct {
   258  		name                 string
   259  		genesisBlockSequence int
   260  		thirdBlockSequence   int
   261  	}{
   262  		{
   263  			name:                 "Failed to pull genesis block",
   264  			genesisBlockSequence: 1,
   265  		},
   266  		{
   267  			name:                 "Failed to pull some non genesis block",
   268  			genesisBlockSequence: 0,
   269  			thirdBlockSequence:   0,
   270  		},
   271  	} {
   272  		t.Run(testcase.name, func(t *testing.T) {
   273  			lw := &mocks.LedgerWriter{}
   274  			lw.On("Append", mock.Anything).Return(nil)
   275  			lw.On("Height").Return(uint64(0))
   276  
   277  			lf := &mocks.LedgerFactory{}
   278  			lf.On("GetOrCreate", "mychannel").Return(lw, nil)
   279  
   280  			osn := newClusterNode(t)
   281  			defer osn.stop()
   282  
   283  			enqueueBlock := func(seq int) {
   284  				osn.blockResponses <- &orderer.DeliverResponse{
   285  					Type: &orderer.DeliverResponse_Block{
   286  						Block: blockchain[seq],
   287  					},
   288  				}
   289  			}
   290  
   291  			dialer := newCountingDialer()
   292  			bp := newBlockPuller(dialer, osn.srv.Address())
    293  			// Use a large timeout to reduce the chance of flakes when the server
    294  			// gets stuck and we would otherwise hit a spurious timeout.
   295  			bp.FetchTimeout = time.Hour
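         			// A single retry makes the puller fail fast with ErrRetryCountExhausted
         			// once it receives a block with an unexpected sequence.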
   296  			bp.MaxPullBlockRetries = 1
   297  			// Do not buffer blocks in memory
   298  			bp.MaxTotalBufferBytes = 1
   299  
   300  			r := cluster.Replicator{
   301  				Filter: cluster.AnyChannel,
   302  				AmIPartOfChannel: func(configBlock *common.Block) error {
   303  					return nil
   304  				},
   305  				Logger:        flogging.MustGetLogger("test"),
   306  				SystemChannel: "system",
   307  				LedgerFactory: lf,
   308  				Puller:        bp,
   309  			}
   310  
   311  			osn.addExpectProbeAssert()
   312  			enqueueBlock(5)
   313  			osn.addExpectProbeAssert()
   314  			enqueueBlock(5)
   315  			osn.addExpectPullAssert(0)
   316  			enqueueBlock(testcase.genesisBlockSequence)
   317  			enqueueBlock(1)
   318  			enqueueBlock(testcase.thirdBlockSequence)
   319  
   320  			err := r.PullChannel("mychannel")
   321  			require.Equal(t, cluster.ErrRetryCountExhausted, err)
   322  		})
   323  	}
   324  }
   325  
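         // TestPullerConfigFromTopLevelConfig asserts the mapping of the orderer's
         // General.Cluster settings (ReplicationBufferSize, RPCTimeout), the given
         // TLS key pair, and the signer into a cluster.PullerConfig.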
   326  func TestPullerConfigFromTopLevelConfig(t *testing.T) {
   327  	signer := &mocks.SignerSerializer{}
   328  	expected := cluster.PullerConfig{
   329  		Channel:             "system",
   330  		MaxTotalBufferBytes: 100,
   331  		Signer:              signer,
   332  		TLSCert:             []byte{3, 2, 1},
   333  		TLSKey:              []byte{1, 2, 3},
   334  		Timeout:             time.Hour,
   335  	}
   336  
   337  	topLevelConfig := &localconfig.TopLevel{
   338  		General: localconfig.General{
   339  			Cluster: localconfig.Cluster{
   340  				ReplicationBufferSize: 100,
   341  				RPCTimeout:            time.Hour,
   342  			},
   343  		},
   344  	}
   345  
   346  	config := cluster.PullerConfigFromTopLevelConfig("system", topLevelConfig, []byte{1, 2, 3}, []byte{3, 2, 1}, signer)
   347  	require.Equal(t, expected, config)
   348  }
   349  
   350  func TestReplicateChainsChannelClassificationFailure(t *testing.T) {
   351  	// Scenario: We are unable to classify whether we are part of the channel,
   352  	// so we crash, because this is a programming error.
   353  
   354  	block30WithConfigBlockOf21 := protoutil.NewBlock(30, nil)
   355  	block30WithConfigBlockOf21.Metadata.Metadata[common.BlockMetadataIndex_LAST_CONFIG] = protoutil.MarshalOrPanic(&common.Metadata{
   356  		Value: protoutil.MarshalOrPanic(&common.LastConfig{Index: 21}),
   357  	})
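         	// The LAST_CONFIG metadata above makes block 30 point at block 21 as the
         	// latest config block, which the replicator fetches in order to classify the channel.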
   358  
   359  	osn := newClusterNode(t)
   360  	defer osn.stop()
   361  	osn.blockResponses = make(chan *orderer.DeliverResponse, 1000)
   362  
   363  	dialer := newCountingDialer()
   364  	bp := newBlockPuller(dialer, osn.srv.Address())
   365  	bp.FetchTimeout = time.Hour
   366  
   367  	channelLister := &mocks.ChannelLister{}
   368  	channelLister.On("Channels").Return([]cluster.ChannelGenesisBlock{{ChannelName: "A"}})
   369  	channelLister.On("Close")
   370  
   371  	// We probe for the latest block of the orderer
   372  	osn.addExpectProbeAssert()
   373  	osn.enqueueResponse(30)
   374  
   375  	// And now pull it again (first poll and then pull it for real).
   376  	osn.addExpectProbeAssert()
   377  	osn.enqueueResponse(30)
   378  	osn.addExpectPullAssert(30)
   379  	osn.blockResponses <- &orderer.DeliverResponse{
   380  		Type: &orderer.DeliverResponse_Block{Block: block30WithConfigBlockOf21},
   381  	}
    382  	// Now we pull the latest config block, whose sequence was extracted from the block pulled previously.
    383  	// Beforehand we reconnect to the orderer, so we send an artificial signal to close the stream on the server side,
    384  	// and expect a new stream to be established.
    385  	osn.blockResponses <- nil
    386  	// The orderer's last block's sequence is 30.
   387  	osn.addExpectProbeAssert()
   388  	osn.enqueueResponse(30)
   389  	// And the Replicator now asks for block 21.
   390  	osn.enqueueResponse(21)
   391  	osn.addExpectPullAssert(21)
   392  
   393  	r := cluster.Replicator{
   394  		Filter: cluster.AnyChannel,
   395  		AmIPartOfChannel: func(configBlock *common.Block) error {
   396  			return errors.New("oops")
   397  		},
   398  		Logger:        flogging.MustGetLogger("test"),
   399  		SystemChannel: "system",
   400  		ChannelLister: channelLister,
   401  		Puller:        bp,
   402  	}
   403  
   404  	require.PanicsWithValue(t, "Failed classifying whether I belong to channel A: oops, skipping chain retrieval", func() {
   405  		r.ReplicateChains()
   406  	})
   407  
   408  	bp.Close()
   409  	dialer.assertAllConnectionsClosed(t)
   410  }
   411  
   412  func TestReplicateChainsGreenPath(t *testing.T) {
    413  	// Scenario: There are 5 channels in the system: A-E.
    414  	// We are in channel A but not in channel B, therefore
    415  	// we should pull channel A and then the system channel.
    416  	// However, this is not our node's first replication
    417  	// attempt, but its second:
    418  	// in the past, the node pulled 10 blocks of channel A and crashed.
    419  	// Therefore it should pull blocks for channel A, but commit
    420  	// only those starting from block number 10.
    421  	// For channel C - we are forbidden from pulling any blocks.
    422  	// Channel D is a deserted channel - all OSNs have left it,
    423  	// therefore we should not pull it at all.
    424  	// Channel E cannot be pulled at all, because the OSN is unavailable
    425  	// at that time.
   426  
   427  	systemChannelBlocks := createBlockChain(0, 21)
   428  	block30WithConfigBlockOf21 := protoutil.NewBlock(30, nil)
   429  	block30WithConfigBlockOf21.Metadata.Metadata[common.BlockMetadataIndex_LAST_CONFIG] = protoutil.MarshalOrPanic(&common.Metadata{
   430  		Value: protoutil.MarshalOrPanic(&common.LastConfig{Index: 21}),
   431  	})
   432  
   433  	osn := newClusterNode(t)
   434  	defer osn.stop()
   435  	osn.blockResponses = make(chan *orderer.DeliverResponse, 1000)
   436  
   437  	dialer := newCountingDialer()
   438  	bp := newBlockPuller(dialer, osn.srv.Address())
   439  	bp.FetchTimeout = time.Hour
   440  	bp.MaxPullBlockRetries = 1
   441  
   442  	channelLister := &mocks.ChannelLister{}
   443  	channelLister.On("Channels").Return([]cluster.ChannelGenesisBlock{
   444  		{ChannelName: "E", GenesisBlock: fakeGB},
   445  		{ChannelName: "D", GenesisBlock: fakeGB},
   446  		{ChannelName: "C", GenesisBlock: fakeGB},
   447  		{ChannelName: "A", GenesisBlock: fakeGB},
   448  		{ChannelName: "B", GenesisBlock: fakeGB},
   449  	})
   450  	channelLister.On("Close")
   451  
   452  	amIPartOfChannelMock := &mock.Mock{}
   453  	// For channel A
   454  	amIPartOfChannelMock.On("func13").Return(nil).Once()
   455  	// For channel B
   456  	amIPartOfChannelMock.On("func13").Return(cluster.ErrNotInChannel).Once()
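         	// "func13" is the runtime-derived name of the anonymous AmIPartOfChannel
         	// closure below; the two Once() expectations fire for channels A and B in turn.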
   457  
    458  	// 22 blocks are for the system channel, 31 are for channel A, and for channel B we only need 1 block (the genesis block).
   459  	blocksCommittedToLedgerA := make(chan *common.Block, 31)
   460  	blocksCommittedToLedgerB := make(chan *common.Block, 1)
   461  	blocksCommittedToLedgerC := make(chan *common.Block, 1)
   462  	blocksCommittedToLedgerD := make(chan *common.Block, 1)
   463  	blocksCommittedToLedgerE := make(chan *common.Block, 1)
   464  	blocksCommittedToSystemLedger := make(chan *common.Block, 22)
   465  	// Put 10 blocks in the ledger of channel A, to simulate
   466  	// that the ledger had blocks when the node started.
   467  	for seq := 0; seq < 10; seq++ {
   468  		blocksCommittedToLedgerA <- &common.Block{
   469  			Header: &common.BlockHeader{Number: uint64(seq)},
   470  		}
   471  	}
   472  
   473  	lwA := &mocks.LedgerWriter{}
   474  	lwA.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) {
   475  		blocksCommittedToLedgerA <- arg.Get(0).(*common.Block)
   476  	})
   477  	lwA.On("Height").Return(func() uint64 {
   478  		return uint64(len(blocksCommittedToLedgerA))
   479  	})
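         	// Heights are derived from the number of blocks committed so far, so the
         	// replicator sees height 10 for channel A and resumes pulling from there.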
   480  
   481  	lwB := &mocks.LedgerWriter{}
   482  	lwB.On("Height").Return(func() uint64 {
   483  		return uint64(len(blocksCommittedToLedgerB))
   484  	})
   485  	lwB.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) {
   486  		blocksCommittedToLedgerB <- arg.Get(0).(*common.Block)
   487  	})
   488  
   489  	lwC := &mocks.LedgerWriter{}
   490  	lwC.On("Height").Return(func() uint64 {
   491  		return uint64(len(blocksCommittedToLedgerC))
   492  	})
   493  	lwC.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) {
   494  		blocksCommittedToLedgerC <- arg.Get(0).(*common.Block)
   495  	})
   496  
   497  	lwD := &mocks.LedgerWriter{}
   498  	lwD.On("Height").Return(func() uint64 {
   499  		return uint64(len(blocksCommittedToLedgerD))
   500  	})
   501  	lwD.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) {
   502  		blocksCommittedToLedgerD <- arg.Get(0).(*common.Block)
   503  	})
   504  
   505  	lwE := &mocks.LedgerWriter{}
   506  	lwE.On("Height").Return(func() uint64 {
   507  		return uint64(len(blocksCommittedToLedgerE))
   508  	})
   509  	lwE.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) {
   510  		blocksCommittedToLedgerE <- arg.Get(0).(*common.Block)
   511  	})
   512  
   513  	lwSystem := &mocks.LedgerWriter{}
   514  	lwSystem.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) {
   515  		blocksCommittedToSystemLedger <- arg.Get(0).(*common.Block)
   516  	})
   517  	lwSystem.On("Height").Return(func() uint64 {
   518  		return uint64(len(blocksCommittedToSystemLedger))
   519  	})
   520  
   521  	lf := &mocks.LedgerFactory{}
   522  	lf.On("Close")
   523  	lf.On("GetOrCreate", "A").Return(lwA, nil)
   524  	lf.On("GetOrCreate", "B").Return(lwB, nil)
   525  	lf.On("GetOrCreate", "C").Return(lwC, nil)
   526  	lf.On("GetOrCreate", "D").Return(lwD, nil)
   527  	lf.On("GetOrCreate", "E").Return(lwE, nil)
   528  	lf.On("GetOrCreate", "system").Return(lwSystem, nil)
   529  
   530  	r := cluster.Replicator{
   531  		Filter:        cluster.AnyChannel,
   532  		LedgerFactory: lf,
   533  		AmIPartOfChannel: func(configBlock *common.Block) error {
   534  			return amIPartOfChannelMock.Called().Error(0)
   535  		},
   536  		Logger:        flogging.MustGetLogger("test"),
   537  		SystemChannel: "system",
   538  		ChannelLister: channelLister,
   539  		Puller:        bp,
   540  		BootBlock:     systemChannelBlocks[21],
   541  	}
   542  
   543  	// The first thing the orderer gets is a seek to channel E.
   544  	// Unfortunately, it's not available!
   545  	osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) {
   546  		// Ensure the seek came to the right channel
   547  		require.NotNil(osn.t, info.GetStart().GetNewest())
   548  		require.Equal(t, "E", actualChannel)
   549  	}
   550  	// Send an EOF down the stream.
   551  	osn.blockResponses <- nil
   552  
   553  	// The second thing the orderer gets is a seek to channel D,
   554  	// which is followed by a response of service unavailable
   555  	osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) {
   556  		// Ensure the seek came to the right channel
   557  		require.NotNil(osn.t, info.GetStart().GetNewest())
   558  		require.Equal(t, "D", actualChannel)
   559  	}
   560  	osn.blockResponses <- &orderer.DeliverResponse{
   561  		Type: &orderer.DeliverResponse_Status{
   562  			Status: common.Status_SERVICE_UNAVAILABLE,
   563  		},
   564  	}
   565  
   566  	// The third thing the orderer gets is a seek to channel C,
   567  	// which is followed by a response of forbidden
   568  	osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) {
   569  		// Ensure the seek came to the right channel
   570  		require.NotNil(osn.t, info.GetStart().GetNewest())
   571  		require.Equal(t, "C", actualChannel)
   572  	}
   573  
   574  	osn.blockResponses <- &orderer.DeliverResponse{
   575  		Type: &orderer.DeliverResponse_Status{
   576  			Status: common.Status_FORBIDDEN,
   577  		},
   578  	}
   579  
   580  	for _, channel := range []string{"A", "B"} {
    581  		channel := channel // pin the loop variable for the closures below
    582  		// First, the orderer needs to figure out whether it is in the channel,
    583  		// so it reaches out to all orderers to find the latest block, fetches
    584  		// the latest config block, and checks whether it is among the consenters.
   585  
   586  		// Orderer is expecting a poll for last block of the current channel
   587  		osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) {
   588  			// Ensure the seek came to the right channel
   589  			require.NotNil(osn.t, info.GetStart().GetNewest())
   590  			require.Equal(t, channel, actualChannel)
   591  		}
   592  
    593  		// The orderer returns that its last block is 30.
    594  		// This is needed to determine the latest height by comparing across all orderers.
    595  		osn.enqueueResponse(30)
    596  
    597  		// Then we probe again, from some orderer, for the block sequence we got previously.
   598  		osn.addExpectProbeAssert()
   599  		osn.enqueueResponse(30)
   600  
   601  		// And afterwards pull the block from the first orderer.
   602  		osn.addExpectPullAssert(30)
   603  		osn.blockResponses <- &orderer.DeliverResponse{
   604  			Type: &orderer.DeliverResponse_Block{Block: block30WithConfigBlockOf21},
   605  		}
   606  		// And the last config block is pulled via reconnecting to the orderer.
   607  		osn.blockResponses <- nil
    608  		// The orderer's last block's sequence is 30.
   609  		osn.addExpectProbeAssert()
   610  		osn.enqueueResponse(30)
   611  		// And the Replicator now asks for block 21.
   612  		osn.enqueueResponse(21)
   613  		osn.addExpectPullAssert(21)
   614  		// We always close the connection before attempting to pull the next block
   615  		osn.blockResponses <- nil
   616  	}
   617  
    618  	// Next, the Replicator figures out the latest block sequence for that chain,
    619  	// to know how far to pull.
    620  
    621  	// We expect a probe for channel A only, because we found we are not in channel B.
   622  	osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) {
   623  		// Ensure the seek came to the right channel
   624  		require.NotNil(osn.t, info.GetStart().GetNewest())
   625  		require.Equal(t, "A", actualChannel)
   626  	}
   627  	osn.enqueueResponse(30)
   628  	// From this point onwards, we pull the blocks for the chain.
   629  	osn.enqueueResponse(30)
   630  	osn.addExpectProbeAssert()
   631  	osn.addExpectPullAssert(10)
    632  	// Enqueue blocks 10 through 30 (21 blocks) in its belly
   633  	for _, block := range createBlockChain(10, 30) {
   634  		osn.blockResponses <- &orderer.DeliverResponse{
   635  			Type: &orderer.DeliverResponse_Block{Block: block},
   636  		}
   637  	}
   638  	// Signal the orderer to stop sending us blocks since we're going to reconnect
   639  	// to it to ask for the next channel
   640  	osn.blockResponses <- nil
   641  
    642  	// Now we define the assertions for the system channel:
    643  	// the seek, the probe, and then the pull of every block from sequence 0.
   644  	osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) {
   645  		// Ensure the seek came to the system channel.
   646  		require.NotNil(osn.t, info.GetStart().GetNewest())
   647  		require.Equal(t, "system", actualChannel)
   648  	}
   649  	osn.blockResponses <- &orderer.DeliverResponse{
   650  		Type: &orderer.DeliverResponse_Block{Block: systemChannelBlocks[21]},
   651  	}
   652  	osn.addExpectProbeAssert()
   653  	osn.enqueueResponse(21)
   654  	osn.addExpectPullAssert(0)
   655  	for _, block := range systemChannelBlocks {
   656  		osn.blockResponses <- &orderer.DeliverResponse{
   657  			Type: &orderer.DeliverResponse_Block{Block: block},
   658  		}
   659  	}
   660  
   661  	// This is where all the work is done.
   662  	// The above lines were all assertions and preparations
   663  	// for the expected flow of the test.
   664  	r.ReplicateChains()
   665  
    666  	// We replicated the chains, so all that is left is to ensure
    667  	// the blocks were committed in order, and that all blocks we expected
    668  	// to be committed (for channel A and the system channel) were committed.
   669  	close(blocksCommittedToLedgerA)
   670  	close(blocksCommittedToSystemLedger)
   671  	require.Len(t, blocksCommittedToLedgerA, cap(blocksCommittedToLedgerA))
   672  	require.Len(t, blocksCommittedToSystemLedger, cap(blocksCommittedToSystemLedger))
   673  	require.Len(t, blocksCommittedToLedgerB, 1)
   674  	require.Len(t, blocksCommittedToLedgerC, 1)
   675  	require.Len(t, blocksCommittedToLedgerD, 1)
   676  	require.Len(t, blocksCommittedToLedgerE, 1)
   677  	// Count the blocks for channel A
   678  	var expectedSequence uint64
   679  	for block := range blocksCommittedToLedgerA {
   680  		require.Equal(t, expectedSequence, block.Header.Number)
   681  		expectedSequence++
   682  	}
   683  
   684  	// Count the blocks for the system channel
   685  	expectedSequence = uint64(0)
   686  	for block := range blocksCommittedToSystemLedger {
   687  		require.Equal(t, expectedSequence, block.Header.Number)
   688  		expectedSequence++
   689  	}
   690  
   691  	bp.Close()
   692  	dialer.assertAllConnectionsClosed(t)
   693  }
   694  
   695  func TestParticipant(t *testing.T) {
   696  	for _, testCase := range []struct {
   697  		name                  string
   698  		heightsByEndpoints    map[string]uint64
   699  		heightsByEndpointsErr error
   700  		latestBlockSeq        uint64
   701  		latestBlock           *common.Block
   702  		latestConfigBlockSeq  uint64
   703  		latestConfigBlock     *common.Block
   704  		expectedError         string
   705  		predicateReturns      error
   706  	}{
   707  		{
   708  			name:          "No available orderer",
   709  			expectedError: cluster.ErrRetryCountExhausted.Error(),
   710  		},
   711  		{
   712  			name:                  "Unauthorized for the channel",
   713  			expectedError:         cluster.ErrForbidden.Error(),
   714  			heightsByEndpointsErr: cluster.ErrForbidden,
   715  		},
   716  		{
   717  			name:                  "No OSN services the channel",
   718  			expectedError:         cluster.ErrServiceUnavailable.Error(),
   719  			heightsByEndpointsErr: cluster.ErrServiceUnavailable,
   720  		},
   721  		{
   722  			name: "Pulled block has no metadata",
   723  			heightsByEndpoints: map[string]uint64{
   724  				"orderer.example.com:7050": 100,
   725  			},
   726  			latestBlockSeq: uint64(99),
   727  			latestBlock:    &common.Block{},
   728  			expectedError:  "failed to retrieve metadata: no metadata in block",
   729  		},
   730  		{
   731  			name: "Pulled block has no last config sequence in metadata",
   732  			heightsByEndpoints: map[string]uint64{
   733  				"orderer.example.com:7050": 100,
   734  			},
   735  			latestBlockSeq: uint64(99),
   736  			latestBlock: &common.Block{
   737  				Metadata: &common.BlockMetadata{
   738  					Metadata: nil,
   739  				},
   740  			},
   741  			expectedError: "failed to retrieve metadata: no metadata at index [SIGNATURES]",
   742  		},
   743  		{
   744  			name: "Pulled block's SIGNATURES metadata is malformed",
   745  			heightsByEndpoints: map[string]uint64{
   746  				"orderer.example.com:7050": 100,
   747  			},
   748  			latestBlockSeq: uint64(99),
   749  			latestBlock: &common.Block{
   750  				Metadata: &common.BlockMetadata{
   751  					Metadata: [][]byte{{1, 2, 3}},
   752  				},
   753  			},
   754  			expectedError: "failed to retrieve metadata: error unmarshalling metadata" +
   755  				" at index [SIGNATURES]: proto: common.Metadata: illegal tag 0 (wire type 1)",
   756  		},
   757  		{
   758  			name: "Pulled block's LAST_CONFIG metadata is malformed",
   759  			heightsByEndpoints: map[string]uint64{
   760  				"orderer.example.com:7050": 100,
   761  			},
   762  			latestBlockSeq: uint64(99),
   763  			latestBlock: &common.Block{
   764  				Metadata: &common.BlockMetadata{
   765  					Metadata: [][]byte{{}, {1, 2, 3}},
   766  				},
   767  			},
   768  			expectedError: "failed to retrieve metadata: error unmarshalling metadata" +
   769  				" at index [LAST_CONFIG]: proto: common.Metadata: illegal tag 0 (wire type 1)",
   770  		},
   771  		{
   772  			name: "Pulled block's metadata is valid and has a last config",
   773  			heightsByEndpoints: map[string]uint64{
   774  				"orderer.example.com:7050": 100,
   775  			},
   776  			latestBlockSeq: uint64(99),
   777  			latestBlock: &common.Block{
   778  				Metadata: &common.BlockMetadata{
   779  					Metadata: [][]byte{protoutil.MarshalOrPanic(&common.Metadata{
   780  						Value: protoutil.MarshalOrPanic(&common.OrdererBlockMetadata{
   781  							LastConfig: &common.LastConfig{Index: 42},
   782  						}),
   783  					})},
   784  				},
   785  			},
   786  			latestConfigBlockSeq: 42,
   787  			latestConfigBlock:    &common.Block{Header: &common.BlockHeader{Number: 42}},
   788  			predicateReturns:     cluster.ErrNotInChannel,
   789  		},
   790  		{
   791  			name:          "Failed pulling last block",
   792  			expectedError: cluster.ErrRetryCountExhausted.Error(),
   793  			heightsByEndpoints: map[string]uint64{
   794  				"orderer.example.com:7050": 100,
   795  			},
   796  			latestBlockSeq: uint64(99),
   797  			latestBlock:    nil,
   798  		},
   799  		{
   800  			name:          "Failed pulling last config block",
   801  			expectedError: cluster.ErrRetryCountExhausted.Error(),
   802  			heightsByEndpoints: map[string]uint64{
   803  				"orderer.example.com:7050": 100,
   804  			},
   805  			latestBlockSeq: uint64(99),
   806  			latestBlock: &common.Block{
   807  				Metadata: &common.BlockMetadata{
   808  					Metadata: [][]byte{protoutil.MarshalOrPanic(&common.Metadata{
   809  						Value: protoutil.MarshalOrPanic(&common.OrdererBlockMetadata{
   810  							LastConfig: &common.LastConfig{Index: 42},
   811  						}),
   812  					})},
   813  				},
   814  			},
   815  			latestConfigBlockSeq: 42,
   816  			latestConfigBlock:    nil,
   817  		},
   818  	} {
   819  		t.Run(testCase.name, func(t *testing.T) {
   820  			configBlocks := make(chan *common.Block, 1)
   821  			predicate := func(configBlock *common.Block) error {
   822  				configBlocks <- configBlock
   823  				return testCase.predicateReturns
   824  			}
   825  			puller := &mocks.ChainPuller{}
   826  			puller.On("HeightsByEndpoints").Return(testCase.heightsByEndpoints, testCase.heightsByEndpointsErr)
   827  			puller.On("PullBlock", testCase.latestBlockSeq).Return(testCase.latestBlock)
   828  			puller.On("PullBlock", testCase.latestConfigBlockSeq).Return(testCase.latestConfigBlock)
   829  			puller.On("Close")
   830  
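         			// Participant pulls the latest block, reads the LAST_CONFIG index from
         			// its metadata, pulls that config block, and hands it to the predicate.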
   831  			err := cluster.Participant(puller, predicate)
   832  			if testCase.expectedError != "" {
   833  				require.EqualError(t, err, testCase.expectedError)
   834  				require.Len(t, configBlocks, 0)
   835  			} else {
   836  				require.Len(t, configBlocks, 1)
   837  				require.Equal(t, testCase.predicateReturns, err)
   838  			}
   839  		})
   840  	}
   841  }
   842  
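         // TestBlockPullerFromConfigBlockFailures exercises the ways constructing a
         // puller from a config block can fail: nil or invalid blocks, a corrupt
         // envelope, and undecodable TLS material.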
   843  func TestBlockPullerFromConfigBlockFailures(t *testing.T) {
   844  	blockBytes, err := ioutil.ReadFile("testdata/mychannel.block")
   845  	require.NoError(t, err)
   846  
   847  	validBlock := &common.Block{}
   848  	require.NoError(t, proto.Unmarshal(blockBytes, validBlock))
   849  
   850  	cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
   851  	require.NoError(t, err)
   852  
   853  	for _, testCase := range []struct {
   854  		name         string
   855  		expectedErr  string
   856  		pullerConfig cluster.PullerConfig
   857  		block        *common.Block
   858  	}{
   859  		{
   860  			name:        "nil block",
   861  			expectedErr: "nil block",
   862  		},
   863  		{
   864  			name:        "invalid block",
   865  			expectedErr: "block data is nil",
   866  			block:       &common.Block{},
   867  		},
   868  		{
   869  			name: "bad envelope inside block",
   870  			expectedErr: "failed extracting bundle from envelope: " +
   871  				"failed to unmarshal payload from envelope: " +
   872  				"error unmarshalling Payload: " +
   873  				"proto: common.Payload: illegal tag 0 (wire type 1)",
   874  			block: &common.Block{
   875  				Data: &common.BlockData{
   876  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
   877  						Payload: []byte{1, 2, 3},
   878  					})},
   879  				},
   880  			},
   881  		},
   882  		{
   883  			name:        "invalid TLS certificate",
   884  			expectedErr: "unable to decode TLS certificate PEM: ////",
   885  			block:       validBlock,
   886  			pullerConfig: cluster.PullerConfig{
   887  				TLSCert: []byte{255, 255, 255},
   888  			},
   889  		},
   890  	} {
   891  		t.Run(testCase.name, func(t *testing.T) {
   892  			verifierRetriever := &mocks.VerifierRetriever{}
   893  			verifierRetriever.On("RetrieveVerifier", mock.Anything).Return(&cluster.NoopBlockVerifier{})
   894  			bp, err := cluster.BlockPullerFromConfigBlock(testCase.pullerConfig, testCase.block, verifierRetriever, cryptoProvider)
   895  			require.EqualError(t, err, testCase.expectedErr)
   896  			require.Nil(t, bp)
   897  		})
   898  	}
   899  }
   900  
   901  func testBlockPullerFromConfig(t *testing.T, blockVerifiers []cluster.BlockVerifier, expectedLogMsg string, iterations int) {
   902  	verifierRetriever := &mocks.VerifierRetriever{}
   903  	for _, blockVerifier := range blockVerifiers {
   904  		verifierRetriever.On("RetrieveVerifier", mock.Anything).Return(blockVerifier).Once()
   905  	}
   906  
   907  	caCert, err := ioutil.ReadFile(filepath.Join("testdata", "ca.crt"))
   908  	require.NoError(t, err)
   909  
   910  	tlsCert, err := ioutil.ReadFile(filepath.Join("testdata", "server.crt"))
   911  	require.NoError(t, err)
   912  
   913  	tlsKey, err := ioutil.ReadFile(filepath.Join("testdata", "server.key"))
   914  	require.NoError(t, err)
   915  
   916  	osn := newClusterNode(t)
   917  	osn.srv.Stop()
   918  	// Replace the gRPC server with a TLS one
   919  	osn.srv, err = comm.NewGRPCServer("127.0.0.1:0", comm.ServerConfig{
   920  		SecOpts: comm.SecureOptions{
   921  			Key:               tlsKey,
   922  			RequireClientCert: true,
   923  			Certificate:       tlsCert,
   924  			ClientRootCAs:     [][]byte{caCert},
   925  			UseTLS:            true,
   926  		},
   927  	})
   928  	require.NoError(t, err)
   929  	orderer.RegisterAtomicBroadcastServer(osn.srv.Server(), osn)
   930  	// And start it
   931  	go osn.srv.Start()
   932  	defer osn.stop()
   933  
   934  	// Start from a valid configuration block
   935  	blockBytes, err := ioutil.ReadFile(filepath.Join("testdata", "mychannel.block"))
   936  	require.NoError(t, err)
   937  
   938  	validBlock := &common.Block{}
   939  	require.NoError(t, proto.Unmarshal(blockBytes, validBlock))
   940  
    941  	// And inject into it a 127.0.0.1 orderer endpoint and a new TLS CA certificate.
   942  	injectTLSCACert(t, validBlock, caCert)
   943  	injectGlobalOrdererEndpoint(t, validBlock, osn.srv.Address())
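         	// Recompute the data hash, since the injections above mutated the block's data.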
   944  	validBlock.Header.DataHash = protoutil.BlockDataHash(validBlock.Data)
   945  
   946  	for attempt := 0; attempt < iterations; attempt++ {
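         		// Each iteration serves the same block twice (probe, then pull) and then
         		// a nil that closes the stream, matching the assertions queued below.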
   947  		blockMsg := &orderer.DeliverResponse_Block{
   948  			Block: validBlock,
   949  		}
   950  
   951  		osn.blockResponses <- &orderer.DeliverResponse{
   952  			Type: blockMsg,
   953  		}
   954  
   955  		osn.blockResponses <- &orderer.DeliverResponse{
   956  			Type: blockMsg,
   957  		}
   958  
   959  		osn.blockResponses <- nil
   960  
   961  		osn.addExpectProbeAssert()
   962  		osn.addExpectPullAssert(0)
   963  	}
   964  
   965  	cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
   966  	require.NoError(t, err)
   967  
   968  	bp, err := cluster.BlockPullerFromConfigBlock(cluster.PullerConfig{
   969  		TLSCert:             tlsCert,
   970  		TLSKey:              tlsKey,
   971  		MaxTotalBufferBytes: 1,
   972  		Channel:             "mychannel",
   973  		Signer:              &mocks.SignerSerializer{},
   974  		Timeout:             time.Hour,
   975  	}, validBlock, verifierRetriever, cryptoProvider)
    976  	require.NoError(t, err) // check construction succeeded before dereferencing bp
    977  	bp.RetryTimeout = time.Millisecond * 10
   978  	defer bp.Close()
   979  
   980  	var seenExpectedLogMsg bool
   981  
   982  	bp.Logger = bp.Logger.WithOptions(zap.Hooks(func(entry zapcore.Entry) error {
   983  		if strings.Contains(entry.Message, expectedLogMsg) {
   984  			seenExpectedLogMsg = true
   985  		}
   986  		return nil
   987  	}))
   988  
   989  	block := bp.PullBlock(0)
   990  	require.Equal(t, uint64(0), block.Header.Number)
   991  	require.True(t, seenExpectedLogMsg)
   992  }
   993  
   994  func TestSkipPullingPulledChannels(t *testing.T) {
   995  	blockchain := createBlockChain(0, 5)
   996  	lw := &mocks.LedgerWriter{}
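         	// Height 6 means blocks 0-5 are already committed, matching the orderer's
         	// latest block (5), so the pull should be skipped.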
   997  	lw.On("Height").Return(uint64(6))
   998  
   999  	lf := &mocks.LedgerFactory{}
  1000  	lf.On("GetOrCreate", "mychannel").Return(lw, nil)
  1001  
  1002  	osn := newClusterNode(t)
  1003  	defer osn.stop()
  1004  
  1005  	enqueueBlock := func(seq int) {
  1006  		osn.blockResponses <- &orderer.DeliverResponse{
  1007  			Type: &orderer.DeliverResponse_Block{
  1008  				Block: blockchain[seq],
  1009  			},
  1010  		}
  1011  	}
  1012  
  1013  	dialer := newCountingDialer()
  1014  	bp := newBlockPuller(dialer, osn.srv.Address())
  1015  	bp.FetchTimeout = time.Hour
  1016  
  1017  	r := cluster.Replicator{
  1018  		Filter: cluster.AnyChannel,
  1019  		AmIPartOfChannel: func(configBlock *common.Block) error {
  1020  			return nil
  1021  		},
  1022  		Logger:        flogging.MustGetLogger("test"),
  1023  		SystemChannel: "system",
  1024  		LedgerFactory: lf,
  1025  		Puller:        bp,
  1026  	}
  1027  
   1028  	var detectedChannelSkipped bool
  1029  	r.Logger = r.Logger.WithOptions(zap.Hooks(func(entry zapcore.Entry) error {
  1030  		if strings.Contains(entry.Message, "Latest height found (6) is equal to our height, skipping pulling channel mychannel") {
   1031  			detectedChannelSkipped = true
  1032  		}
  1033  		return nil
  1034  	}))
  1035  
  1036  	osn.addExpectProbeAssert()
  1037  	enqueueBlock(5)
  1038  	osn.addExpectProbeAssert()
  1039  	enqueueBlock(5)
  1040  
  1041  	err := r.PullChannel("mychannel")
  1042  	require.NoError(t, err)
   1043  	require.True(t, detectedChannelSkipped)
  1044  }
  1045  
  1046  func TestBlockPullerFromConfigBlockGreenPath(t *testing.T) {
  1047  	for _, testCase := range []struct {
  1048  		description        string
  1049  		blockVerifiers     []cluster.BlockVerifier
  1050  		expectedLogMessage string
  1051  		iterations         int
  1052  	}{
  1053  		{
  1054  			description:        "Success",
  1055  			blockVerifiers:     []cluster.BlockVerifier{&cluster.NoopBlockVerifier{}},
  1056  			expectedLogMessage: "Got block [0] of size",
  1057  			iterations:         1,
  1058  		},
  1059  		{
  1060  			description: "Failure",
  1061  			iterations:  2,
   1062  			// The first time it returns nil; the second time, as in the success case.
  1063  			blockVerifiers: []cluster.BlockVerifier{nil, &cluster.NoopBlockVerifier{}},
  1064  			expectedLogMessage: "Failed verifying received blocks: " +
  1065  				"couldn't acquire verifier for channel mychannel",
  1066  		},
  1067  	} {
  1068  		t.Run(testCase.description, func(t *testing.T) {
  1069  			testBlockPullerFromConfig(t, testCase.blockVerifiers,
  1070  				testCase.expectedLogMessage, testCase.iterations)
  1071  		})
  1072  	}
  1073  }
  1074  
  1075  func TestNoopBlockVerifier(t *testing.T) {
  1076  	v := &cluster.NoopBlockVerifier{}
  1077  	require.Nil(t, v.VerifyBlockSignature(nil, nil))
  1078  }
  1079  
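         // injectGlobalOrdererEndpoint rewrites the config block so that the global
         // orderer addresses contain only the given endpoint, and clears any per-org
         // endpoint values.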
  1080  func injectGlobalOrdererEndpoint(t *testing.T, block *common.Block, endpoint string) {
  1081  	ordererAddresses := channelconfig.OrdererAddressesValue([]string{endpoint})
  1082  	// Unwrap the layers until we reach the orderer addresses
  1083  	env, err := protoutil.ExtractEnvelope(block, 0)
  1084  	require.NoError(t, err)
  1085  	payload, err := protoutil.UnmarshalPayload(env.Payload)
  1086  	require.NoError(t, err)
  1087  	confEnv, err := configtx.UnmarshalConfigEnvelope(payload.Data)
  1088  	require.NoError(t, err)
  1089  	// Replace the orderer addresses
  1090  	confEnv.Config.ChannelGroup.Values[ordererAddresses.Key()] = &common.ConfigValue{
  1091  		Value:     protoutil.MarshalOrPanic(ordererAddresses.Value()),
  1092  		ModPolicy: "/Channel/Orderer/Admins",
  1093  	}
  1094  	// Remove the per org addresses, if applicable
  1095  	ordererGrps := confEnv.Config.ChannelGroup.Groups[channelconfig.OrdererGroupKey].Groups
  1096  	for _, grp := range ordererGrps {
  1097  		if grp.Values[channelconfig.EndpointsKey] == nil {
  1098  			continue
  1099  		}
  1100  		grp.Values[channelconfig.EndpointsKey].Value = nil
  1101  	}
  1102  	// And put it back into the block
  1103  	payload.Data = protoutil.MarshalOrPanic(confEnv)
  1104  	env.Payload = protoutil.MarshalOrPanic(payload)
  1105  	block.Data.Data[0] = protoutil.MarshalOrPanic(env)
  1106  }
  1107  
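         // injectTLSCACert replaces OrdererOrg's TLS root certificates in the config
         // block with the given CA certificate.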
  1108  func injectTLSCACert(t *testing.T, block *common.Block, tlsCA []byte) {
  1109  	// Unwrap the layers until we reach the TLS CA certificates
  1110  	env, err := protoutil.ExtractEnvelope(block, 0)
  1111  	require.NoError(t, err)
  1112  	payload, err := protoutil.UnmarshalPayload(env.Payload)
  1113  	require.NoError(t, err)
  1114  	confEnv, err := configtx.UnmarshalConfigEnvelope(payload.Data)
  1115  	require.NoError(t, err)
  1116  	mspKey := confEnv.Config.ChannelGroup.Groups[channelconfig.OrdererGroupKey].Groups["OrdererOrg"].Values[channelconfig.MSPKey]
  1117  	rawMSPConfig := mspKey.Value
  1118  	mspConf := &msp.MSPConfig{}
   1119  	require.NoError(t, proto.Unmarshal(rawMSPConfig, mspConf))
   1120  	fabricMSPConf := &msp.FabricMSPConfig{}
   1121  	require.NoError(t, proto.Unmarshal(mspConf.Config, fabricMSPConf))
  1122  	// Replace the TLS root certs with the given ones
  1123  	fabricMSPConf.TlsRootCerts = [][]byte{tlsCA}
  1124  	// And put it back into the block
  1125  	mspConf.Config = protoutil.MarshalOrPanic(fabricMSPConf)
  1126  	mspKey.Value = protoutil.MarshalOrPanic(mspConf)
  1127  	payload.Data = protoutil.MarshalOrPanic(confEnv)
  1128  	env.Payload = protoutil.MarshalOrPanic(payload)
  1129  	block.Data.Data[0] = protoutil.MarshalOrPanic(env)
  1130  }
  1131  
  1132  func TestExtractGenesisBlock(t *testing.T) {
  1133  	for _, testCase := range []struct {
  1134  		name               string
  1135  		expectedErr        string
  1136  		returnedName       string
  1137  		block              *common.Block
  1138  		returnGenesisBlock bool
  1139  	}{
  1140  		{
  1141  			name:        "nil block",
  1142  			expectedErr: "nil block",
  1143  		},
  1144  		{
  1145  			name:        "no data section in block",
  1146  			expectedErr: "block data is nil",
  1147  			block:       &common.Block{},
  1148  		},
  1149  		{
  1150  			name: "corrupt envelope in block",
  1151  			expectedErr: "block data does not carry an" +
  1152  				" envelope at index 0: error unmarshalling Envelope: proto: common.Envelope: illegal tag 0 (wire type 1)",
  1153  			block: &common.Block{
  1154  				Data: &common.BlockData{
  1155  					Data: [][]byte{{1, 2, 3}},
  1156  				},
  1157  			},
  1158  		},
  1159  		{
  1160  			name:        "corrupt payload in envelope",
  1161  			expectedErr: "error unmarshalling Payload: proto: common.Payload: illegal tag 0 (wire type 1)",
  1162  			block: &common.Block{
  1163  				Data: &common.BlockData{
  1164  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1165  						Payload: []byte{1, 2, 3},
  1166  					})},
  1167  				},
  1168  			},
  1169  		},
  1170  		{
  1171  			name:        "no header in block",
  1172  			expectedErr: "nil header in payload",
  1173  			block: &common.Block{
  1174  				Data: &common.BlockData{
  1175  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1176  						Payload: protoutil.MarshalOrPanic(&common.Payload{}),
  1177  					})},
  1178  				},
  1179  			},
  1180  		},
  1181  		{
  1182  			name: "corrupt channel header",
  1183  			expectedErr: "error unmarshalling ChannelHeader:" +
  1184  				" proto: common.ChannelHeader: illegal tag 0 (wire type 1)",
  1185  			block: &common.Block{
  1186  				Data: &common.BlockData{
  1187  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1188  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1189  							Header: &common.Header{
  1190  								ChannelHeader: []byte{1, 2, 3},
  1191  							},
  1192  						}),
  1193  					})},
  1194  				},
  1195  			},
  1196  		},
  1197  		{
  1198  			name:        "not an orderer transaction",
  1199  			expectedErr: "",
  1200  			block: &common.Block{
  1201  				Data: &common.BlockData{
  1202  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1203  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1204  							Header: &common.Header{
  1205  								ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1206  									Type: int32(common.HeaderType_CONFIG_UPDATE),
  1207  								}),
  1208  							},
  1209  						}),
  1210  					})},
  1211  				},
  1212  			},
  1213  		},
  1214  		{
  1215  			name:        "orderer transaction with corrupt inner envelope",
  1216  			expectedErr: "error unmarshalling Envelope: proto: common.Envelope: illegal tag 0 (wire type 1)",
  1217  			block: &common.Block{
  1218  				Data: &common.BlockData{
  1219  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1220  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1221  							Header: &common.Header{
  1222  								ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1223  									Type: int32(common.HeaderType_ORDERER_TRANSACTION),
  1224  								}),
  1225  							},
  1226  							Data: []byte{1, 2, 3},
  1227  						}),
  1228  					})},
  1229  				},
  1230  			},
  1231  		},
  1232  		{
  1233  			name:        "orderer transaction with corrupt inner payload",
  1234  			expectedErr: "error unmarshalling Payload: proto: common.Payload: illegal tag 0 (wire type 1)",
  1235  			block: &common.Block{
  1236  				Data: &common.BlockData{
  1237  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1238  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1239  							Header: &common.Header{
  1240  								ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1241  									Type: int32(common.HeaderType_ORDERER_TRANSACTION),
  1242  								}),
  1243  							},
  1244  							Data: protoutil.MarshalOrPanic(&common.Envelope{
  1245  								Payload: []byte{1, 2, 3},
  1246  							}),
  1247  						}),
  1248  					})},
  1249  				},
  1250  			},
  1251  		},
  1252  		{
  1253  			name:        "orderer transaction with nil inner header",
  1254  			expectedErr: "inner payload's header is nil",
  1255  			block: &common.Block{
  1256  				Data: &common.BlockData{
  1257  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1258  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1259  							Header: &common.Header{
  1260  								ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1261  									Type: int32(common.HeaderType_ORDERER_TRANSACTION),
  1262  								}),
  1263  							},
  1264  							Data: protoutil.MarshalOrPanic(&common.Envelope{
  1265  								Payload: protoutil.MarshalOrPanic(&common.Payload{}),
  1266  							}),
  1267  						}),
  1268  					})},
  1269  				},
  1270  			},
  1271  		},
  1272  		{
  1273  			name:        "orderer transaction with corrupt inner channel header",
  1274  			expectedErr: "error unmarshalling ChannelHeader: proto: common.ChannelHeader: illegal tag 0 (wire type 1)",
  1275  			block: &common.Block{
  1276  				Data: &common.BlockData{
  1277  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1278  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1279  							Header: &common.Header{
  1280  								ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1281  									Type: int32(common.HeaderType_ORDERER_TRANSACTION),
  1282  								}),
  1283  							},
  1284  							Data: protoutil.MarshalOrPanic(&common.Envelope{
  1285  								Payload: protoutil.MarshalOrPanic(&common.Payload{
  1286  									Header: &common.Header{
  1287  										ChannelHeader: []byte{1, 2, 3},
  1288  									},
  1289  								}),
  1290  							}),
  1291  						}),
  1292  					})},
  1293  				},
  1294  			},
  1295  		},
  1296  		{
  1297  			name:        "orderer transaction that is not a config, but a config update",
  1298  			expectedErr: "",
  1299  			block: &common.Block{
  1300  				Data: &common.BlockData{
  1301  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1302  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1303  							Header: &common.Header{
  1304  								ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1305  									Type: int32(common.HeaderType_ORDERER_TRANSACTION),
  1306  								}),
  1307  							},
  1308  							Data: protoutil.MarshalOrPanic(&common.Envelope{
  1309  								Payload: protoutil.MarshalOrPanic(&common.Payload{
  1310  									Header: &common.Header{
  1311  										ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1312  											Type: int32(common.HeaderType_CONFIG_UPDATE),
  1313  										}),
  1314  									},
  1315  								}),
  1316  							}),
  1317  						}),
  1318  					})},
  1319  				},
  1320  			},
  1321  		},
  1322  		{
  1323  			expectedErr: "",
  1324  			name:        "orderer transaction that is a system channel config block",
  1325  			block: &common.Block{
  1326  				Data: &common.BlockData{
  1327  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1328  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1329  							Header: &common.Header{
  1330  								ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1331  									ChannelId: "systemChannel",
  1332  									Type:      int32(common.HeaderType_ORDERER_TRANSACTION),
  1333  								}),
  1334  							},
  1335  							Data: protoutil.MarshalOrPanic(&common.Envelope{
  1336  								Payload: protoutil.MarshalOrPanic(&common.Payload{
  1337  									Header: &common.Header{
  1338  										ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1339  											Type:      int32(common.HeaderType_CONFIG),
  1340  											ChannelId: "systemChannel",
  1341  										}),
  1342  									},
  1343  								}),
  1344  							}),
  1345  						}),
  1346  					})},
  1347  				},
  1348  			},
  1349  		},
  1350  		{
  1351  			name:         "orderer transaction that creates a new application channel",
  1352  			expectedErr:  "",
  1353  			returnedName: "notSystemChannel",
  1354  			block: &common.Block{
  1355  				Data: &common.BlockData{
  1356  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1357  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1358  							Header: &common.Header{
  1359  								ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1360  									ChannelId: "systemChannel",
  1361  									Type:      int32(common.HeaderType_ORDERER_TRANSACTION),
  1362  								}),
  1363  							},
  1364  							Data: protoutil.MarshalOrPanic(&common.Envelope{
  1365  								Payload: protoutil.MarshalOrPanic(&common.Payload{
  1366  									Header: &common.Header{
  1367  										ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1368  											Type:      int32(common.HeaderType_CONFIG),
  1369  											ChannelId: "notSystemChannel",
  1370  										}),
  1371  									},
  1372  								}),
  1373  							}),
  1374  						}),
  1375  					})},
  1376  				},
  1377  			},
  1378  			returnGenesisBlock: true,
  1379  		},
  1380  	} {
  1381  		t.Run(testCase.name, func(t *testing.T) {
  1382  			channelName, gb, err := cluster.ExtractGenesisBlock(flogging.MustGetLogger("test"), testCase.block)
  1383  			if testCase.expectedErr != "" {
  1384  				require.EqualError(t, err, testCase.expectedErr)
  1385  			} else {
  1386  				require.NoError(t, err)
  1387  			}
  1388  			require.Equal(t, testCase.returnedName, channelName)
  1389  			if testCase.returnGenesisBlock {
  1390  				require.NotNil(t, gb)
  1391  			} else {
  1392  				require.Nil(t, gb)
  1393  			}
  1394  		})
  1395  	}
  1396  }
  1397  
  1398  func TestChannels(t *testing.T) {
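         	// makeBlock wraps a CONFIG transaction for innerChannelName inside an
         	// ORDERER_TRANSACTION on outerChannelName, mimicking a channel-creation tx.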
  1399  	makeBlock := func(outerChannelName, innerChannelName string) *common.Block {
  1400  		return &common.Block{
  1401  			Header: &common.BlockHeader{},
  1402  			Data: &common.BlockData{
  1403  				Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1404  					Payload: protoutil.MarshalOrPanic(&common.Payload{
  1405  						Header: &common.Header{
  1406  							ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1407  								ChannelId: outerChannelName,
  1408  								Type:      int32(common.HeaderType_ORDERER_TRANSACTION),
  1409  							}),
  1410  						},
  1411  						Data: protoutil.MarshalOrPanic(&common.Envelope{
  1412  							Payload: protoutil.MarshalOrPanic(&common.Payload{
  1413  								Header: &common.Header{
  1414  									ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1415  										Type:      int32(common.HeaderType_CONFIG),
  1416  										ChannelId: innerChannelName,
  1417  									}),
  1418  								},
  1419  							}),
  1420  						}),
  1421  					}),
  1422  				})},
  1423  			},
  1424  		}
  1425  	}
  1426  
  1427  	for _, testCase := range []struct {
  1428  		name               string
  1429  		prepareSystemChain func(systemChain []*common.Block)
  1430  		assertion          func(t *testing.T, ci *cluster.ChainInspector)
  1431  	}{
  1432  		{
  1433  			name: "happy path - artificial blocks",
  1434  			prepareSystemChain: func(systemChain []*common.Block) {
  1435  				assignHashes(systemChain)
  1436  			},
  1437  			assertion: func(t *testing.T, ci *cluster.ChainInspector) {
  1438  				actual := cluster.GenesisBlocks(ci.Channels())
  1439  				// The channels may be returned in any order, so accept either permutation
  1440  				require.Contains(t, [][]string{{"mychannel", "mychannel2"}, {"mychannel2", "mychannel"}}, actual.Names())
  1441  			},
  1442  		},
  1443  		{
  1444  			name: "happy path - one block is not artificial but real",
  1445  			prepareSystemChain: func(systemChain []*common.Block) {
  1446  				blockbytes, err := ioutil.ReadFile(filepath.Join("testdata", "block3.pb"))
  1447  				require.NoError(t, err)
  1448  				block := &common.Block{}
  1449  				err = proto.Unmarshal(blockbytes, block)
  1450  				require.NoError(t, err)
  1451  
  1452  				systemChain[len(systemChain)/2-1] = block
  1453  				assignHashes(systemChain)
  1454  			},
  1455  			assertion: func(t *testing.T, ci *cluster.ChainInspector) {
  1456  				actual := cluster.GenesisBlocks(ci.Channels())
  1457  				// The channels may be returned in any order, so accept either permutation
  1458  				require.Contains(t, [][]string{{"mychannel2", "bar"}, {"bar", "mychannel2"}}, actual.Names())
  1459  			},
  1460  		},
  1461  		{
  1462  			name: "bad path - pulled chain's last block hash doesn't match the last config block",
  1463  			prepareSystemChain: func(systemChain []*common.Block) {
  1464  				assignHashes(systemChain)
  1465  				systemChain[len(systemChain)-1].Header.PreviousHash = nil
  1466  			},
  1467  			assertion: func(t *testing.T, ci *cluster.ChainInspector) {
  1468  				panicValue := "System channel pulled doesn't match the boot last config block:" +
  1469  					" block [2]'s hash (bc4ef5cc8a61ac0747cc82df58bac9ad3278622c1cfc7a119b9b1068e422c9f1)" +
  1470  					" mismatches block [3]'s prev block hash ()"
  1471  				require.PanicsWithValue(t, panicValue, func() {
  1472  					ci.Channels()
  1473  				})
  1474  			},
  1475  		},
  1476  		{
  1477  			name: "bad path - hash chain mismatch",
  1478  			prepareSystemChain: func(systemChain []*common.Block) {
  1479  				assignHashes(systemChain)
  1480  				systemChain[len(systemChain)-2].Header.PreviousHash = nil
  1481  			},
  1482  			assertion: func(t *testing.T, ci *cluster.ChainInspector) {
  1483  				panicValue := "Claimed previous hash of block [2] is  but actual previous " +
  1484  					"hash is 920faeb0bd8a02b3f2553247359fb3b684819c75c6e5487bc7eed632841ddc5f"
  1485  				require.PanicsWithValue(t, panicValue, func() {
  1486  					ci.Channels()
  1487  				})
  1488  			},
  1489  		},
  1490  		{
  1491  			name: "bad path - a block cannot be classified",
  1492  			prepareSystemChain: func(systemChain []*common.Block) {
  1493  				assignHashes(systemChain)
  1494  				systemChain[len(systemChain)-2].Data.Data = [][]byte{{1, 2, 3}}
  1495  			},
  1496  			assertion: func(t *testing.T, ci *cluster.ChainInspector) {
  1497  				panicValue := "Failed extracting channel genesis block from config block: " +
  1498  					"block data does not carry an envelope at index 0: error unmarshalling " +
  1499  					"Envelope: proto: common.Envelope: illegal tag 0 (wire type 1)"
  1500  				require.PanicsWithValue(t, panicValue, func() {
  1501  					ci.Channels()
  1502  				})
  1503  			},
  1504  		},
  1505  		{
  1506  			name: "bad path - failed pulling blocks",
  1507  			prepareSystemChain: func(systemChain []*common.Block) {
  1508  				assignHashes(systemChain)
  1509  				// Setting a block to nil makes the mock block puller return nil,
  1510  				// which signals a failure to pull that block.
  1511  				systemChain[len(systemChain)/2] = nil
  1512  			},
  1513  			assertion: func(t *testing.T, ci *cluster.ChainInspector) {
  1514  				panicValue := "Failed pulling block [2] from the system channel"
  1515  				require.PanicsWithValue(t, panicValue, func() {
  1516  					ci.Channels()
  1517  				})
  1518  			},
  1519  		},
  1520  	} {
  1521  		t.Run(testCase.name, func(t *testing.T) {
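        			// Blocks 1 and 2 carry genesis blocks for mychannel and mychannel2;
        			// blocks 0 and 3 reconfigure the system channel itself, so the
        			// happy-path assertions expect only the two application channels
        			// from Channels().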
  1522  			systemChain := []*common.Block{
  1523  				makeBlock("systemChannel", "systemChannel"),
  1524  				makeBlock("systemChannel", "mychannel"),
  1525  				makeBlock("systemChannel", "mychannel2"),
  1526  				makeBlock("systemChannel", "systemChannel"),
  1527  			}
  1528  
  1529  			for i := 0; i < len(systemChain); i++ {
  1530  				systemChain[i].Header.DataHash = protoutil.BlockDataHash(systemChain[i].Data)
  1531  				systemChain[i].Header.Number = uint64(i)
  1532  			}
  1533  			testCase.prepareSystemChain(systemChain)
  1534  			puller := &mocks.ChainPuller{}
  1535  			puller.On("Close")
  1536  			for seq := uint64(0); int(seq) < len(systemChain)-1; seq++ {
  1537  				puller.On("PullBlock", seq).Return(systemChain[int(seq)])
  1538  			}
  1539  
  1540  			ci := &cluster.ChainInspector{
  1541  				Logger:          flogging.MustGetLogger("test"),
  1542  				Puller:          puller,
  1543  				LastConfigBlock: systemChain[len(systemChain)-1],
  1544  			}
  1545  			defer puller.AssertNumberOfCalls(t, "Close", 1)
  1546  			defer ci.Close()
  1547  			testCase.assertion(t, ci)
  1548  		})
  1549  	}
  1550  }
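
        // assignHashes is a test helper defined elsewhere in this package. A minimal
        // sketch of what such a helper does, assuming protoutil.BlockHeaderHash
        // computes header hashes the same way fabric's protoutil does:
        //
        //	func assignHashes(blockchain []*common.Block) {
        //		for i := 1; i < len(blockchain); i++ {
        //			blockchain[i].Header.PreviousHash = protoutil.BlockHeaderHash(blockchain[i-1].Header)
        //		}
        //	}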
  1551  
  1552  var fakeGB = &common.Block{
  1553  	Header: &common.BlockHeader{},
  1554  	Metadata: &common.BlockMetadata{
  1555  		Metadata: [][]byte{{}, {}, {}, {}},
  1556  	},
  1557  	Data: &common.BlockData{
  1558  		Data: [][]byte{
  1559  			protoutil.MarshalOrPanic(&common.Envelope{
  1560  				Payload: protoutil.MarshalOrPanic(&common.Envelope{
  1561  					Payload: protoutil.MarshalOrPanic(&common.Config{
  1562  						Sequence: 1,
  1563  					}),
  1564  				}),
  1565  			}),
  1566  		},
  1567  	},
  1568  }
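
        // fakeGB stands in for a genesis block: its single data item is an Envelope
        // whose payload is another marshaled Envelope carrying a Config with
        // Sequence 1, rather than the usual common.Payload. Presumably this suffices
        // because the pull below only needs a block that can be sent and hashed,
        // not one that parses as a well-formed config transaction.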
  1569  
  1570  func simulateNonParticipantChannelPull(osn *deliverServer) {
  1571  	lastBlock := protoutil.NewBlock(1, nil)
  1572  	lastBlock.Metadata.Metadata[common.BlockMetadataIndex_LAST_CONFIG] = protoutil.MarshalOrPanic(&common.Metadata{
  1573  		Value: protoutil.MarshalOrPanic(&common.LastConfig{Index: 0}),
  1574  	})
  1575  	// We first present a channel whose last block is 'lastBlock', which points
  1576  	// to the genesis block via its last-config metadata
  1577  	osn.addExpectProbeAssert()
  1578  	osn.blockResponses <- &orderer.DeliverResponse{
  1579  		Type: &orderer.DeliverResponse_Block{Block: lastBlock},
  1580  	}
  1581  	osn.addExpectProbeAssert()
  1582  	osn.blockResponses <- &orderer.DeliverResponse{
  1583  		Type: &orderer.DeliverResponse_Block{Block: lastBlock},
  1584  	}
  1585  	osn.addExpectPullAssert(1)
  1586  	osn.blockResponses <- &orderer.DeliverResponse{
  1587  		Type: &orderer.DeliverResponse_Block{Block: lastBlock},
  1588  	}
  1589  	osn.blockResponses <- nil
  1590  
  1591  	// and then make the OSN send back the genesis block.
  1592  	// The first send answers the probe,
  1593  	osn.addExpectProbeAssert()
  1594  	osn.blockResponses <- &orderer.DeliverResponse{
  1595  		Type: &orderer.DeliverResponse_Block{Block: fakeGB},
  1596  	}
  1597  	osn.addExpectPullAssert(0)
  1598  	// and the second send delivers the actual block itself downstream
  1599  	osn.blockResponses <- &orderer.DeliverResponse{
  1600  		Type: &orderer.DeliverResponse_Block{Block: fakeGB},
  1601  	}
  1602  
  1603  	osn.blockResponses <- nil
  1604  }
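
        // A test would typically wire this up before pulling, e.g. (sketch only;
        // the fake-OSN constructor and the Replicator setup live elsewhere in
        // this file):
        //
        //	osn := newClusterNode(t)
        //	simulateNonParticipantChannelPull(osn)
        //	// point a Replicator's block puller at the fake OSN's address, then:
        //	err := r.PullChannel("mychannel")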
  1605  
  1606  func TestFilter(t *testing.T) {
  1607  	logger := flogging.MustGetLogger("test")
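        	// zap.Hooks invokes this function on every log entry, so any message the
        	// Replicator logs must be the expected skip notice.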
  1608  	logger = logger.WithOptions(zap.Hooks(func(entry zapcore.Entry) error {
  1609  		require.Equal(t, "Channel foo shouldn't be pulled. Skipping it", entry.Message)
  1610  		return nil
  1611  	}))
  1612  
  1613  	r := &cluster.Replicator{
  1614  		Filter: func(_ string) bool {
  1615  			return false
  1616  		},
  1617  		Logger: logger,
  1618  	}
  1619  	require.Equal(t, cluster.ErrSkipped, r.PullChannel("foo"))
  1620  }