github.com/anjalikarhana/fabric@v2.1.1+incompatible/orderer/common/cluster/replication_test.go

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package cluster_test
     8  
     9  import (
    10  	"io/ioutil"
    11  	"path/filepath"
    12  	"strings"
    13  	"testing"
    14  	"time"
    15  
    16  	"github.com/golang/protobuf/proto"
    17  	"github.com/hyperledger/fabric-protos-go/common"
    18  	"github.com/hyperledger/fabric-protos-go/msp"
    19  	"github.com/hyperledger/fabric-protos-go/orderer"
    20  	"github.com/hyperledger/fabric/bccsp/sw"
    21  	"github.com/hyperledger/fabric/common/channelconfig"
    22  	"github.com/hyperledger/fabric/common/configtx"
    23  	"github.com/hyperledger/fabric/common/flogging"
    24  	"github.com/hyperledger/fabric/internal/pkg/comm"
    25  	"github.com/hyperledger/fabric/orderer/common/cluster"
    26  	"github.com/hyperledger/fabric/orderer/common/cluster/mocks"
    27  	"github.com/hyperledger/fabric/orderer/common/localconfig"
    28  	"github.com/hyperledger/fabric/protoutil"
    29  	"github.com/pkg/errors"
    30  	"github.com/stretchr/testify/assert"
    31  	"github.com/stretchr/testify/mock"
    32  	"go.uber.org/zap"
    33  	"go.uber.org/zap/zapcore"
    34  )
    35  
    36  func TestIsReplicationNeeded(t *testing.T) {
    37  	for _, testCase := range []struct {
    38  		name                string
    39  		bootBlock           *common.Block
    40  		systemChannelHeight uint64
    41  		systemChannelError  error
    42  		expectedError       string
    43  		replicationNeeded   bool
    44  	}{
    45  		{
    46  			name:                "no replication needed",
    47  			systemChannelHeight: 100,
    48  			bootBlock:           &common.Block{Header: &common.BlockHeader{Number: 99}},
    49  		},
    50  		{
    51  			name:                "replication is needed - bootstrap block's index is equal to height",
    52  			systemChannelHeight: 99,
    53  			bootBlock:           &common.Block{Header: &common.BlockHeader{Number: 99}},
    54  			replicationNeeded:   true,
    55  		},
    56  		{
    57  			name:                "replication is needed - no ledger",
    58  			systemChannelHeight: 0,
    59  			bootBlock:           &common.Block{Header: &common.BlockHeader{Number: 99}},
    60  			replicationNeeded:   true,
    61  		},
    62  		{
    63  			name:               "IO error",
    64  			systemChannelError: errors.New("IO error"),
    65  			expectedError:      "IO error",
    66  		},
    67  	} {
    68  		t.Run(testCase.name, func(t *testing.T) {
    69  			ledgerWriter := &mocks.LedgerWriter{}
    70  			ledgerWriter.On("Height").Return(testCase.systemChannelHeight)
    71  
    72  			ledgerFactory := &mocks.LedgerFactory{}
    73  			ledgerFactory.On("GetOrCreate", "system").Return(ledgerWriter, testCase.systemChannelError)
    74  
    75  			r := cluster.Replicator{
    76  				Filter:        cluster.AnyChannel,
    77  				Logger:        flogging.MustGetLogger("test"),
    78  				BootBlock:     testCase.bootBlock,
    79  				SystemChannel: "system",
    80  				LedgerFactory: ledgerFactory,
    81  			}
    82  
    83  			ok, err := r.IsReplicationNeeded()
    84  			if testCase.expectedError != "" {
    85  				assert.EqualError(t, err, testCase.expectedError)
    86  			} else {
    87  				assert.NoError(t, err)
    88  				assert.Equal(t, testCase.replicationNeeded, ok)
    89  			}
    90  		})
    91  	}
    92  }
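
        // A minimal sketch of the rule the table above exercises (hedged: the
        // authoritative logic lives in cluster.Replicator.IsReplicationNeeded):
        // replication is needed when the local system channel ledger is empty,
        // or when the boot block's sequence is at or past the current height.
        //
        //	needed := height == 0 || bootBlock.Header.Number >= height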
    93  
    94  func TestReplicateChainsFailures(t *testing.T) {
    95  	for _, testCase := range []struct {
    96  		name                    string
    97  		isProbeResponseDelayed  bool
    98  		latestBlockSeqInOrderer uint64
    99  		ledgerFactoryError      error
   100  		appendBlockError        error
   101  		expectedPanic           string
   102  		mutateBlocks            func([]*common.Block)
   103  		channelsReturns         []cluster.ChannelGenesisBlock
   104  		badResponse             *orderer.DeliverResponse
   105  	}{
   106  		{
   107  			name: "no block received",
   108  			expectedPanic: "Failed pulling system channel: " +
   109  				"failed obtaining the latest block for channel system",
   110  			isProbeResponseDelayed: true,
   111  		},
   112  		{
   113  			name: "received service unavailable",
   114  			expectedPanic: "Failed pulling system channel: " +
   115  				"failed obtaining the latest block for channel system",
   116  			badResponse: &orderer.DeliverResponse{
   117  				Type: &orderer.DeliverResponse_Status{
   118  					Status: common.Status_SERVICE_UNAVAILABLE,
   119  				},
   120  			},
   121  		},
   122  		{
   123  			name: "latest block seq is less than boot block seq",
   124  			expectedPanic: "Failed pulling system channel: " +
   125  				"latest height found among system channel(system) orderers is 19," +
   126  				" but the boot block's sequence is 21",
   127  			latestBlockSeqInOrderer: 18,
   128  		},
   129  		{
   130  			name: "hash chain mismatch",
   131  			expectedPanic: "Failed pulling system channel: " +
   132  				"block header mismatch on sequence 11, " +
   133  				"expected 9cd61b7e9a5ea2d128cc877e5304e7205888175a8032d40b97db7412dca41d9e, got 010203",
   134  			latestBlockSeqInOrderer: 21,
   135  			mutateBlocks: func(systemChannelBlocks []*common.Block) {
   136  				systemChannelBlocks[len(systemChannelBlocks)/2].Header.PreviousHash = []byte{1, 2, 3}
   137  			},
   138  		},
   139  		{
   140  			name: "last pulled block doesn't match the boot block",
   141  			expectedPanic: "Block header mismatch on last system channel block," +
   142  				" expected 8ec93b2ef5ffdc302f0c0e24611be04ad2b17b099a1aeafd7cfb76a95923f146," +
   143  				" got e428decfc78f8e4c97b26da9c16f9d0b73f886dafa80477a0dd9bac7eb14fe7a",
   144  			latestBlockSeqInOrderer: 21,
   145  			mutateBlocks: func(systemChannelBlocks []*common.Block) {
   146  				systemChannelBlocks[21].Header.DataHash = nil
   147  			},
   148  		},
   149  		{
   150  			name:                    "failure in creating ledger",
   151  			latestBlockSeqInOrderer: 21,
   152  			ledgerFactoryError:      errors.New("IO error"),
   153  			expectedPanic:           "Failed to create a ledger for channel system: IO error",
   154  		},
   155  		{
   156  			name:                    "failure in appending a block to the ledger",
   157  			latestBlockSeqInOrderer: 21,
   158  			appendBlockError:        errors.New("IO error"),
   159  			expectedPanic:           "Failed to write block [0]: IO error",
   160  		},
   161  		{
   162  			name:                    "failure pulling the system chain",
   163  			latestBlockSeqInOrderer: 21,
   164  			expectedPanic: "Failed pulling system channel: " +
   165  				"failed obtaining the latest block for channel system",
   166  			isProbeResponseDelayed: true,
   167  		},
   168  		{
   169  			name:                    "failure obtaining a ledger for a non participating channel",
   170  			latestBlockSeqInOrderer: 21,
   171  			channelsReturns: []cluster.ChannelGenesisBlock{
   172  				{ChannelName: "channelWeAreNotPartOf"},
   173  			},
   174  			ledgerFactoryError: errors.New("IO error"),
   175  			expectedPanic:      "Failed to create a ledger for channel channelWeAreNotPartOf: IO error",
   176  		},
   177  	} {
   178  		t.Run(testCase.name, func(t *testing.T) {
   179  			systemChannelBlocks := createBlockChain(0, 21)
   180  			if testCase.mutateBlocks != nil {
   181  				testCase.mutateBlocks(systemChannelBlocks)
   182  			}
   183  
   184  			lw := &mocks.LedgerWriter{}
   185  			lw.On("Append", mock.Anything).Return(testCase.appendBlockError)
   186  			lw.On("Height").Return(uint64(0))
   187  
   188  			lf := &mocks.LedgerFactory{}
   189  			lf.On("GetOrCreate", "system").Return(lw, testCase.ledgerFactoryError)
   190  			lf.On("GetOrCreate", "channelWeAreNotPartOf").Return(lw, testCase.ledgerFactoryError)
   191  
   192  			osn := newClusterNode(t)
   193  			defer osn.stop()
   194  
   195  			dialer := newCountingDialer()
   196  			bp := newBlockPuller(dialer, osn.srv.Address())
   197  			// Use a big timeout to reduce the chance of flakes when the server
   198  			// gets stuck and we hit a spurious timeout.
   199  			bp.FetchTimeout = time.Hour
   200  
   201  			cl := &mocks.ChannelLister{}
   202  			cl.On("Channels").Return(testCase.channelsReturns)
   203  			cl.On("Close")
   204  
   205  			r := cluster.Replicator{
   206  				Filter: cluster.AnyChannel,
   207  				AmIPartOfChannel: func(configBlock *common.Block) error {
   208  					return cluster.ErrNotInChannel
   209  				},
   210  				Logger:        flogging.MustGetLogger("test"),
   211  				BootBlock:     systemChannelBlocks[21],
   212  				SystemChannel: "system",
   213  				LedgerFactory: lf,
   214  				Puller:        bp,
   215  				ChannelLister: cl,
   216  			}
   217  
   218  			if len(testCase.channelsReturns) > 0 {
   219  				simulateNonParticipantChannelPull(osn)
   220  			}
   221  
   222  			if testCase.badResponse != nil {
   223  				osn.blockResponses <- testCase.badResponse
   224  			}
   225  
   226  			if !testCase.isProbeResponseDelayed {
   227  				osn.enqueueResponse(testCase.latestBlockSeqInOrderer)
   228  				osn.enqueueResponse(testCase.latestBlockSeqInOrderer)
   229  			} else {
   230  				// Send a nil to force an EOF to the client
   231  				osn.blockResponses <- nil
   232  			}
   233  			osn.addExpectProbeAssert()
   234  			osn.addExpectProbeAssert()
   235  			osn.addExpectPullAssert(0)
   236  
   237  			if !testCase.isProbeResponseDelayed {
   238  				for _, block := range systemChannelBlocks {
   239  					osn.blockResponses <- &orderer.DeliverResponse{
   240  						Type: &orderer.DeliverResponse_Block{Block: block},
   241  					}
   242  				}
   243  			} else {
   244  				// Send a nil to force an EOF to the client
   245  				osn.blockResponses <- nil
   246  			}
   247  
   248  			assert.PanicsWithValue(t, testCase.expectedPanic, func() { r.ReplicateChains() })
   249  			bp.Close()
   250  			dialer.assertAllConnectionsClosed(t)
   251  		})
   252  	}
   253  }
   254  
   255  func TestPullChannelFailure(t *testing.T) {
   256  	blockchain := createBlockChain(0, 5)
   257  	for _, testcase := range []struct {
   258  		name                 string
   259  		genesisBlockSequence int
   260  		thirdBlockSequence   int
   261  	}{
   262  		{
   263  			name:                 "Failed to pull genesis block",
   264  			genesisBlockSequence: 1,
   265  		},
   266  		{
   267  			name:                 "Failed to pull some non genesis block",
   268  			genesisBlockSequence: 0,
   269  			thirdBlockSequence:   0,
   270  		},
   271  	} {
   272  		t.Run(testcase.name, func(t *testing.T) {
   273  			lw := &mocks.LedgerWriter{}
   274  			lw.On("Append", mock.Anything).Return(nil)
   275  			lw.On("Height").Return(uint64(0))
   276  
   277  			lf := &mocks.LedgerFactory{}
   278  			lf.On("GetOrCreate", "mychannel").Return(lw, nil)
   279  
   280  			osn := newClusterNode(t)
   281  			defer osn.stop()
   282  
   283  			enqueueBlock := func(seq int) {
   284  				osn.blockResponses <- &orderer.DeliverResponse{
   285  					Type: &orderer.DeliverResponse_Block{
   286  						Block: blockchain[seq],
   287  					},
   288  				}
   289  			}
   290  
   291  			dialer := newCountingDialer()
   292  			bp := newBlockPuller(dialer, osn.srv.Address())
   293  			// Use a big timeout to reduce the chance of flakes when the server
   294  			// gets stuck and we hit a spurious timeout.
   295  			bp.FetchTimeout = time.Hour
   296  			bp.MaxPullBlockRetries = 1
   297  			// Do not buffer blocks in memory
   298  			bp.MaxTotalBufferBytes = 1
   299  
   300  			r := cluster.Replicator{
   301  				Filter: cluster.AnyChannel,
   302  				AmIPartOfChannel: func(configBlock *common.Block) error {
   303  					return nil
   304  				},
   305  				Logger:        flogging.MustGetLogger("test"),
   306  				SystemChannel: "system",
   307  				LedgerFactory: lf,
   308  				Puller:        bp,
   309  			}
   310  
   311  			osn.addExpectProbeAssert()
   312  			enqueueBlock(5)
   313  			osn.addExpectProbeAssert()
   314  			enqueueBlock(5)
   315  			osn.addExpectPullAssert(0)
   316  			enqueueBlock(testcase.genesisBlockSequence)
   317  			enqueueBlock(1)
   318  			enqueueBlock(testcase.thirdBlockSequence)
   319  
   320  			err := r.PullChannel("mychannel")
   321  			assert.Equal(t, cluster.ErrRetryCountExhausted, err)
   322  		})
   323  	}
   324  
   325  }
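
        // Why the cases above fail: the test answers a request for a given
        // sequence with a block of a different sequence, which the puller
        // rejects; with MaxPullBlockRetries set to 1 the retry budget is
        // exhausted at once. A hedged caller-side sketch, using this file's
        // names:
        //
        //	if err := r.PullChannel("mychannel"); err == cluster.ErrRetryCountExhausted {
        //		// the channel could not be pulled within the retry budget
        //	}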
   326  
   327  func TestPullerConfigFromTopLevelConfig(t *testing.T) {
   328  	signer := &mocks.SignerSerializer{}
   329  	expected := cluster.PullerConfig{
   330  		Channel:             "system",
   331  		MaxTotalBufferBytes: 100,
   332  		Signer:              signer,
   333  		TLSCert:             []byte{3, 2, 1},
   334  		TLSKey:              []byte{1, 2, 3},
   335  		Timeout:             time.Hour,
   336  	}
   337  
   338  	topLevelConfig := &localconfig.TopLevel{
   339  		General: localconfig.General{
   340  			Cluster: localconfig.Cluster{
   341  				ReplicationBufferSize: 100,
   342  				RPCTimeout:            time.Hour,
   343  			},
   344  		},
   345  	}
   346  
   347  	config := cluster.PullerConfigFromTopLevelConfig("system", topLevelConfig, []byte{1, 2, 3}, []byte{3, 2, 1}, signer)
   348  	assert.Equal(t, expected, config)
   349  }
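
        // In sketch form, the mapping the assertion above pins down (grounded
        // only in the expected struct): ReplicationBufferSize feeds
        // MaxTotalBufferBytes, RPCTimeout feeds Timeout, and the channel name,
        // TLS key pair and signer pass through verbatim.
        //
        //	cfg := cluster.PullerConfigFromTopLevelConfig("system", topLevelConfig, tlsKey, tlsCert, signer)
        //	// cfg.MaxTotalBufferBytes == topLevelConfig.General.Cluster.ReplicationBufferSize
        //	// cfg.Timeout            == topLevelConfig.General.Cluster.RPCTimeout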
   350  
   351  func TestReplicateChainsChannelClassificationFailure(t *testing.T) {
   352  	// Scenario: We are unable to classify whether we are part of the channel,
   353  	// so we crash, because this is a programming error.
   354  
   355  	block30WithConfigBlockOf21 := protoutil.NewBlock(30, nil)
   356  	block30WithConfigBlockOf21.Metadata.Metadata[common.BlockMetadataIndex_LAST_CONFIG] = protoutil.MarshalOrPanic(&common.Metadata{
   357  		Value: protoutil.MarshalOrPanic(&common.LastConfig{Index: 21}),
   358  	})
   359  
   360  	osn := newClusterNode(t)
   361  	defer osn.stop()
   362  	osn.blockResponses = make(chan *orderer.DeliverResponse, 1000)
   363  
   364  	dialer := newCountingDialer()
   365  	bp := newBlockPuller(dialer, osn.srv.Address())
   366  	bp.FetchTimeout = time.Hour
   367  
   368  	channelLister := &mocks.ChannelLister{}
   369  	channelLister.On("Channels").Return([]cluster.ChannelGenesisBlock{{ChannelName: "A"}})
   370  	channelLister.On("Close")
   371  
   372  	// We probe for the latest block of the orderer
   373  	osn.addExpectProbeAssert()
   374  	osn.enqueueResponse(30)
   375  
   376  	// And now pull it again (first poll and then pull it for real).
   377  	osn.addExpectProbeAssert()
   378  	osn.enqueueResponse(30)
   379  	osn.addExpectPullAssert(30)
   380  	osn.blockResponses <- &orderer.DeliverResponse{
   381  		Type: &orderer.DeliverResponse_Block{Block: block30WithConfigBlockOf21},
   382  	}
   383  	// Now we pull the latest config block referenced by the block we just pulled.
   384  	// Beforehand we reconnect to the orderer, so we send an artificial signal to close the stream on the server side,
   385  	// and expect a new stream to be established.
   386  	osn.blockResponses <- nil
   387  	// The orderer's last block's sequence is 30,
   388  	osn.addExpectProbeAssert()
   389  	osn.enqueueResponse(30)
   390  	// And the Replicator now asks for block 21.
   391  	osn.enqueueResponse(21)
   392  	osn.addExpectPullAssert(21)
   393  
   394  	r := cluster.Replicator{
   395  		Filter: cluster.AnyChannel,
   396  		AmIPartOfChannel: func(configBlock *common.Block) error {
   397  			return errors.New("oops")
   398  		},
   399  		Logger:        flogging.MustGetLogger("test"),
   400  		SystemChannel: "system",
   401  		ChannelLister: channelLister,
   402  		Puller:        bp,
   403  	}
   404  
   405  	assert.PanicsWithValue(t, "Failed classifying whether I belong to channel A: oops, skipping chain retrieval", func() {
   406  		r.ReplicateChains()
   407  	})
   408  
   409  	bp.Close()
   410  	dialer.assertAllConnectionsClosed(t)
   411  }
   412  
   413  func TestReplicateChainsGreenPath(t *testing.T) {
   414  	// Scenario: There are 5 channels in the system: A-E.
   415  	// We are in channel A but not in channel B, therefore
   416  	// we should pull channel A and then the system channel.
   417  	// However, this is not our node's first replication
   418  	// attempt, but its second.
   419  	// In the past, the node pulled 10 blocks of channel A and crashed.
   420  	// Therefore, it should pull blocks of channel A, but commit
   421  	// only those starting from block number 10.
   422  	// For channel C, we are forbidden from pulling any blocks.
   423  	// Channel D is a deserted channel: all OSNs have left it,
   424  	// so we should not pull it at all.
   425  	// Channel E cannot be pulled at all, because the OSN is
   426  	// unavailable at that time.
   427  
   428  	systemChannelBlocks := createBlockChain(0, 21)
   429  	block30WithConfigBlockOf21 := protoutil.NewBlock(30, nil)
   430  	block30WithConfigBlockOf21.Metadata.Metadata[common.BlockMetadataIndex_LAST_CONFIG] = protoutil.MarshalOrPanic(&common.Metadata{
   431  		Value: protoutil.MarshalOrPanic(&common.LastConfig{Index: 21}),
   432  	})
   433  
   434  	osn := newClusterNode(t)
   435  	defer osn.stop()
   436  	osn.blockResponses = make(chan *orderer.DeliverResponse, 1000)
   437  
   438  	dialer := newCountingDialer()
   439  	bp := newBlockPuller(dialer, osn.srv.Address())
   440  	bp.FetchTimeout = time.Hour
   441  	bp.MaxPullBlockRetries = 1
   442  
   443  	channelLister := &mocks.ChannelLister{}
   444  	channelLister.On("Channels").Return([]cluster.ChannelGenesisBlock{
   445  		{ChannelName: "E", GenesisBlock: fakeGB},
   446  		{ChannelName: "D", GenesisBlock: fakeGB}, {ChannelName: "C", GenesisBlock: fakeGB},
   447  		{ChannelName: "A", GenesisBlock: fakeGB}, {ChannelName: "B", GenesisBlock: fakeGB},
   448  	})
   449  	channelLister.On("Close")
   450  
   451  	amIPartOfChannelMock := &mock.Mock{}
   452  	// For channel A
   453  	amIPartOfChannelMock.On("func13").Return(nil).Once()
   454  	// For channel B
   455  	amIPartOfChannelMock.On("func13").Return(cluster.ErrNotInChannel).Once()
   456  
   457  	// 22 blocks are for the system channel, 31 are for channel A, and for channel B we only need 1 block (the genesis block).
   458  	blocksCommittedToLedgerA := make(chan *common.Block, 31)
   459  	blocksCommittedToLedgerB := make(chan *common.Block, 1)
   460  	blocksCommittedToLedgerC := make(chan *common.Block, 1)
   461  	blocksCommittedToLedgerD := make(chan *common.Block, 1)
   462  	blocksCommittedToLedgerE := make(chan *common.Block, 1)
   463  	blocksCommittedToSystemLedger := make(chan *common.Block, 22)
   464  	// Put 10 blocks in the ledger of channel A, to simulate
   465  	// that the ledger had blocks when the node started.
   466  	for seq := 0; seq < 10; seq++ {
   467  		blocksCommittedToLedgerA <- &common.Block{
   468  			Header: &common.BlockHeader{Number: uint64(seq)},
   469  		}
   470  	}
   471  
   472  	lwA := &mocks.LedgerWriter{}
   473  	lwA.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) {
   474  		blocksCommittedToLedgerA <- arg.Get(0).(*common.Block)
   475  	})
   476  	lwA.On("Height").Return(func() uint64 {
   477  		return uint64(len(blocksCommittedToLedgerA))
   478  	})
   479  
   480  	lwB := &mocks.LedgerWriter{}
   481  	lwB.On("Height").Return(func() uint64 {
   482  		return uint64(len(blocksCommittedToLedgerB))
   483  	})
   484  	lwB.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) {
   485  		blocksCommittedToLedgerB <- arg.Get(0).(*common.Block)
   486  	})
   487  
   488  	lwC := &mocks.LedgerWriter{}
   489  	lwC.On("Height").Return(func() uint64 {
   490  		return uint64(len(blocksCommittedToLedgerC))
   491  	})
   492  	lwC.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) {
   493  		blocksCommittedToLedgerC <- arg.Get(0).(*common.Block)
   494  	})
   495  
   496  	lwD := &mocks.LedgerWriter{}
   497  	lwD.On("Height").Return(func() uint64 {
   498  		return uint64(len(blocksCommittedToLedgerD))
   499  	})
   500  	lwD.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) {
   501  		blocksCommittedToLedgerD <- arg.Get(0).(*common.Block)
   502  	})
   503  
   504  	lwE := &mocks.LedgerWriter{}
   505  	lwE.On("Height").Return(func() uint64 {
   506  		return uint64(len(blocksCommittedToLedgerE))
   507  	})
   508  	lwE.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) {
   509  		blocksCommittedToLedgerE <- arg.Get(0).(*common.Block)
   510  	})
   511  
   512  	lwSystem := &mocks.LedgerWriter{}
   513  	lwSystem.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) {
   514  		blocksCommittedToSystemLedger <- arg.Get(0).(*common.Block)
   515  	})
   516  	lwSystem.On("Height").Return(func() uint64 {
   517  		return uint64(len(blocksCommittedToSystemLedger))
   518  	})
   519  
   520  	lf := &mocks.LedgerFactory{}
   521  	lf.On("Close")
   522  	lf.On("GetOrCreate", "A").Return(lwA, nil)
   523  	lf.On("GetOrCreate", "B").Return(lwB, nil)
   524  	lf.On("GetOrCreate", "C").Return(lwC, nil)
   525  	lf.On("GetOrCreate", "D").Return(lwD, nil)
   526  	lf.On("GetOrCreate", "E").Return(lwE, nil)
   527  	lf.On("GetOrCreate", "system").Return(lwSystem, nil)
   528  
   529  	r := cluster.Replicator{
   530  		Filter:        cluster.AnyChannel,
   531  		LedgerFactory: lf,
   532  		AmIPartOfChannel: func(configBlock *common.Block) error {
   533  			return amIPartOfChannelMock.Called().Error(0)
   534  		},
   535  		Logger:        flogging.MustGetLogger("test"),
   536  		SystemChannel: "system",
   537  		ChannelLister: channelLister,
   538  		Puller:        bp,
   539  		BootBlock:     systemChannelBlocks[21],
   540  	}
   541  
   542  	// The first thing the orderer gets is a seek to channel E.
   543  	// Unfortunately, it's not available!
   544  	osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) {
   545  		// Ensure the seek came to the right channel
   546  		assert.NotNil(osn.t, info.GetStart().GetNewest())
   547  		assert.Equal(t, "E", actualChannel)
   548  	}
   549  	// Send an EOF down the stream.
   550  	osn.blockResponses <- nil
   551  
   552  	// The second thing the orderer gets is a seek to channel D,
   553  	// which is followed by a response of service unavailable
   554  	osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) {
   555  		// Ensure the seek came to the right channel
   556  		assert.NotNil(osn.t, info.GetStart().GetNewest())
   557  		assert.Equal(t, "D", actualChannel)
   558  	}
   559  	osn.blockResponses <- &orderer.DeliverResponse{
   560  		Type: &orderer.DeliverResponse_Status{
   561  			Status: common.Status_SERVICE_UNAVAILABLE,
   562  		},
   563  	}
   564  
   565  	// The third thing the orderer gets is a seek to channel C,
   566  	// which is followed by a response of forbidden
   567  	osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) {
   568  		// Ensure the seek came to the right channel
   569  		assert.NotNil(osn.t, info.GetStart().GetNewest())
   570  		assert.Equal(t, "C", actualChannel)
   571  	}
   572  
   573  	osn.blockResponses <- &orderer.DeliverResponse{
   574  		Type: &orderer.DeliverResponse_Status{
   575  			Status: common.Status_FORBIDDEN,
   576  		},
   577  	}
   578  
   579  	for _, channel := range []string{"A", "B"} {
   580  		channel := channel // pin the loop variable for the closures below
   581  		// First, the orderer needs to figure out whether it is in the channel,
   582  		// so it reaches to find the latest block from all orderers to get
   583  		// the latest config block and see whether it is among the consenters.
   584  
   585  		// Orderer is expecting a poll for last block of the current channel
   586  		osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) {
   587  			// Ensure the seek came to the right channel
   588  			assert.NotNil(osn.t, info.GetStart().GetNewest())
   589  			assert.Equal(t, channel, actualChannel)
   590  		}
   591  
   592  		// Orderer returns that its last block is 30.
   593  		// This is needed to determine the latest height by comparing across all orderers.
   594  		osn.enqueueResponse(30)
   595  
   596  		// First we probe again, from some orderer, for the block sequence we got previously.
   597  		osn.addExpectProbeAssert()
   598  		osn.enqueueResponse(30)
   599  
   600  		// And afterwards pull the block from the first orderer.
   601  		osn.addExpectPullAssert(30)
   602  		osn.blockResponses <- &orderer.DeliverResponse{
   603  			Type: &orderer.DeliverResponse_Block{Block: block30WithConfigBlockOf21},
   604  		}
   605  		// And the last config block is pulled via reconnecting to the orderer.
   606  		osn.blockResponses <- nil
   607  		// The orderer's last block's sequence is 30,
   608  		osn.addExpectProbeAssert()
   609  		osn.enqueueResponse(30)
   610  		// And the Replicator now asks for block 21.
   611  		osn.enqueueResponse(21)
   612  		osn.addExpectPullAssert(21)
   613  		// We always close the connection before attempting to pull the next block
   614  		osn.blockResponses <- nil
   615  	}
   616  
   617  	// Next, the Replicator figures out the latest block sequence for that chain,
   618  	// to know up to which block to pull.
   619  
   620  	// We expect a probe for channel A only, because we are not part of channel B
   621  	osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) {
   622  		// Ensure the seek came to the right channel
   623  		assert.NotNil(osn.t, info.GetStart().GetNewest())
   624  		assert.Equal(t, "A", actualChannel)
   625  	}
   626  	osn.enqueueResponse(30)
   627  	// From this point onwards, we pull the blocks for the chain.
   628  	osn.enqueueResponse(30)
   629  	osn.addExpectProbeAssert()
   630  	osn.addExpectPullAssert(10)
   631  	// Enqueue blocks 10 through 30 (21 blocks) into the orderer's send queue
   632  	for _, block := range createBlockChain(10, 30) {
   633  		osn.blockResponses <- &orderer.DeliverResponse{
   634  			Type: &orderer.DeliverResponse_Block{Block: block},
   635  		}
   636  	}
   637  	// Signal the orderer to stop sending us blocks since we're going to reconnect
   638  	// to it to ask for the next channel
   639  	osn.blockResponses <- nil
   640  
   641  	// Now we define assertions for the system channel
   642  	// Pull assertions for the system channel
   643  	osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) {
   644  		// Ensure the seek came to the system channel.
   645  		assert.NotNil(osn.t, info.GetStart().GetNewest())
   646  		assert.Equal(t, "system", actualChannel)
   647  	}
   648  	osn.blockResponses <- &orderer.DeliverResponse{
   649  		Type: &orderer.DeliverResponse_Block{Block: systemChannelBlocks[21]},
   650  	}
   651  	osn.addExpectProbeAssert()
   652  	osn.enqueueResponse(21)
   653  	osn.addExpectPullAssert(0)
   654  	for _, block := range systemChannelBlocks {
   655  		osn.blockResponses <- &orderer.DeliverResponse{
   656  			Type: &orderer.DeliverResponse_Block{Block: block},
   657  		}
   658  	}
   659  
   660  	// This is where all the work is done.
   661  	// The above lines were all assertions and preparations
   662  	// for the expected flow of the test.
   663  	r.ReplicateChains()
   664  
   665  	// We replicated the chains, so all that is left is to ensure
   666  	// the blocks were committed in order, and that all blocks we expected
   667  	// to be committed (for channel A and the system channel) were committed.
   668  	close(blocksCommittedToLedgerA)
   669  	close(blocksCommittedToSystemLedger)
   670  	assert.Len(t, blocksCommittedToLedgerA, cap(blocksCommittedToLedgerA))
   671  	assert.Len(t, blocksCommittedToSystemLedger, cap(blocksCommittedToSystemLedger))
   672  	assert.Len(t, blocksCommittedToLedgerB, 1)
   673  	assert.Len(t, blocksCommittedToLedgerC, 1)
   674  	assert.Len(t, blocksCommittedToLedgerD, 1)
   675  	assert.Len(t, blocksCommittedToLedgerE, 1)
   676  	// Count the blocks for channel A
   677  	var expectedSequence uint64
   678  	for block := range blocksCommittedToLedgerA {
   679  		assert.Equal(t, expectedSequence, block.Header.Number)
   680  		expectedSequence++
   681  	}
   682  
   683  	// Count the blocks for the system channel
   684  	expectedSequence = uint64(0)
   685  	for block := range blocksCommittedToSystemLedger {
   686  		assert.Equal(t, expectedSequence, block.Header.Number)
   687  		expectedSequence++
   688  	}
   689  
   690  	bp.Close()
   691  	dialer.assertAllConnectionsClosed(t)
   692  }
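
        // The seek choreography above repeats a fixed handshake per channel (a
        // summary of this file's assertion helpers, not an API contract): probe
        // for the newest block to learn the height, probe once more on the
        // connection that will do the pulling, then pull from the first needed
        // sequence. In the helpers' vocabulary:
        //
        //	osn.addExpectProbeAssert()  // expect a seek for the newest block
        //	osn.enqueueResponse(30)     // reply: the latest sequence is 30
        //	osn.addExpectPullAssert(10) // expect a seek starting at sequence 10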
   693  
   694  func TestParticipant(t *testing.T) {
   695  	for _, testCase := range []struct {
   696  		name                  string
   697  		heightsByEndpoints    map[string]uint64
   698  		heightsByEndpointsErr error
   699  		latestBlockSeq        uint64
   700  		latestBlock           *common.Block
   701  		latestConfigBlockSeq  uint64
   702  		latestConfigBlock     *common.Block
   703  		expectedError         string
   704  		predicateReturns      error
   705  	}{
   706  		{
   707  			name:          "No available orderer",
   708  			expectedError: cluster.ErrRetryCountExhausted.Error(),
   709  		},
   710  		{
   711  			name:                  "Unauthorized for the channel",
   712  			expectedError:         cluster.ErrForbidden.Error(),
   713  			heightsByEndpointsErr: cluster.ErrForbidden,
   714  		},
   715  		{
   716  			name:                  "No OSN services the channel",
   717  			expectedError:         cluster.ErrServiceUnavailable.Error(),
   718  			heightsByEndpointsErr: cluster.ErrServiceUnavailable,
   719  		},
   720  		{
   721  			name: "Pulled block has no metadata",
   722  			heightsByEndpoints: map[string]uint64{
   723  				"orderer.example.com:7050": 100,
   724  			},
   725  			latestBlockSeq: uint64(99),
   726  			latestBlock:    &common.Block{},
   727  			expectedError:  "failed to retrieve metadata: no metadata in block",
   728  		},
   729  		{
   730  			name: "Pulled block has no last config sequence in metadata",
   731  			heightsByEndpoints: map[string]uint64{
   732  				"orderer.example.com:7050": 100,
   733  			},
   734  			latestBlockSeq: uint64(99),
   735  			latestBlock: &common.Block{
   736  				Metadata: &common.BlockMetadata{
   737  					Metadata: nil,
   738  				},
   739  			},
   740  			expectedError: "failed to retrieve metadata: no metadata at index [SIGNATURES]",
   741  		},
   742  		{
   743  			name: "Pulled block's SIGNATURES metadata is malformed",
   744  			heightsByEndpoints: map[string]uint64{
   745  				"orderer.example.com:7050": 100,
   746  			},
   747  			latestBlockSeq: uint64(99),
   748  			latestBlock: &common.Block{
   749  				Metadata: &common.BlockMetadata{
   750  					Metadata: [][]byte{{1, 2, 3}},
   751  				},
   752  			},
   753  			expectedError: "failed to retrieve metadata: error unmarshaling metadata" +
   754  				" at index [SIGNATURES]: proto: common.Metadata: illegal tag 0 (wire type 1)",
   755  		},
   756  		{
   757  			name: "Pulled block's LAST_CONFIG metadata is malformed",
   758  			heightsByEndpoints: map[string]uint64{
   759  				"orderer.example.com:7050": 100,
   760  			},
   761  			latestBlockSeq: uint64(99),
   762  			latestBlock: &common.Block{
   763  				Metadata: &common.BlockMetadata{
   764  					Metadata: [][]byte{{}, {1, 2, 3}},
   765  				},
   766  			},
   767  			expectedError: "failed to retrieve metadata: error unmarshaling metadata" +
   768  				" at index [LAST_CONFIG]: proto: common.Metadata: illegal tag 0 (wire type 1)",
   769  		},
   770  		{
   771  			name: "Pulled block's metadata is valid and has a last config",
   772  			heightsByEndpoints: map[string]uint64{
   773  				"orderer.example.com:7050": 100,
   774  			},
   775  			latestBlockSeq: uint64(99),
   776  			latestBlock: &common.Block{
   777  				Metadata: &common.BlockMetadata{
   778  					Metadata: [][]byte{protoutil.MarshalOrPanic(&common.Metadata{
   779  						Value: protoutil.MarshalOrPanic(&common.OrdererBlockMetadata{
   780  							LastConfig: &common.LastConfig{Index: 42},
   781  						}),
   782  					})},
   783  				},
   784  			},
   785  			latestConfigBlockSeq: 42,
   786  			latestConfigBlock:    &common.Block{Header: &common.BlockHeader{Number: 42}},
   787  			predicateReturns:     cluster.ErrNotInChannel,
   788  		},
   789  		{
   790  			name:          "Failed pulling last block",
   791  			expectedError: cluster.ErrRetryCountExhausted.Error(),
   792  			heightsByEndpoints: map[string]uint64{
   793  				"orderer.example.com:7050": 100,
   794  			},
   795  			latestBlockSeq: uint64(99),
   796  			latestBlock:    nil,
   797  		},
   798  		{
   799  			name:          "Failed pulling last config block",
   800  			expectedError: cluster.ErrRetryCountExhausted.Error(),
   801  			heightsByEndpoints: map[string]uint64{
   802  				"orderer.example.com:7050": 100,
   803  			},
   804  			latestBlockSeq: uint64(99),
   805  			latestBlock: &common.Block{
   806  				Metadata: &common.BlockMetadata{
   807  					Metadata: [][]byte{protoutil.MarshalOrPanic(&common.Metadata{
   808  						Value: protoutil.MarshalOrPanic(&common.OrdererBlockMetadata{
   809  							LastConfig: &common.LastConfig{Index: 42},
   810  						}),
   811  					})},
   812  				},
   813  			},
   814  			latestConfigBlockSeq: 42,
   815  			latestConfigBlock:    nil,
   816  		},
   817  	} {
   818  		t.Run(testCase.name, func(t *testing.T) {
   819  			configBlocks := make(chan *common.Block, 1)
   820  			predicate := func(configBlock *common.Block) error {
   821  				configBlocks <- configBlock
   822  				return testCase.predicateReturns
   823  			}
   824  			puller := &mocks.ChainPuller{}
   825  			puller.On("HeightsByEndpoints").Return(testCase.heightsByEndpoints, testCase.heightsByEndpointsErr)
   826  			puller.On("PullBlock", testCase.latestBlockSeq).Return(testCase.latestBlock)
   827  			puller.On("PullBlock", testCase.latestConfigBlockSeq).Return(testCase.latestConfigBlock)
   828  			puller.On("Close")
   829  
   830  			err := cluster.Participant(puller, predicate)
   831  			if testCase.expectedError != "" {
   832  				assert.EqualError(t, err, testCase.expectedError)
   833  				assert.Len(t, configBlocks, 0)
   834  			} else {
   835  				assert.Len(t, configBlocks, 1)
   836  				assert.Equal(t, testCase.predicateReturns, err)
   837  			}
   838  		})
   839  	}
   840  }
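
        // The flow the mocks above pin down for cluster.Participant: query
        // HeightsByEndpoints for the latest height, pull the latest block, read
        // the LAST_CONFIG index from its metadata, pull that config block, and
        // hand it to the caller's predicate, whose return value (here,
        // predicateReturns) becomes Participant's own error.
        //
        //	err := cluster.Participant(puller, predicate)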
   841  
   842  func TestBlockPullerFromConfigBlockFailures(t *testing.T) {
   843  	blockBytes, err := ioutil.ReadFile("testdata/mychannel.block")
   844  	assert.NoError(t, err)
   845  
   846  	validBlock := &common.Block{}
   847  	assert.NoError(t, proto.Unmarshal(blockBytes, validBlock))
   848  
   849  	cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
   850  	assert.NoError(t, err)
   851  
   852  	for _, testCase := range []struct {
   853  		name         string
   854  		expectedErr  string
   855  		pullerConfig cluster.PullerConfig
   856  		block        *common.Block
   857  	}{
   858  		{
   859  			name:        "nil block",
   860  			expectedErr: "nil block",
   861  		},
   862  		{
   863  			name:        "invalid block",
   864  			expectedErr: "block data is nil",
   865  			block:       &common.Block{},
   866  		},
   867  		{
   868  			name: "bad envelope inside block",
   869  			expectedErr: "failed extracting bundle from envelope: " +
   870  				"failed to unmarshal payload from envelope: " +
   871  				"error unmarshaling Payload: " +
   872  				"proto: common.Payload: illegal tag 0 (wire type 1)",
   873  			block: &common.Block{
   874  				Data: &common.BlockData{
   875  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
   876  						Payload: []byte{1, 2, 3},
   877  					})},
   878  				},
   879  			},
   880  		},
   881  		{
   882  			name:        "invalid TLS certificate",
   883  			expectedErr: "unable to decode TLS certificate PEM: ////",
   884  			block:       validBlock,
   885  			pullerConfig: cluster.PullerConfig{
   886  				TLSCert: []byte{255, 255, 255},
   887  			},
   888  		},
   889  	} {
   890  		t.Run(testCase.name, func(t *testing.T) {
   891  			verifierRetriever := &mocks.VerifierRetriever{}
   892  			verifierRetriever.On("RetrieveVerifier", mock.Anything).Return(&cluster.NoopBlockVerifier{})
   893  			bp, err := cluster.BlockPullerFromConfigBlock(testCase.pullerConfig, testCase.block, verifierRetriever, cryptoProvider)
   894  			assert.EqualError(t, err, testCase.expectedErr)
   895  			assert.Nil(t, bp)
   896  		})
   897  	}
   898  }
   899  
   900  func testBlockPullerFromConfig(t *testing.T, blockVerifiers []cluster.BlockVerifier, expectedLogMsg string, iterations int) {
   901  	verifierRetriever := &mocks.VerifierRetriever{}
   902  	for _, blockVerifier := range blockVerifiers {
   903  		verifierRetriever.On("RetrieveVerifier", mock.Anything).Return(blockVerifier).Once()
   904  	}
   905  
   906  	caCert, err := ioutil.ReadFile(filepath.Join("testdata", "ca.crt"))
   907  	assert.NoError(t, err)
   908  
   909  	tlsCert, err := ioutil.ReadFile(filepath.Join("testdata", "server.crt"))
   910  	assert.NoError(t, err)
   911  
   912  	tlsKey, err := ioutil.ReadFile(filepath.Join("testdata", "server.key"))
   913  	assert.NoError(t, err)
   914  
   915  	osn := newClusterNode(t)
   916  	osn.srv.Stop()
   917  	// Replace the gRPC server with a TLS one
   918  	osn.srv, err = comm.NewGRPCServer("127.0.0.1:0", comm.ServerConfig{
   919  		SecOpts: comm.SecureOptions{
   920  			Key:               tlsKey,
   921  			RequireClientCert: true,
   922  			Certificate:       tlsCert,
   923  			ClientRootCAs:     [][]byte{caCert},
   924  			UseTLS:            true,
   925  		},
   926  	})
   927  	assert.NoError(t, err)
   928  	orderer.RegisterAtomicBroadcastServer(osn.srv.Server(), osn)
   929  	// And start it
   930  	go osn.srv.Start()
   931  	defer osn.stop()
   932  
   933  	// Start from a valid configuration block
   934  	blockBytes, err := ioutil.ReadFile(filepath.Join("testdata", "mychannel.block"))
   935  	assert.NoError(t, err)
   936  
   937  	validBlock := &common.Block{}
   938  	assert.NoError(t, proto.Unmarshal(blockBytes, validBlock))
   939  
   940  	// And inject into it a 127.0.0.1 orderer endpoint and a new TLS CA certificate.
   941  	injectTLSCACert(t, validBlock, caCert)
   942  	injectGlobalOrdererEndpoint(t, validBlock, osn.srv.Address())
   943  	validBlock.Header.DataHash = protoutil.BlockDataHash(validBlock.Data)
   944  
   945  	for attempt := 0; attempt < iterations; attempt++ {
   946  		blockMsg := &orderer.DeliverResponse_Block{
   947  			Block: validBlock,
   948  		}
   949  
   950  		osn.blockResponses <- &orderer.DeliverResponse{
   951  			Type: blockMsg,
   952  		}
   953  
   954  		osn.blockResponses <- &orderer.DeliverResponse{
   955  			Type: blockMsg,
   956  		}
   957  
   958  		osn.blockResponses <- nil
   959  
   960  		osn.addExpectProbeAssert()
   961  		osn.addExpectPullAssert(0)
   962  	}
   963  
   964  	cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
   965  	assert.NoError(t, err)
   966  
   967  	bp, err := cluster.BlockPullerFromConfigBlock(cluster.PullerConfig{
   968  		TLSCert:             tlsCert,
   969  		TLSKey:              tlsKey,
   970  		MaxTotalBufferBytes: 1,
   971  		Channel:             "mychannel",
   972  		Signer:              &mocks.SignerSerializer{},
   973  		Timeout:             time.Hour,
   974  	}, validBlock, verifierRetriever, cryptoProvider)
   975  	assert.NoError(t, err)
   976  	bp.RetryTimeout = time.Millisecond * 10
   977  	defer bp.Close()
   978  
   979  	var seenExpectedLogMsg bool
   980  
   981  	bp.Logger = bp.Logger.WithOptions(zap.Hooks(func(entry zapcore.Entry) error {
   982  		if strings.Contains(entry.Message, expectedLogMsg) {
   983  			seenExpectedLogMsg = true
   984  		}
   985  		return nil
   986  	}))
   987  
   988  	block := bp.PullBlock(0)
   989  	assert.Equal(t, uint64(0), block.Header.Number)
   990  	assert.True(t, seenExpectedLogMsg)
   991  }
   992  
   993  func TestSkipPullingPulledChannels(t *testing.T) {
   994  	blockchain := createBlockChain(0, 5)
   995  	lw := &mocks.LedgerWriter{}
   996  	lw.On("Height").Return(uint64(6))
   997  
   998  	lf := &mocks.LedgerFactory{}
   999  	lf.On("GetOrCreate", "mychannel").Return(lw, nil)
  1000  
  1001  	osn := newClusterNode(t)
  1002  	defer osn.stop()
  1003  
  1004  	enqueueBlock := func(seq int) {
  1005  		osn.blockResponses <- &orderer.DeliverResponse{
  1006  			Type: &orderer.DeliverResponse_Block{
  1007  				Block: blockchain[seq],
  1008  			},
  1009  		}
  1010  	}
  1011  
  1012  	dialer := newCountingDialer()
  1013  	bp := newBlockPuller(dialer, osn.srv.Address())
  1014  	bp.FetchTimeout = time.Hour
  1015  
  1016  	r := cluster.Replicator{
  1017  		Filter: cluster.AnyChannel,
  1018  		AmIPartOfChannel: func(configBlock *common.Block) error {
  1019  			return nil
  1020  		},
  1021  		Logger:        flogging.MustGetLogger("test"),
  1022  		SystemChannel: "system",
  1023  		LedgerFactory: lf,
  1024  		Puller:        bp,
  1025  	}
  1026  
  1027  	var detectedChannelSkipped bool
  1028  	r.Logger = r.Logger.WithOptions(zap.Hooks(func(entry zapcore.Entry) error {
  1029  		if strings.Contains(entry.Message, "Latest height found (6) is equal to our height, skipping pulling channel mychannel") {
  1030  			detectedChannelSkipped = true
  1031  		}
  1032  		return nil
  1033  	}))
  1034  
  1035  	osn.addExpectProbeAssert()
  1036  	enqueueBlock(5)
  1037  	osn.addExpectProbeAssert()
  1038  	enqueueBlock(5)
  1039  
  1040  	err := r.PullChannel("mychannel")
  1041  	assert.NoError(t, err)
  1042  	assert.True(t, detectedChannelSkipped)
  1043  }
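
        // The skip condition exercised above, in sketch form: the probe finds a
        // remote height of 6 and the mocked ledger also reports height 6, so
        // PullChannel logs the "skipping pulling channel" line asserted in the
        // hook and returns nil without pulling a single block.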
  1044  
  1045  func TestBlockPullerFromConfigBlockGreenPath(t *testing.T) {
  1046  	for _, testCase := range []struct {
  1047  		description        string
  1048  		blockVerifiers     []cluster.BlockVerifier
  1049  		expectedLogMessage string
  1050  		iterations         int
  1051  	}{
  1052  		{
  1053  			description:        "Success",
  1054  			blockVerifiers:     []cluster.BlockVerifier{&cluster.NoopBlockVerifier{}},
  1055  			expectedLogMessage: "Got block [0] of size",
  1056  			iterations:         1,
  1057  		},
  1058  		{
  1059  			description: "Failure",
  1060  			iterations:  2,
  1061  			// The first time it returns nil; the second time, the same as in the success case
  1062  			blockVerifiers: []cluster.BlockVerifier{nil, &cluster.NoopBlockVerifier{}},
  1063  			expectedLogMessage: "Failed verifying received blocks: " +
  1064  				"couldn't acquire verifier for channel mychannel",
  1065  		},
  1066  	} {
  1067  		t.Run(testCase.description, func(t *testing.T) {
  1068  			testBlockPullerFromConfig(t, testCase.blockVerifiers,
  1069  				testCase.expectedLogMessage, testCase.iterations)
  1070  		})
  1071  	}
  1072  }
  1073  
  1074  func TestNoopBlockVerifier(t *testing.T) {
  1075  	v := &cluster.NoopBlockVerifier{}
  1076  	assert.Nil(t, v.VerifyBlockSignature(nil, nil))
  1077  }
  1078  
  1079  func injectGlobalOrdererEndpoint(t *testing.T, block *common.Block, endpoint string) {
  1080  	ordererAddresses := channelconfig.OrdererAddressesValue([]string{endpoint})
  1081  	// Unwrap the layers until we reach the orderer addresses
  1082  	env, err := protoutil.ExtractEnvelope(block, 0)
  1083  	assert.NoError(t, err)
  1084  	payload, err := protoutil.UnmarshalPayload(env.Payload)
  1085  	assert.NoError(t, err)
  1086  	confEnv, err := configtx.UnmarshalConfigEnvelope(payload.Data)
  1087  	assert.NoError(t, err)
  1088  	// Replace the orderer addresses
  1089  	confEnv.Config.ChannelGroup.Values[ordererAddresses.Key()].Value = protoutil.MarshalOrPanic(ordererAddresses.Value())
  1090  	// Remove the per-org addresses, if applicable
  1091  	ordererGrps := confEnv.Config.ChannelGroup.Groups[channelconfig.OrdererGroupKey].Groups
  1092  	for _, grp := range ordererGrps {
  1093  		if grp.Values[channelconfig.EndpointsKey] == nil {
  1094  			continue
  1095  		}
  1096  		grp.Values[channelconfig.EndpointsKey].Value = nil
  1097  	}
  1098  	// And put it back into the block
  1099  	payload.Data = protoutil.MarshalOrPanic(confEnv)
  1100  	env.Payload = protoutil.MarshalOrPanic(payload)
  1101  	block.Data.Data[0] = protoutil.MarshalOrPanic(env)
  1102  }
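
        // Both inject helpers share the same unwrap/rewrap pattern: a config
        // block's first data item is an Envelope, its Payload.Data holds a
        // ConfigEnvelope, and after mutation everything is re-marshaled
        // outwards in reverse order. The shared unwrap step, as written above
        // and below:
        //
        //	env, _ := protoutil.ExtractEnvelope(block, 0)
        //	payload, _ := protoutil.UnmarshalPayload(env.Payload)
        //	confEnv, _ := configtx.UnmarshalConfigEnvelope(payload.Data)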
  1103  
  1104  func injectTLSCACert(t *testing.T, block *common.Block, tlsCA []byte) {
  1105  	// Unwrap the layers until we reach the TLS CA certificates
  1106  	env, err := protoutil.ExtractEnvelope(block, 0)
  1107  	assert.NoError(t, err)
  1108  	payload, err := protoutil.UnmarshalPayload(env.Payload)
  1109  	assert.NoError(t, err)
  1110  	confEnv, err := configtx.UnmarshalConfigEnvelope(payload.Data)
  1111  	assert.NoError(t, err)
  1112  	mspKey := confEnv.Config.ChannelGroup.Groups[channelconfig.OrdererGroupKey].Groups["OrdererOrg"].Values[channelconfig.MSPKey]
  1113  	rawMSPConfig := mspKey.Value
  1114  	mspConf := &msp.MSPConfig{}
  1115  	assert.NoError(t, proto.Unmarshal(rawMSPConfig, mspConf))
  1116  	fabricMSPConf := &msp.FabricMSPConfig{}
  1117  	assert.NoError(t, proto.Unmarshal(mspConf.Config, fabricMSPConf))
  1118  	// Replace the TLS root certs with the given ones
  1119  	fabricMSPConf.TlsRootCerts = [][]byte{tlsCA}
  1120  	// And put it back into the block
  1121  	mspConf.Config = protoutil.MarshalOrPanic(fabricMSPConf)
  1122  	mspKey.Value = protoutil.MarshalOrPanic(mspConf)
  1123  	payload.Data = protoutil.MarshalOrPanic(confEnv)
  1124  	env.Payload = protoutil.MarshalOrPanic(payload)
  1125  	block.Data.Data[0] = protoutil.MarshalOrPanic(env)
  1126  }
  1127  
  1128  func TestExtractGenesisBlock(t *testing.T) {
  1129  	for _, testCase := range []struct {
  1130  		name               string
  1131  		expectedErr        string
  1132  		returnedName       string
  1133  		block              *common.Block
  1134  		returnGenesisBlock bool
  1135  	}{
  1136  		{
  1137  			name:        "nil block",
  1138  			expectedErr: "nil block",
  1139  		},
  1140  		{
  1141  			name:        "no data section in block",
  1142  			expectedErr: "block data is nil",
  1143  			block:       &common.Block{},
  1144  		},
  1145  		{
  1146  			name: "corrupt envelope in block",
  1147  			expectedErr: "block data does not carry an" +
  1148  				" envelope at index 0: error unmarshaling Envelope: proto: common.Envelope: illegal tag 0 (wire type 1)",
  1149  			block: &common.Block{
  1150  				Data: &common.BlockData{
  1151  					Data: [][]byte{{1, 2, 3}},
  1152  				},
  1153  			},
  1154  		},
  1155  		{
  1156  			name:        "corrupt payload in envelope",
  1157  			expectedErr: "error unmarshaling Payload: proto: common.Payload: illegal tag 0 (wire type 1)",
  1158  			block: &common.Block{
  1159  				Data: &common.BlockData{
  1160  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1161  						Payload: []byte{1, 2, 3},
  1162  					})},
  1163  				},
  1164  			},
  1165  		},
  1166  		{
  1167  			name:        "no header in block",
  1168  			expectedErr: "nil header in payload",
  1169  			block: &common.Block{
  1170  				Data: &common.BlockData{
  1171  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1172  						Payload: protoutil.MarshalOrPanic(&common.Payload{}),
  1173  					})},
  1174  				},
  1175  			},
  1176  		},
  1177  		{
  1178  			name: "corrupt channel header",
  1179  			expectedErr: "error unmarshaling ChannelHeader:" +
  1180  				" proto: common.ChannelHeader: illegal tag 0 (wire type 1)",
  1181  			block: &common.Block{
  1182  				Data: &common.BlockData{
  1183  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1184  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1185  							Header: &common.Header{
  1186  								ChannelHeader: []byte{1, 2, 3},
  1187  							},
  1188  						}),
  1189  					})},
  1190  				},
  1191  			},
  1192  		},
  1193  		{
  1194  			name:        "not an orderer transaction",
  1195  			expectedErr: "",
  1196  			block: &common.Block{
  1197  				Data: &common.BlockData{
  1198  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1199  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1200  							Header: &common.Header{
  1201  								ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1202  									Type: int32(common.HeaderType_CONFIG_UPDATE),
  1203  								}),
  1204  							},
  1205  						}),
  1206  					})},
  1207  				},
  1208  			},
  1209  		},
  1210  		{
  1211  			name:        "orderer transaction with corrupt inner envelope",
  1212  			expectedErr: "error unmarshaling Envelope: proto: common.Envelope: illegal tag 0 (wire type 1)",
  1213  			block: &common.Block{
  1214  				Data: &common.BlockData{
  1215  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1216  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1217  							Header: &common.Header{
  1218  								ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1219  									Type: int32(common.HeaderType_ORDERER_TRANSACTION),
  1220  								}),
  1221  							},
  1222  							Data: []byte{1, 2, 3},
  1223  						}),
  1224  					})},
  1225  				},
  1226  			},
  1227  		},
  1228  		{
  1229  			name:        "orderer transaction with corrupt inner payload",
  1230  			expectedErr: "error unmarshaling Payload: proto: common.Payload: illegal tag 0 (wire type 1)",
  1231  			block: &common.Block{
  1232  				Data: &common.BlockData{
  1233  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1234  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1235  							Header: &common.Header{
  1236  								ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1237  									Type: int32(common.HeaderType_ORDERER_TRANSACTION),
  1238  								}),
  1239  							},
  1240  							Data: protoutil.MarshalOrPanic(&common.Envelope{
  1241  								Payload: []byte{1, 2, 3},
  1242  							}),
  1243  						}),
  1244  					})},
  1245  				},
  1246  			},
  1247  		},
  1248  		{
  1249  			name:        "orderer transaction with nil inner header",
  1250  			expectedErr: "inner payload's header is nil",
  1251  			block: &common.Block{
  1252  				Data: &common.BlockData{
  1253  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1254  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1255  							Header: &common.Header{
  1256  								ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1257  									Type: int32(common.HeaderType_ORDERER_TRANSACTION),
  1258  								}),
  1259  							},
  1260  							Data: protoutil.MarshalOrPanic(&common.Envelope{
  1261  								Payload: protoutil.MarshalOrPanic(&common.Payload{}),
  1262  							}),
  1263  						}),
  1264  					})},
  1265  				},
  1266  			},
  1267  		},
  1268  		{
  1269  			name:        "orderer transaction with corrupt inner channel header",
  1270  			expectedErr: "error unmarshaling ChannelHeader: proto: common.ChannelHeader: illegal tag 0 (wire type 1)",
  1271  			block: &common.Block{
  1272  				Data: &common.BlockData{
  1273  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1274  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1275  							Header: &common.Header{
  1276  								ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1277  									Type: int32(common.HeaderType_ORDERER_TRANSACTION),
  1278  								}),
  1279  							},
  1280  							Data: protoutil.MarshalOrPanic(&common.Envelope{
  1281  								Payload: protoutil.MarshalOrPanic(&common.Payload{
  1282  									Header: &common.Header{
  1283  										ChannelHeader: []byte{1, 2, 3},
  1284  									},
  1285  								}),
  1286  							}),
  1287  						}),
  1288  					})},
  1289  				},
  1290  			},
  1291  		},
  1292  		{
  1293  			name:        "orderer transaction that is not a config, but a config update",
  1294  			expectedErr: "",
  1295  			block: &common.Block{
  1296  				Data: &common.BlockData{
  1297  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1298  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1299  							Header: &common.Header{
  1300  								ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1301  									Type: int32(common.HeaderType_ORDERER_TRANSACTION),
  1302  								}),
  1303  							},
  1304  							Data: protoutil.MarshalOrPanic(&common.Envelope{
  1305  								Payload: protoutil.MarshalOrPanic(&common.Payload{
  1306  									Header: &common.Header{
  1307  										ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1308  											Type: int32(common.HeaderType_CONFIG_UPDATE),
  1309  										}),
  1310  									},
  1311  								}),
  1312  							}),
  1313  						}),
  1314  					})},
  1315  				},
  1316  			},
  1317  		},
  1318  		{
  1319  			expectedErr: "",
  1320  			name:        "orderer transaction that is a system channel config block",
  1321  			block: &common.Block{
  1322  				Data: &common.BlockData{
  1323  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1324  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1325  							Header: &common.Header{
  1326  								ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1327  									ChannelId: "systemChannel",
  1328  									Type:      int32(common.HeaderType_ORDERER_TRANSACTION),
  1329  								}),
  1330  							},
  1331  							Data: protoutil.MarshalOrPanic(&common.Envelope{
  1332  								Payload: protoutil.MarshalOrPanic(&common.Payload{
  1333  									Header: &common.Header{
  1334  										ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1335  											Type:      int32(common.HeaderType_CONFIG),
  1336  											ChannelId: "systemChannel",
  1337  										}),
  1338  									},
  1339  								}),
  1340  							}),
  1341  						}),
  1342  					})},
  1343  				},
  1344  			},
  1345  		},
  1346  		{
  1347  			name:         "orderer transaction that creates a new application channel",
  1348  			expectedErr:  "",
  1349  			returnedName: "notSystemChannel",
  1350  			block: &common.Block{
  1351  				Data: &common.BlockData{
  1352  					Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1353  						Payload: protoutil.MarshalOrPanic(&common.Payload{
  1354  							Header: &common.Header{
  1355  								ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1356  									ChannelId: "systemChannel",
  1357  									Type:      int32(common.HeaderType_ORDERER_TRANSACTION),
  1358  								}),
  1359  							},
  1360  							Data: protoutil.MarshalOrPanic(&common.Envelope{
  1361  								Payload: protoutil.MarshalOrPanic(&common.Payload{
  1362  									Header: &common.Header{
  1363  										ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1364  											Type:      int32(common.HeaderType_CONFIG),
  1365  											ChannelId: "notSystemChannel",
  1366  										}),
  1367  									},
  1368  								}),
  1369  							}),
  1370  						}),
  1371  					})},
  1372  				},
  1373  			},
  1374  			returnGenesisBlock: true,
  1375  		},
  1376  	} {
  1377  		t.Run(testCase.name, func(t *testing.T) {
  1378  			channelName, gb, err := cluster.ExtractGenesisBlock(flogging.MustGetLogger("test"), testCase.block)
  1379  			if testCase.expectedErr != "" {
  1380  				assert.EqualError(t, err, testCase.expectedErr)
  1381  			} else {
  1382  				assert.NoError(t, err)
  1383  			}
  1384  			assert.Equal(t, testCase.returnedName, channelName)
  1385  			if testCase.returnGenesisBlock {
  1386  				assert.NotNil(t, gb)
  1387  			} else {
  1388  				assert.Nil(t, gb)
  1389  			}
  1390  		})
  1391  	}
  1392  }
  1393  
  1394  func TestChannels(t *testing.T) {
  1395  	makeBlock := func(outerChannelName, innerChannelName string) *common.Block {
  1396  		return &common.Block{
  1397  			Header: &common.BlockHeader{},
  1398  			Data: &common.BlockData{
  1399  				Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{
  1400  					Payload: protoutil.MarshalOrPanic(&common.Payload{
  1401  						Header: &common.Header{
  1402  							ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1403  								ChannelId: outerChannelName,
  1404  								Type:      int32(common.HeaderType_ORDERER_TRANSACTION),
  1405  							}),
  1406  						},
  1407  						Data: protoutil.MarshalOrPanic(&common.Envelope{
  1408  							Payload: protoutil.MarshalOrPanic(&common.Payload{
  1409  								Header: &common.Header{
  1410  									ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{
  1411  										Type:      int32(common.HeaderType_CONFIG),
  1412  										ChannelId: innerChannelName,
  1413  									}),
  1414  								},
  1415  							}),
  1416  						}),
  1417  					}),
  1418  				})},
  1419  			},
  1420  		}
  1421  	}
  1422  
  1423  	for _, testCase := range []struct {
  1424  		name               string
  1425  		prepareSystemChain func(systemChain []*common.Block)
  1426  		assertion          func(t *testing.T, ci *cluster.ChainInspector)
  1427  	}{
  1428  		{
  1429  			name: "happy path - artificial blocks",
  1430  			prepareSystemChain: func(systemChain []*common.Block) {
  1431  				assignHashes(systemChain)
  1432  			},
  1433  			assertion: func(t *testing.T, ci *cluster.ChainInspector) {
  1434  				actual := cluster.GenesisBlocks(ci.Channels())
  1435  				// The channels may be returned in any order.
  1436  				assert.Contains(t, [][]string{{"mychannel", "mychannel2"}, {"mychannel2", "mychannel"}}, actual.Names())
  1437  			},
  1438  		},
  1439  		{
  1440  			name: "happy path - one block is not artificial but real",
  1441  			prepareSystemChain: func(systemChain []*common.Block) {
  1442  				blockbytes, err := ioutil.ReadFile(filepath.Join("testdata", "block3.pb"))
  1443  				assert.NoError(t, err)
  1444  				block := &common.Block{}
  1445  				err = proto.Unmarshal(blockbytes, block)
  1446  				assert.NoError(t, err)
  1447  
  1448  				systemChain[len(systemChain)/2-1] = block
  1449  				assignHashes(systemChain)
  1450  			},
  1451  			assertion: func(t *testing.T, ci *cluster.ChainInspector) {
  1452  				actual := cluster.GenesisBlocks(ci.Channels())
  1453  				// The channels may be returned in any order.
  1454  				assert.Contains(t, [][]string{{"mychannel2", "bar"}, {"bar", "mychannel2"}}, actual.Names())
  1455  			},
  1456  		},
  1457  		{
  1458  			name: "bad path - pulled chain's last block hash doesn't match the last config block",
  1459  			prepareSystemChain: func(systemChain []*common.Block) {
  1460  				assignHashes(systemChain)
  1461  				systemChain[len(systemChain)-1].Header.PreviousHash = nil
  1462  			},
  1463  			assertion: func(t *testing.T, ci *cluster.ChainInspector) {
  1464  				panicValue := "System channel pulled doesn't match the boot last config block:" +
  1465  					" block [2]'s hash (bc4ef5cc8a61ac0747cc82df58bac9ad3278622c1cfc7a119b9b1068e422c9f1)" +
  1466  					" mismatches block [3]'s prev block hash ()"
  1467  				assert.PanicsWithValue(t, panicValue, func() {
  1468  					ci.Channels()
  1469  				})
  1470  			},
  1471  		},
  1472  		{
  1473  			name: "bad path - hash chain mismatch",
  1474  			prepareSystemChain: func(systemChain []*common.Block) {
  1475  				assignHashes(systemChain)
  1476  				systemChain[len(systemChain)-2].Header.PreviousHash = nil
  1477  			},
  1478  			assertion: func(t *testing.T, ci *cluster.ChainInspector) {
  1479  				panicValue := "Claimed previous hash of block [2] is  but actual previous " +
  1480  					"hash is 920faeb0bd8a02b3f2553247359fb3b684819c75c6e5487bc7eed632841ddc5f"
  1481  				assert.PanicsWithValue(t, panicValue, func() {
  1482  					ci.Channels()
  1483  				})
  1484  			},
  1485  		},
  1486  		{
  1487  			name: "bad path - a block cannot be classified",
  1488  			prepareSystemChain: func(systemChain []*common.Block) {
  1489  				assignHashes(systemChain)
  1490  				systemChain[len(systemChain)-2].Data.Data = [][]byte{{1, 2, 3}}
  1491  			},
  1492  			assertion: func(t *testing.T, ci *cluster.ChainInspector) {
  1493  				panicValue := "Failed extracting channel genesis block from config block: " +
  1494  					"block data does not carry an envelope at index 0: error unmarshaling " +
  1495  					"Envelope: proto: common.Envelope: illegal tag 0 (wire type 1)"
  1496  				assert.PanicsWithValue(t, panicValue, func() {
  1497  					ci.Channels()
  1498  				})
  1499  			},
  1500  		},
  1501  		{
  1502  			name: "bad path - failed pulling blocks",
  1503  			prepareSystemChain: func(systemChain []*common.Block) {
  1504  				assignHashes(systemChain)
  1505  				// Setting a block to nil makes the block puller return nil,
  1506  				// which signals a failure to pull that block.
  1507  				systemChain[len(systemChain)/2] = nil
  1508  			},
  1509  			assertion: func(t *testing.T, ci *cluster.ChainInspector) {
  1510  				panicValue := "Failed pulling block [2] from the system channel"
  1511  				assert.PanicsWithValue(t, panicValue, func() {
  1512  					ci.Channels()
  1513  				})
  1514  			},
  1515  		},
  1516  	} {
  1517  		t.Run(testCase.name, func(t *testing.T) {
  1518  			systemChain := []*common.Block{
  1519  				makeBlock("systemChannel", "systemChannel"),
  1520  				makeBlock("systemChannel", "mychannel"),
  1521  				makeBlock("systemChannel", "mychannel2"),
  1522  				makeBlock("systemChannel", "systemChannel"),
  1523  			}
  1524  
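        			// Number the blocks sequentially and give each one a valid data
        			// hash; prepareSystemChain then links (or deliberately corrupts)
        			// the hash chain for the case under test.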
  1525  			for i := 0; i < len(systemChain); i++ {
  1526  				systemChain[i].Header.DataHash = protoutil.BlockDataHash(systemChain[i].Data)
  1527  				systemChain[i].Header.Number = uint64(i)
  1528  			}
  1529  			testCase.prepareSystemChain(systemChain)
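        			// The mock puller serves every block except the last one, which
        			// is handed to the ChainInspector directly as its LastConfigBlock.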
  1530  			puller := &mocks.ChainPuller{}
  1531  			puller.On("Close")
  1532  			for seq := uint64(0); int(seq) < len(systemChain)-1; seq++ {
  1533  				puller.On("PullBlock", seq).Return(systemChain[int(seq)])
  1534  			}
  1535  
  1536  			ci := &cluster.ChainInspector{
  1537  				Logger:          flogging.MustGetLogger("test"),
  1538  				Puller:          puller,
  1539  				LastConfigBlock: systemChain[len(systemChain)-1],
  1540  			}
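        			// Defers run LIFO: ci.Close() executes first, and only then do we
        			// assert that the puller's Close was called exactly once.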
  1541  			defer puller.AssertNumberOfCalls(t, "Close", 1)
  1542  			defer ci.Close()
  1543  			testCase.assertion(t, ci)
  1544  		})
  1545  	}
  1546  }
  1547  
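        // fakeGB stands in for a genesis (config) block: its payload wraps a
        // marshaled Config message, and its metadata holds the four standard
        // block metadata slots.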
  1548  var fakeGB = &common.Block{
  1549  	Header: &common.BlockHeader{},
  1550  	Metadata: &common.BlockMetadata{
  1551  		Metadata: [][]byte{{}, {}, {}, {}},
  1552  	},
  1553  	Data: &common.BlockData{
  1554  		Data: [][]byte{
  1555  			protoutil.MarshalOrPanic(&common.Envelope{
  1556  				Payload: protoutil.MarshalOrPanic(&common.Envelope{
  1557  					Payload: protoutil.MarshalOrPanic(&common.Config{
  1558  						Sequence: 1,
  1559  					}),
  1560  				}),
  1561  			}),
  1562  		},
  1563  	},
  1564  }
  1565  
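        // simulateNonParticipantChannelPull scripts the fake OSN with the sequence
        // the Replicator follows for a channel it does not participate in: two
        // probes and a pull of the last block to locate the last config index,
        // then a probe and a pull of the genesis block itself.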
  1566  func simulateNonParticipantChannelPull(osn *deliverServer) {
  1567  	lastBlock := protoutil.NewBlock(1, nil)
  1568  	lastBlock.Metadata.Metadata[common.BlockMetadataIndex_LAST_CONFIG] = protoutil.MarshalOrPanic(&common.Metadata{
  1569  		Value: protoutil.MarshalOrPanic(&common.LastConfig{Index: 0}),
  1570  	})
  1571  	// We first present a channel whose last block is 'lastBlock', which
  1572  	// points to the genesis block.
  1573  	osn.addExpectProbeAssert()
  1574  	osn.blockResponses <- &orderer.DeliverResponse{
  1575  		Type: &orderer.DeliverResponse_Block{Block: lastBlock},
  1576  	}
  1577  	osn.addExpectProbeAssert()
  1578  	osn.blockResponses <- &orderer.DeliverResponse{
  1579  		Type: &orderer.DeliverResponse_Block{Block: lastBlock},
  1580  	}
  1581  	osn.addExpectPullAssert(1)
  1582  	osn.blockResponses <- &orderer.DeliverResponse{
  1583  		Type: &orderer.DeliverResponse_Block{Block: lastBlock},
  1584  	}
  1585  	osn.blockResponses <- nil
  1586  
  1587  	// Next, make the OSN send back the genesis block.
  1588  	// The first send answers the probe,
  1589  	osn.addExpectProbeAssert()
  1590  	osn.blockResponses <- &orderer.DeliverResponse{
  1591  		Type: &orderer.DeliverResponse_Block{Block: fakeGB},
  1592  	}
  1593  	osn.addExpectPullAssert(0)
  1594  	// and the second send delivers the genesis block itself downstream.
  1595  	osn.blockResponses <- &orderer.DeliverResponse{
  1596  		Type: &orderer.DeliverResponse_Block{Block: fakeGB},
  1597  	}
  1598  
  1599  	osn.blockResponses <- nil
  1600  }
  1601  
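        // TestFilter verifies that PullChannel honors the Replicator's Filter:
        // a channel the filter rejects is skipped, yielding ErrSkipped and a log
        // line that records the skip.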
  1602  func TestFilter(t *testing.T) {
  1603  	logger := flogging.MustGetLogger("test")
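        	// Hook the logger so that every entry the Replicator emits is asserted
        	// to be the expected "skipping" message.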
  1604  	logger = logger.WithOptions(zap.Hooks(func(entry zapcore.Entry) error {
  1605  		assert.Equal(t, "Channel foo shouldn't be pulled. Skipping it", entry.Message)
  1606  		return nil
  1607  	}))
  1608  
  1609  	r := &cluster.Replicator{
  1610  		Filter: func(_ string) bool {
  1611  			return false
  1612  		},
  1613  		Logger: logger,
  1614  	}
  1615  	assert.Equal(t, cluster.ErrSkipped, r.PullChannel("foo"))
  1616  }