github.com/ewagmig/fabric@v2.1.1+incompatible/gossip/state/state_test.go

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package state
     8  
     9  import (
    10  	"bytes"
    11  	"errors"
    12  	"fmt"
    13  	"math/rand"
    14  	"net"
    15  	"sync"
    16  	"sync/atomic"
    17  	"testing"
    18  	"time"
    19  
    20  	pb "github.com/golang/protobuf/proto"
    21  	pcomm "github.com/hyperledger/fabric-protos-go/common"
    22  	proto "github.com/hyperledger/fabric-protos-go/gossip"
    23  	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
    24  	tspb "github.com/hyperledger/fabric-protos-go/transientstore"
    25  	"github.com/hyperledger/fabric/bccsp/factory"
    26  	"github.com/hyperledger/fabric/common/configtx/test"
    27  	errors2 "github.com/hyperledger/fabric/common/errors"
    28  	"github.com/hyperledger/fabric/common/flogging"
    29  	"github.com/hyperledger/fabric/common/metrics/disabled"
    30  	"github.com/hyperledger/fabric/core/committer"
    31  	"github.com/hyperledger/fabric/core/committer/txvalidator"
    32  	"github.com/hyperledger/fabric/core/ledger"
    33  	"github.com/hyperledger/fabric/core/mocks/validator"
    34  	"github.com/hyperledger/fabric/core/transientstore"
    35  	"github.com/hyperledger/fabric/gossip/api"
    36  	"github.com/hyperledger/fabric/gossip/comm"
    37  	"github.com/hyperledger/fabric/gossip/common"
    38  	"github.com/hyperledger/fabric/gossip/discovery"
    39  	"github.com/hyperledger/fabric/gossip/gossip"
    40  	"github.com/hyperledger/fabric/gossip/gossip/algo"
    41  	"github.com/hyperledger/fabric/gossip/gossip/channel"
    42  	"github.com/hyperledger/fabric/gossip/metrics"
    43  	"github.com/hyperledger/fabric/gossip/privdata"
    44  	capabilitymock "github.com/hyperledger/fabric/gossip/privdata/mocks"
    45  	"github.com/hyperledger/fabric/gossip/protoext"
    46  	"github.com/hyperledger/fabric/gossip/state/mocks"
    47  	gossiputil "github.com/hyperledger/fabric/gossip/util"
    48  	gutil "github.com/hyperledger/fabric/gossip/util"
    49  	corecomm "github.com/hyperledger/fabric/internal/pkg/comm"
    50  	"github.com/hyperledger/fabric/protoutil"
    51  	"github.com/onsi/gomega"
    52  	"github.com/onsi/gomega/gbytes"
    53  	"github.com/stretchr/testify/assert"
    54  	"github.com/stretchr/testify/mock"
    55  	"go.uber.org/zap"
    56  	"go.uber.org/zap/zapcore"
    57  )
    58  
    59  var (
    60  	orgID = []byte("ORG1")
    61  
    62  	noopPeerIdentityAcceptor = func(identity api.PeerIdentityType) error {
    63  		return nil
    64  	}
    65  )
    66  
    67  type peerIdentityAcceptor func(identity api.PeerIdentityType) error
    68  
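        // joinChanMsg is a minimal api.JoinChannelMessage stub used to join
        // the test gossip instances to the channel.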
    69  type joinChanMsg struct {
    70  }
    71  
    72  func init() {
    73  	gutil.SetupTestLogging()
    74  	factory.InitFactories(nil)
    75  }
    76  
    77  // SequenceNumber returns the sequence number of the block that the message
    78  // is derived from; this test stub returns the current time in nanoseconds
    79  func (*joinChanMsg) SequenceNumber() uint64 {
    80  	return uint64(time.Now().UnixNano())
    81  }
    82  
    83  // Members returns the organizations of the channel
    84  func (jcm *joinChanMsg) Members() []api.OrgIdentityType {
    85  	return []api.OrgIdentityType{orgID}
    86  }
    87  
    88  // AnchorPeersOf returns the anchor peers of the given organization
    89  func (jcm *joinChanMsg) AnchorPeersOf(org api.OrgIdentityType) []api.AnchorPeer {
    90  	return []api.AnchorPeer{}
    91  }
    92  
    93  type orgCryptoService struct {
    94  }
    95  
    96  // OrgByPeerIdentity returns the OrgIdentityType
    97  // of a given peer identity
    98  func (*orgCryptoService) OrgByPeerIdentity(identity api.PeerIdentityType) api.OrgIdentityType {
    99  	return orgID
   100  }
   101  
   102  // Verify verifies a JoinChannelMessage, returns nil on success,
   103  // and an error on failure
   104  func (*orgCryptoService) Verify(joinChanMsg api.JoinChannelMessage) error {
   105  	return nil
   106  }
   107  
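        // cryptoServiceMock is a no-op message crypto service whose
        // channel-scoped verification is delegated to the injected acceptor.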
   108  type cryptoServiceMock struct {
   109  	acceptor peerIdentityAcceptor
   110  }
   111  
   112  func (cryptoServiceMock) Expiration(peerIdentity api.PeerIdentityType) (time.Time, error) {
   113  	return time.Now().Add(time.Hour), nil
   114  }
   115  
   116  // GetPKIidOfCert returns the PKI-ID of a peer's identity
   117  func (*cryptoServiceMock) GetPKIidOfCert(peerIdentity api.PeerIdentityType) common.PKIidType {
   118  	return common.PKIidType(peerIdentity)
   119  }
   120  
   121  // VerifyBlock returns nil if the block is properly signed,
   122  // else returns error
   123  func (*cryptoServiceMock) VerifyBlock(channelID common.ChannelID, seqNum uint64, signedBlock *pcomm.Block) error {
   124  	return nil
   125  }
   126  
   127  // Sign signs msg with this peer's signing key and outputs
   128  // the signature if no error occurred.
   129  func (*cryptoServiceMock) Sign(msg []byte) ([]byte, error) {
   130  	clone := make([]byte, len(msg))
   131  	copy(clone, msg)
   132  	return clone, nil
   133  }
   134  
   135  // Verify checks that signature is a valid signature of message under a peer's verification key.
   136  // If the verification succeeded, Verify returns nil meaning no error occurred.
   137  // If peerIdentity is nil, then the signature is verified against this peer's verification key.
   138  func (*cryptoServiceMock) Verify(peerIdentity api.PeerIdentityType, signature, message []byte) error {
   139  	equal := bytes.Equal(signature, message)
   140  	if !equal {
   141  		return fmt.Errorf("Wrong signature:%v, %v", signature, message)
   142  	}
   143  	return nil
   144  }
   145  
   146  // VerifyByChannel checks that signature is a valid signature of message
   147  // under a peer's verification key, but also in the context of a specific channel.
   148  // If the verification succeeded, Verify returns nil meaning no error occurred.
   149  // If peerIdentity is nil, then the signature is verified against this peer's verification key.
   150  func (cs *cryptoServiceMock) VerifyByChannel(channelID common.ChannelID, peerIdentity api.PeerIdentityType, signature, message []byte) error {
   151  	return cs.acceptor(peerIdentity)
   152  }
   153  
   154  func (*cryptoServiceMock) ValidateIdentity(peerIdentity api.PeerIdentityType) error {
   155  	return nil
   156  }
   157  
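        // bootPeersWithPorts renders loopback bootstrap endpoints for the given ports.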
   158  func bootPeersWithPorts(ports ...int) []string {
   159  	var peers []string
   160  	for _, port := range ports {
   161  		peers = append(peers, fmt.Sprintf("127.0.0.1:%d", port))
   162  	}
   163  	return peers
   164  }
   165  
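        // peerNodeGossipSupport is the subset of the gossip service that the
        // test peers interact with.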
   166  type peerNodeGossipSupport interface {
   167  	GossipAdapter
   168  	Stop()
   169  	JoinChan(joinMsg api.JoinChannelMessage, channelID common.ChannelID)
   170  }
   171  
   172  // Simple representation of a peer, including only the
   173  // communication module, gossip, and state transfer
   174  type peerNode struct {
   175  	port   int
   176  	g      peerNodeGossipSupport
   177  	s      *GossipStateProviderImpl
   178  	cs     *cryptoServiceMock
   179  	commit committer.Committer
   180  	grpc   *corecomm.GRPCServer
   181  }
   182  
   183  // shutdown stops all modules used by the peer node
   184  func (node *peerNode) shutdown() {
   185  	node.s.Stop()
   186  	node.g.Stop()
   187  	node.grpc.Stop()
   188  }
   189  
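        // mockCommitter wraps a testify mock behind a mutex so tests can swap
        // the embedded mock and reprogram expectations while the peer is running.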
   190  type mockCommitter struct {
   191  	*mock.Mock
   192  	sync.Mutex
   193  }
   194  
   195  func (mc *mockCommitter) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
   196  	args := mc.Called()
   197  	return args.Get(0).(ledger.ConfigHistoryRetriever), args.Error(1)
   198  }
   199  
   200  func (mc *mockCommitter) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
   201  	args := mc.Called(blockNum, filter)
   202  	return args.Get(0).([]*ledger.TxPvtData), args.Error(1)
   203  }
   204  
   205  func (mc *mockCommitter) CommitLegacy(blockAndPvtData *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
   206  	mc.Lock()
   207  	m := mc.Mock
   208  	mc.Unlock()
   209  	m.Called(blockAndPvtData.Block)
   210  	return nil
   211  }
   212  
   213  func (mc *mockCommitter) GetPvtDataAndBlockByNum(seqNum uint64) (*ledger.BlockAndPvtData, error) {
   214  	mc.Lock()
   215  	m := mc.Mock
   216  	mc.Unlock()
   217  
   218  	args := m.Called(seqNum)
   219  	return args.Get(0).(*ledger.BlockAndPvtData), args.Error(1)
   220  }
   221  
   222  func (mc *mockCommitter) LedgerHeight() (uint64, error) {
   223  	mc.Lock()
   224  	m := mc.Mock
   225  	mc.Unlock()
   226  	args := m.Called()
   227  	if args.Get(1) == nil {
   228  		return args.Get(0).(uint64), nil
   229  	}
   230  	return args.Get(0).(uint64), args.Get(1).(error)
   231  }
   232  
   233  func (mc *mockCommitter) DoesPvtDataInfoExistInLedger(blkNum uint64) (bool, error) {
   234  	mc.Lock()
   235  	m := mc.Mock
   236  	mc.Unlock()
   237  	args := m.Called(blkNum)
   238  	return args.Get(0).(bool), args.Error(1)
   239  }
   240  
   241  func (mc *mockCommitter) GetBlocks(blockSeqs []uint64) []*pcomm.Block {
   242  	mc.Lock()
   243  	m := mc.Mock
   244  	mc.Unlock()
   245  	args := m.Called(blockSeqs)
   246  	if args.Get(0) == nil {
   247  		return nil
   248  	}
   249  	return args.Get(0).([]*pcomm.Block)
   250  }
   251  
   252  func (*mockCommitter) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) {
   253  	panic("implement me")
   254  }
   255  
   256  func (*mockCommitter) CommitPvtDataOfOldBlocks(reconciledPvtdata []*ledger.ReconciledPvtdata) ([]*ledger.PvtdataHashMismatch, error) {
   257  	panic("implement me")
   258  }
   259  
   260  func (*mockCommitter) Close() {
   261  }
   262  
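        // ramLedger is an in-memory ledger that stores blocks together with
        // their private data in a map keyed by block number.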
   263  type ramLedger struct {
   264  	ledger map[uint64]*ledger.BlockAndPvtData
   265  	sync.RWMutex
   266  }
   267  
   268  func (mock *ramLedger) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) {
   269  	panic("implement me")
   270  }
   271  
   272  func (mock *ramLedger) CommitPvtDataOfOldBlocks(reconciledPvtdata []*ledger.ReconciledPvtdata) ([]*ledger.PvtdataHashMismatch, error) {
   273  	panic("implement me")
   274  }
   275  
   276  func (mock *ramLedger) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
   277  	panic("implement me")
   278  }
   279  
   280  func (mock *ramLedger) GetPvtDataAndBlockByNum(blockNum uint64, filter ledger.PvtNsCollFilter) (*ledger.BlockAndPvtData, error) {
   281  	mock.RLock()
   282  	defer mock.RUnlock()
   283  
   284  	block, ok := mock.ledger[blockNum]
   285  	if !ok {
   286  		return nil, fmt.Errorf("no block with seq = %d found", blockNum)
   287  	}
   288  	return block, nil
   289  }
   290  
   291  func (mock *ramLedger) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
   292  	panic("implement me")
   293  }
   294  
   295  func (mock *ramLedger) CommitLegacy(blockAndPvtdata *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
   296  	mock.Lock()
   297  	defer mock.Unlock()
   298  
   299  	if blockAndPvtdata != nil && blockAndPvtdata.Block != nil {
   300  		mock.ledger[blockAndPvtdata.Block.Header.Number] = blockAndPvtdata
   301  		return nil
   302  	}
   303  	return errors.New("invalid input parameters for block and private data")
   304  }
   305  
   306  func (mock *ramLedger) GetBlockchainInfo() (*pcomm.BlockchainInfo, error) {
   307  	mock.RLock()
   308  	defer mock.RUnlock()
   309  
   310  	currentBlock := mock.ledger[uint64(len(mock.ledger)-1)].Block
   311  	return &pcomm.BlockchainInfo{
   312  		Height:            currentBlock.Header.Number + 1,
   313  		CurrentBlockHash:  protoutil.BlockHeaderHash(currentBlock.Header),
   314  		PreviousBlockHash: currentBlock.Header.PreviousHash,
   315  	}, nil
   316  }
   317  
   318  func (mock *ramLedger) DoesPvtDataInfoExist(blkNum uint64) (bool, error) {
   319  	return false, nil
   320  }
   321  
   322  func (mock *ramLedger) GetBlockByNumber(blockNumber uint64) (*pcomm.Block, error) {
   323  	mock.RLock()
   324  	defer mock.RUnlock()
   325  
   326  	blockAndPvtData, ok := mock.ledger[blockNumber]
   327  	if !ok {
   328  		return nil, fmt.Errorf("no block with seq = %d found", blockNumber)
   329  	}
   330  	return blockAndPvtData.Block, nil
   331  }
   332  
   333  func (mock *ramLedger) Close() {
   334  
   335  }
   336  
   337  // newCommitter creates a committer backed by an in-memory ledger, seeded with a genesis block, for testing
   338  func newCommitter() committer.Committer {
   339  	cb, _ := test.MakeGenesisBlock("testChain")
   340  	ldgr := &ramLedger{
   341  		ledger: make(map[uint64]*ledger.BlockAndPvtData),
   342  	}
   343  	ldgr.CommitLegacy(&ledger.BlockAndPvtData{Block: cb}, &ledger.CommitOptions{})
   344  	return committer.NewLedgerCommitter(ldgr)
   345  }
   346  
   347  func newPeerNodeWithGossip(id int, committer committer.Committer,
   348  	acceptor peerIdentityAcceptor, g peerNodeGossipSupport, bootPorts ...int) *peerNode {
   349  	logger := flogging.MustGetLogger(gutil.StateLogger)
   350  	return newPeerNodeWithGossipWithValidator(logger, id, committer, acceptor, g, &validator.MockValidator{}, bootPorts...)
   351  }
   352  
   353  // Constructs a pseudo peer node, simulating only the gossip and state transfer parts
   354  func newPeerNodeWithGossipWithValidatorWithMetrics(logger gutil.Logger, id int, committer committer.Committer,
   355  	acceptor peerIdentityAcceptor, g peerNodeGossipSupport, v txvalidator.Validator,
   356  	gossipMetrics *metrics.GossipMetrics, bootPorts ...int) (node *peerNode, port int) {
   357  	cs := &cryptoServiceMock{acceptor: acceptor}
   358  	port, gRPCServer, certs, secureDialOpts, _ := gossiputil.CreateGRPCLayer()
   359  
   360  	if g == nil {
   361  		config := &gossip.Config{
   362  			BindPort:                     port,
   363  			BootstrapPeers:               bootPeersWithPorts(bootPorts...),
   364  			ID:                           fmt.Sprintf("p%d", id),
   365  			MaxBlockCountToStore:         0,
   366  			MaxPropagationBurstLatency:   10 * time.Millisecond,
   367  			MaxPropagationBurstSize:      10,
   368  			PropagateIterations:          1,
   369  			PropagatePeerNum:             3,
   370  			PullInterval:                 4 * time.Second,
   371  			PullPeerNum:                  5,
   372  			InternalEndpoint:             fmt.Sprintf("127.0.0.1:%d", port),
   373  			PublishCertPeriod:            10 * time.Second,
   374  			RequestStateInfoInterval:     4 * time.Second,
   375  			PublishStateInfoInterval:     4 * time.Second,
   376  			TimeForMembershipTracker:     5 * time.Second,
   377  			TLSCerts:                     certs,
   378  			DigestWaitTime:               algo.DefDigestWaitTime,
   379  			RequestWaitTime:              algo.DefRequestWaitTime,
   380  			ResponseWaitTime:             algo.DefResponseWaitTime,
   381  			DialTimeout:                  comm.DefDialTimeout,
   382  			ConnTimeout:                  comm.DefConnTimeout,
   383  			RecvBuffSize:                 comm.DefRecvBuffSize,
   384  			SendBuffSize:                 comm.DefSendBuffSize,
   385  			MsgExpirationTimeout:         channel.DefMsgExpirationTimeout,
   386  			AliveTimeInterval:            discovery.DefAliveTimeInterval,
   387  			AliveExpirationTimeout:       discovery.DefAliveExpirationTimeout,
   388  			AliveExpirationCheckInterval: discovery.DefAliveExpirationCheckInterval,
   389  			ReconnectInterval:            discovery.DefReconnectInterval,
   390  		}
   391  
   392  		selfID := api.PeerIdentityType(config.InternalEndpoint)
   393  		mcs := &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}
   394  		g = gossip.New(config, gRPCServer.Server(), &orgCryptoService{}, mcs, selfID, secureDialOpts, gossipMetrics)
   395  	}
   396  
   397  	g.JoinChan(&joinChanMsg{}, common.ChannelID("testchannelid"))
   398  
   399  	go func() {
   400  		gRPCServer.Start()
   401  	}()
   402  
   403  	// Initialize the pseudo peer simulator, which has only three basic parts:
   404  	// the gossip adapter, the privdata coordinator, and the state provider
   405  
   406  	servicesAdapter := &ServicesMediator{GossipAdapter: g, MCSAdapter: cs}
   407  	coordConfig := privdata.CoordinatorConfig{
   408  		PullRetryThreshold:             0,
   409  		TransientBlockRetention:        1000,
   410  		SkipPullingInvalidTransactions: false,
   411  	}
   412  
   413  	mspID := "Org1MSP"
   414  	capabilityProvider := &capabilitymock.CapabilityProvider{}
   415  	appCapability := &capabilitymock.AppCapabilities{}
   416  	capabilityProvider.On("Capabilities").Return(appCapability)
   417  	appCapability.On("StorePvtDataOfInvalidTx").Return(true)
   418  	coord := privdata.NewCoordinator(mspID, privdata.Support{
   419  		Validator:          v,
   420  		Committer:          committer,
   421  		CapabilityProvider: capabilityProvider,
   422  	}, &transientstore.Store{}, protoutil.SignedData{}, gossipMetrics.PrivdataMetrics, coordConfig, nil)
   423  	stateConfig := &StateConfig{
   424  		StateCheckInterval:   DefStateCheckInterval,
   425  		StateResponseTimeout: DefStateResponseTimeout,
   426  		StateBatchSize:       DefStateBatchSize,
   427  		StateMaxRetries:      DefStateMaxRetries,
   428  		StateBlockBufferSize: DefStateBlockBufferSize,
   429  		StateChannelSize:     DefStateChannelSize,
   430  		StateEnabled:         DefStateEnabled,
   431  	}
   432  	sp := NewGossipStateProvider(logger, "testchannelid", servicesAdapter, coord, gossipMetrics.StateMetrics, blocking, stateConfig)
   433  	if sp == nil {
   434  		gRPCServer.Stop()
   435  		return nil, port
   436  	}
   437  
   438  	return &peerNode{
   439  		port:   port,
   440  		g:      g,
   441  		s:      sp.(*GossipStateProviderImpl),
   442  		commit: committer,
   443  		cs:     cs,
   444  		grpc:   gRPCServer,
   445  	}, port
   447  }
   448  
   449  // Constructs a pseudo peer node with a caller-supplied metrics provider, for metrics testing
   450  func newPeerNodeWithGossipWithMetrics(id int, committer committer.Committer,
   451  	acceptor peerIdentityAcceptor, g peerNodeGossipSupport, gossipMetrics *metrics.GossipMetrics) *peerNode {
   452  	logger := flogging.MustGetLogger(gutil.StateLogger)
   453  	node, _ := newPeerNodeWithGossipWithValidatorWithMetrics(logger, id, committer, acceptor, g,
   454  		&validator.MockValidator{}, gossipMetrics)
   455  	return node
   456  }
   457  
   458  // Constructs a pseudo peer node, simulating only the gossip and state transfer parts
   459  func newPeerNodeWithGossipWithValidator(logger gutil.Logger, id int, committer committer.Committer,
   460  	acceptor peerIdentityAcceptor, g peerNodeGossipSupport, v txvalidator.Validator, bootPorts ...int) *peerNode {
   461  	gossipMetrics := metrics.NewGossipMetrics(&disabled.Provider{})
   462  	node, _ := newPeerNodeWithGossipWithValidatorWithMetrics(logger, id, committer, acceptor, g, v, gossipMetrics, bootPorts...)
   463  	return node
   464  }
   465  
   466  // Constructs a pseudo peer node, simulating only the gossip and state transfer parts
   467  func newPeerNode(id int, committer committer.Committer, acceptor peerIdentityAcceptor, bootPorts ...int) *peerNode {
   468  	return newPeerNodeWithGossip(id, committer, acceptor, nil, bootPorts...)
   469  }
   470  
   471  // Constructs a pseudo boot node, simulating only the gossip and state transfer parts, and returns its port
   472  func newBootNode(id int, committer committer.Committer, acceptor peerIdentityAcceptor) (node *peerNode, port int) {
   473  	v := &validator.MockValidator{}
   474  	gossipMetrics := metrics.NewGossipMetrics(&disabled.Provider{})
   475  	logger := flogging.MustGetLogger(gutil.StateLogger)
   476  	return newPeerNodeWithGossipWithValidatorWithMetrics(logger, id, committer, acceptor, nil, v, gossipMetrics)
   477  }
   478  
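        // TestNilDirectMsg checks that the state provider survives nil direct
        // messages as well as a state request with an inverted [10, 8] range.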
   479  func TestNilDirectMsg(t *testing.T) {
   480  	t.Parallel()
   481  	mc := &mockCommitter{Mock: &mock.Mock{}}
   482  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   483  	g := &mocks.GossipMock{}
   484  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   485  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   486  	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   487  	defer p.shutdown()
   488  	p.s.handleStateRequest(nil)
   489  	p.s.directMessage(nil)
   490  	sMsg, _ := protoext.NoopSign(p.s.stateRequestMessage(uint64(10), uint64(8)))
   491  	req := &comm.ReceivedMessageImpl{
   492  		SignedGossipMessage: sMsg,
   493  	}
   494  	p.s.directMessage(req)
   495  }
   496  
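        // TestNilAddPayload checks that AddPayload rejects a nil payload with
        // a descriptive error rather than panicking.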
   497  func TestNilAddPayload(t *testing.T) {
   498  	t.Parallel()
   499  	mc := &mockCommitter{Mock: &mock.Mock{}}
   500  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   501  	g := &mocks.GossipMock{}
   502  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   503  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   504  	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   505  	defer p.shutdown()
   506  	err := p.s.AddPayload(nil)
   507  	assert.Error(t, err)
   508  	assert.Contains(t, err.Error(), "nil")
   509  }
   510  
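        // TestAddPayloadLedgerUnavailable checks that AddPayload surfaces an
        // error when the ledger height cannot be queried.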
   511  func TestAddPayloadLedgerUnavailable(t *testing.T) {
   512  	t.Parallel()
   513  	mc := &mockCommitter{Mock: &mock.Mock{}}
   514  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   515  	g := &mocks.GossipMock{}
   516  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   517  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   518  	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   519  	defer p.shutdown()
   520  	// Simulate a problem in the ledger
   521  	failedLedger := mock.Mock{}
   522  	failedLedger.On("LedgerHeight", mock.Anything).Return(uint64(0), errors.New("cannot query ledger"))
   523  	mc.Lock()
   524  	mc.Mock = &failedLedger
   525  	mc.Unlock()
   526  
   527  	rawblock := protoutil.NewBlock(uint64(1), []byte{})
   528  	b, _ := pb.Marshal(rawblock)
   529  	err := p.s.AddPayload(&proto.Payload{
   530  		SeqNum: uint64(1),
   531  		Data:   b,
   532  	})
   533  	assert.Error(t, err)
   534  	assert.Contains(t, err.Error(), "Failed obtaining ledger height")
   535  	assert.Contains(t, err.Error(), "cannot query ledger")
   536  }
   537  
   538  func TestLargeBlockGap(t *testing.T) {
   539  	// Scenario: the peer knows of a peer whose ledger height is much higher
   540  	// than its own (500 blocks higher).
   541  	// The peer needs to request blocks in such a way that the size of the
   542  	// payload buffer never rises above a certain threshold.
   543  	t.Parallel()
   544  	mc := &mockCommitter{Mock: &mock.Mock{}}
   545  	blocksPassedToLedger := make(chan uint64, 200)
   546  	mc.On("CommitLegacy", mock.Anything).Run(func(arg mock.Arguments) {
   547  		blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
   548  	})
   549  	msgsFromPeer := make(chan protoext.ReceivedMessage)
   550  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   551  	mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
   552  	g := &mocks.GossipMock{}
   553  	membership := []discovery.NetworkMember{
   554  		{
   555  			PKIid:    common.PKIidType("a"),
   556  			Endpoint: "a",
   557  			Properties: &proto.Properties{
   558  				LedgerHeight: 500,
   559  			},
   560  		}}
   561  	g.On("PeersOfChannel", mock.Anything).Return(membership)
   562  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   563  	g.On("Accept", mock.Anything, true).Return(nil, msgsFromPeer)
   564  	g.On("Send", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
   565  		msg := arguments.Get(0).(*proto.GossipMessage)
   566  		// The peer sent a state request
   567  		req := msg.GetStateRequest()
   568  		// Construct a skeleton for the response
   569  		res := &proto.GossipMessage{
   570  			Nonce:   msg.Nonce,
   571  			Channel: []byte("testchannelid"),
   572  			Content: &proto.GossipMessage_StateResponse{
   573  				StateResponse: &proto.RemoteStateResponse{},
   574  			},
   575  		}
   576  		// Populate the response with payloads according to what the peer asked for
   577  		for seq := req.StartSeqNum; seq <= req.EndSeqNum; seq++ {
   578  			rawblock := protoutil.NewBlock(seq, []byte{})
   579  			b, _ := pb.Marshal(rawblock)
   580  			payload := &proto.Payload{
   581  				SeqNum: seq,
   582  				Data:   b,
   583  			}
   584  			res.GetStateResponse().Payloads = append(res.GetStateResponse().Payloads, payload)
   585  		}
   586  		// Finally, send the response down the channel the peer expects to receive it from
   587  		sMsg, _ := protoext.NoopSign(res)
   588  		msgsFromPeer <- &comm.ReceivedMessageImpl{
   589  			SignedGossipMessage: sMsg,
   590  		}
   591  	})
   592  	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   593  	defer p.shutdown()
   594  
   595  	// Process blocks at a rate of 20 milliseconds per block, while the
   596  	// imaginary remote peer above keeps answering state requests.
   597  	// If the payload buffer expands above defMaxBlockDistance*2 + defAntiEntropyBatchSize blocks, fail the test
   598  	blockProcessingTime := 20 * time.Millisecond // 10 seconds in total for 500 blocks
   599  	expectedSequence := 1
   600  	for expectedSequence < 500 {
   601  		blockSeq := <-blocksPassedToLedger
   602  		assert.Equal(t, expectedSequence, int(blockSeq))
   603  		// Ensure payload buffer isn't over-populated
   604  		assert.True(t, p.s.payloads.Size() <= defMaxBlockDistance*2+defAntiEntropyBatchSize, "payload buffer size is %d", p.s.payloads.Size())
   605  		expectedSequence++
   606  		time.Sleep(blockProcessingTime)
   607  	}
   608  }
   609  
   610  func TestOverPopulation(t *testing.T) {
   611  	// Scenario: Add blocks to the state provider with a gap in between,
   612  	// and ensure that the payload buffer rejects blocks once the distance
   613  	// between the ledger height and the latest block it contains
   614  	// is bigger than defMaxBlockDistance.
   615  	t.Parallel()
   616  	mc := &mockCommitter{Mock: &mock.Mock{}}
   617  	blocksPassedToLedger := make(chan uint64, 10)
   618  	mc.On("CommitLegacy", mock.Anything).Run(func(arg mock.Arguments) {
   619  		blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
   620  	})
   621  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   622  	mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
   623  	g := &mocks.GossipMock{}
   624  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   625  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   626  	p := newPeerNode(0, mc, noopPeerIdentityAcceptor)
   627  	defer p.shutdown()
   628  
   629  	// Add some blocks in a sequential manner and make sure it works
   630  	for i := 1; i <= 4; i++ {
   631  		rawblock := protoutil.NewBlock(uint64(i), []byte{})
   632  		b, _ := pb.Marshal(rawblock)
   633  		assert.NoError(t, p.s.addPayload(&proto.Payload{
   634  			SeqNum: uint64(i),
   635  			Data:   b,
   636  		}, nonBlocking))
   637  	}
   638  
   639  	// Add payloads from 10 to defMaxBlockDistance, while we're missing blocks [5,9]
   640  	// Should succeed
   641  	for i := 10; i <= defMaxBlockDistance; i++ {
   642  		rawblock := protoutil.NewBlock(uint64(i), []byte{})
   643  		b, _ := pb.Marshal(rawblock)
   644  		assert.NoError(t, p.s.addPayload(&proto.Payload{
   645  			SeqNum: uint64(i),
   646  			Data:   b,
   647  		}, nonBlocking))
   648  	}
   649  
   650  	// Add payloads from defMaxBlockDistance + 1 to defMaxBlockDistance * 10
   651  	// Should fail.
   652  	for i := defMaxBlockDistance + 1; i <= defMaxBlockDistance*10; i++ {
   653  		rawblock := protoutil.NewBlock(uint64(i), []byte{})
   654  		b, _ := pb.Marshal(rawblock)
   655  		assert.Error(t, p.s.addPayload(&proto.Payload{
   656  			SeqNum: uint64(i),
   657  			Data:   b,
   658  		}, nonBlocking))
   659  	}
   660  
   661  	// Ensure only blocks 1-4 were passed to the ledger
   662  	close(blocksPassedToLedger)
   663  	i := 1
   664  	for seq := range blocksPassedToLedger {
   665  		assert.Equal(t, uint64(i), seq)
   666  		i++
   667  	}
   668  	assert.Equal(t, 5, i)
   669  
   670  	// Ensure we don't store too many blocks in memory
   671  	sp := p.s
   672  	assert.True(t, sp.payloads.Size() < defMaxBlockDistance)
   673  }
   674  
   675  func TestBlockingEnqueue(t *testing.T) {
   676  	// Scenario: In parallel, get blocks from gossip and from the orderer.
   677  	// We get twice as many blocks from the orderer as from gossip.
   678  	// The blocks from gossip carry random sequence numbers, to maximize disruption.
   679  	t.Parallel()
   680  	mc := &mockCommitter{Mock: &mock.Mock{}}
   681  	blocksPassedToLedger := make(chan uint64, 10)
   682  	mc.On("CommitLegacy", mock.Anything).Run(func(arg mock.Arguments) {
   683  		blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
   684  	})
   685  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   686  	mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
   687  	g := &mocks.GossipMock{}
   688  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   689  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   690  	p := newPeerNode(0, mc, noopPeerIdentityAcceptor)
   691  	defer p.shutdown()
   692  
   693  	numBlocksReceived := 500
   694  	receivedBlockCount := 0
   695  	// Get a block from the orderer every 1ms
   696  	go func() {
   697  		for i := 1; i <= numBlocksReceived; i++ {
   698  			rawblock := protoutil.NewBlock(uint64(i), []byte{})
   699  			b, _ := pb.Marshal(rawblock)
   700  			block := &proto.Payload{
   701  				SeqNum: uint64(i),
   702  				Data:   b,
   703  			}
   704  			p.s.AddPayload(block)
   705  			time.Sleep(time.Millisecond)
   706  		}
   707  	}()
   708  
   709  	// Get a block from gossip every 1ms too
   710  	go func() {
   711  		rand.Seed(time.Now().UnixNano())
   712  		for i := 1; i <= numBlocksReceived/2; i++ {
   713  			blockSeq := rand.Intn(numBlocksReceived)
   714  			rawblock := protoutil.NewBlock(uint64(blockSeq), []byte{})
   715  			b, _ := pb.Marshal(rawblock)
   716  			block := &proto.Payload{
   717  				SeqNum: uint64(blockSeq),
   718  				Data:   b,
   719  			}
   720  			p.s.addPayload(block, nonBlocking)
   721  			time.Sleep(time.Millisecond)
   722  		}
   723  	}()
   724  
   725  	for {
   726  		receivedBlock := <-blocksPassedToLedger
   727  		receivedBlockCount++
   728  		m := &mock.Mock{}
   729  		m.On("LedgerHeight", mock.Anything).Return(receivedBlock, nil)
   730  		m.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
   731  		m.On("CommitLegacy", mock.Anything).Run(func(arg mock.Arguments) {
   732  			blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
   733  		})
   734  		mc.Lock()
   735  		mc.Mock = m
   736  		mc.Unlock()
   737  		assert.Equal(t, uint64(receivedBlockCount), receivedBlock)
   738  		if int(receivedBlockCount) == numBlocksReceived {
   739  			break
   740  		}
   741  		time.Sleep(time.Millisecond * 10)
   742  	}
   743  }
   744  
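        // TestHaltChainProcessing checks that a VSCC execution failure during
        // validation is logged and aborts further chain processing.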
   745  func TestHaltChainProcessing(t *testing.T) {
   746  	gossipChannel := func(c chan *proto.GossipMessage) <-chan *proto.GossipMessage {
   747  		return c
   748  	}
   749  	makeBlock := func(seq int) []byte {
   750  		b := &pcomm.Block{
   751  			Header: &pcomm.BlockHeader{
   752  				Number: uint64(seq),
   753  			},
   754  			Data: &pcomm.BlockData{
   755  				Data: [][]byte{},
   756  			},
   757  			Metadata: &pcomm.BlockMetadata{
   758  				Metadata: [][]byte{
   759  					{}, {}, {}, {},
   760  				},
   761  			},
   762  		}
   763  		data, _ := pb.Marshal(b)
   764  		return data
   765  	}
   766  	newBlockMsg := func(i int) *proto.GossipMessage {
   767  		return &proto.GossipMessage{
   768  			Channel: []byte("testchannelid"),
   769  			Content: &proto.GossipMessage_DataMsg{
   770  				DataMsg: &proto.DataMessage{
   771  					Payload: &proto.Payload{
   772  						SeqNum: uint64(i),
   773  						Data:   makeBlock(i),
   774  					},
   775  				},
   776  			},
   777  		}
   778  	}
   779  
   780  	mc := &mockCommitter{Mock: &mock.Mock{}}
   781  	mc.On("CommitLegacy", mock.Anything)
   782  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   783  	g := &mocks.GossipMock{}
   784  	gossipMsgs := make(chan *proto.GossipMessage)
   785  
   786  	g.On("Accept", mock.Anything, false).Return(gossipChannel(gossipMsgs), nil)
   787  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   788  	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
   789  
   790  	v := &validator.MockValidator{}
   791  	v.On("Validate").Return(&errors2.VSCCExecutionFailureError{
   792  		Err: errors.New("foobar"),
   793  	}).Once()
   794  
   795  	buf := gbytes.NewBuffer()
   796  
   797  	logger := flogging.MustGetLogger(gutil.StateLogger).WithOptions(zap.Hooks(func(entry zapcore.Entry) error {
   798  		buf.Write([]byte(entry.Message))
   799  		buf.Write([]byte("\n"))
   800  		return nil
   801  	}))
   802  	peerNode := newPeerNodeWithGossipWithValidator(logger, 0, mc, noopPeerIdentityAcceptor, g, v)
   803  	defer peerNode.shutdown()
   804  	gossipMsgs <- newBlockMsg(1)
   805  
   806  	gom := gomega.NewGomegaWithT(t)
   807  	gom.Eventually(buf, time.Minute).Should(gbytes.Say("Failed executing VSCC due to foobar"))
   808  	gom.Eventually(buf, time.Minute).Should(gbytes.Say("Aborting chain processing"))
   809  }
   810  
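        // TestFailures checks constructor behavior against a bad ledger: a
        // ledger height of 0 panics, and a failing LedgerHeight yields a nil provider.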
   811  func TestFailures(t *testing.T) {
   812  	t.Parallel()
   813  	mc := &mockCommitter{Mock: &mock.Mock{}}
   814  	mc.On("LedgerHeight", mock.Anything).Return(uint64(0), nil)
   815  	g := &mocks.GossipMock{}
   816  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   817  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   818  	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
   819  	assert.Panics(t, func() {
   820  		newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   821  	})
   822  	// Reprogram mock
   823  	mc.Mock = &mock.Mock{}
   824  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), errors.New("Failed accessing ledger"))
   825  	assert.Nil(t, newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g))
   826  }
   827  
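        // TestGossipReception checks that a block gossiped on a foreign channel
        // is ignored, while the same block on the test channel is committed.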
   828  func TestGossipReception(t *testing.T) {
   829  	t.Parallel()
   830  	signalChan := make(chan struct{})
   831  	rawblock := &pcomm.Block{
   832  		Header: &pcomm.BlockHeader{
   833  			Number: uint64(1),
   834  		},
   835  		Data: &pcomm.BlockData{
   836  			Data: [][]byte{},
   837  		},
   838  		Metadata: &pcomm.BlockMetadata{
   839  			Metadata: [][]byte{
   840  				{}, {}, {}, {},
   841  			},
   842  		},
   843  	}
   844  	b, _ := pb.Marshal(rawblock)
   845  
   846  	newMsg := func(channel string) *proto.GossipMessage {
   847  		return &proto.GossipMessage{
   848  			Channel: []byte(channel),
   849  			Content: &proto.GossipMessage_DataMsg{
   850  				DataMsg: &proto.DataMessage{
   851  					Payload: &proto.Payload{
   852  						SeqNum: 1,
   853  						Data:   b,
   854  					},
   855  				},
   856  			},
   857  		}
   858  	}
   861  
   862  	createChan := func(signalChan chan struct{}) <-chan *proto.GossipMessage {
   863  		c := make(chan *proto.GossipMessage)
   864  
   865  		go func(c chan *proto.GossipMessage) {
   866  			// Wait for Accept() to be called
   867  			<-signalChan
   868  			// Simulate a message reception from the gossip component with an invalid channel
   869  			c <- newMsg("AAA")
   870  			// Simulate a message reception from the gossip component
   871  			c <- newMsg("testchannelid")
   872  		}(c)
   873  		return c
   874  	}
   875  
   876  	g := &mocks.GossipMock{}
   877  	rmc := createChan(signalChan)
   878  	g.On("Accept", mock.Anything, false).Return(rmc, nil).Run(func(_ mock.Arguments) {
   879  		signalChan <- struct{}{}
   880  	})
   881  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   882  	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
   883  	mc := &mockCommitter{Mock: &mock.Mock{}}
   884  	receivedChan := make(chan struct{})
   885  	mc.On("CommitLegacy", mock.Anything).Run(func(arguments mock.Arguments) {
   886  		block := arguments.Get(0).(*pcomm.Block)
   887  		assert.Equal(t, uint64(1), block.Header.Number)
   888  		receivedChan <- struct{}{}
   889  	})
   890  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   891  	mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
   892  	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   893  	defer p.shutdown()
   894  	select {
   895  	case <-receivedChan:
   896  	case <-time.After(time.Second * 15):
   897  		assert.Fail(t, "Didn't commit a block in a timely manner")
   898  	}
   899  }
   900  
   901  func TestLedgerHeightFromProperties(t *testing.T) {
   902  	// Scenario: For each test case, spawn a peer and supply it
   903  	// with a specific mock of PeersOfChannel returning peers that either
   904  	// set the ledger height in their properties or do not.
   905  	// Ensure the selection logic handles both cases as needed.
   906  
   907  	t.Parallel()
   908  	// Returns whether the given networkMember was selected or not
   909  	wasNetworkMemberSelected := func(t *testing.T, networkMember discovery.NetworkMember) bool {
   910  		var wasGivenNetworkMemberSelected int32
   911  		finChan := make(chan struct{})
   912  		g := &mocks.GossipMock{}
   913  		g.On("Send", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
   914  			msg := arguments.Get(0).(*proto.GossipMessage)
   915  			assert.NotNil(t, msg.GetStateRequest())
   916  			peer := arguments.Get(1).([]*comm.RemotePeer)[0]
   917  			if bytes.Equal(networkMember.PKIid, peer.PKIID) {
   918  				atomic.StoreInt32(&wasGivenNetworkMemberSelected, 1)
   919  			}
   920  			finChan <- struct{}{}
   921  		})
   922  		g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   923  		g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   924  		defaultPeer := discovery.NetworkMember{
   925  			InternalEndpoint: "b",
   926  			PKIid:            common.PKIidType("b"),
   927  			Properties: &proto.Properties{
   928  				LedgerHeight: 5,
   929  			},
   930  		}
   931  		g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{
   932  			defaultPeer,
   933  			networkMember,
   934  		})
   935  		mc := &mockCommitter{Mock: &mock.Mock{}}
   936  		mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   937  		p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   938  		defer p.shutdown()
   939  		select {
   940  		case <-time.After(time.Second * 20):
   941  			t.Fatal("Didn't send a request in a timely manner")
   942  		case <-finChan:
   943  		}
   944  		return atomic.LoadInt32(&wasGivenNetworkMemberSelected) == 1
   945  	}
   946  
   947  	peerWithProperties := discovery.NetworkMember{
   948  		PKIid: common.PKIidType("peerWithProperties"),
   949  		Properties: &proto.Properties{
   950  			LedgerHeight: 10,
   951  		},
   952  		InternalEndpoint: "peerWithProperties",
   953  	}
   954  
   955  	peerWithoutProperties := discovery.NetworkMember{
   956  		PKIid:            common.PKIidType("peerWithoutProperties"),
   957  		InternalEndpoint: "peerWithoutProperties",
   958  	}
   959  
   960  	tests := []struct {
   961  		shouldGivenBeSelected bool
   962  		member                discovery.NetworkMember
   963  	}{
   964  		{member: peerWithProperties, shouldGivenBeSelected: true},
   965  		{member: peerWithoutProperties, shouldGivenBeSelected: false},
   966  	}
   967  
   968  	for _, tst := range tests {
   969  		assert.Equal(t, tst.shouldGivenBeSelected, wasNetworkMemberSelected(t, tst.member))
   970  	}
   971  }
   972  
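        // TestAccessControl checks that only peers whose identities satisfy the
        // block pull policy end up receiving blocks via state transfer.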
   973  func TestAccessControl(t *testing.T) {
   974  	t.Parallel()
   975  	bootstrapSetSize := 5
   976  	bootstrapSet := make([]*peerNode, 0)
   977  
   978  	authorizedPeersSize := 4
   979  	var listeners []net.Listener
   980  	var endpoints []string
   981  
   982  	for i := 0; i < authorizedPeersSize; i++ {
   983  		ll, err := net.Listen("tcp", "127.0.0.1:0")
   984  		assert.NoError(t, err)
   985  		listeners = append(listeners, ll)
   986  		endpoint := ll.Addr().String()
   987  		endpoints = append(endpoints, endpoint)
   988  	}
   989  
   990  	defer func() {
   991  		for _, ll := range listeners {
   992  			ll.Close()
   993  		}
   994  	}()
   995  
   996  	authorizedPeers := map[string]struct{}{
   997  		endpoints[0]: {},
   998  		endpoints[1]: {},
   999  		endpoints[2]: {},
  1000  		endpoints[3]: {},
  1001  	}
  1002  
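        	// blockPullPolicy authorizes block pulling only for the endpoints listed above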
  1003  	blockPullPolicy := func(identity api.PeerIdentityType) error {
  1004  		if _, isAuthorized := authorizedPeers[string(identity)]; isAuthorized {
  1005  			return nil
  1006  		}
  1007  		return errors.New("Not authorized")
  1008  	}
  1009  
  1010  	var bootPorts []int
  1011  
  1012  	for i := 0; i < bootstrapSetSize; i++ {
  1013  		commit := newCommitter()
  1014  		bootPeer, bootPort := newBootNode(i, commit, blockPullPolicy)
  1015  		bootstrapSet = append(bootstrapSet, bootPeer)
  1016  		bootPorts = append(bootPorts, bootPort)
  1017  	}
  1018  
  1019  	defer func() {
  1020  		for _, p := range bootstrapSet {
  1021  			p.shutdown()
  1022  		}
  1023  	}()
  1024  
  1025  	msgCount := 5
  1026  
  1027  	for i := 1; i <= msgCount; i++ {
  1028  		rawblock := protoutil.NewBlock(uint64(i), []byte{})
  1029  		if b, err := pb.Marshal(rawblock); err == nil {
  1030  			payload := &proto.Payload{
  1031  				SeqNum: uint64(i),
  1032  				Data:   b,
  1033  			}
  1034  			bootstrapSet[0].s.AddPayload(payload)
  1035  		} else {
  1036  			t.Fail()
  1037  		}
  1038  	}
  1039  
  1040  	standardPeerSetSize := 10
  1041  	peersSet := make([]*peerNode, 0)
  1042  
  1043  	for i := 0; i < standardPeerSetSize; i++ {
  1044  		commit := newCommitter()
  1045  		peersSet = append(peersSet, newPeerNode(bootstrapSetSize+i, commit, blockPullPolicy, bootPorts...))
  1046  	}
  1047  
  1048  	defer func() {
  1049  		for _, p := range peersSet {
  1050  			p.shutdown()
  1051  		}
  1052  	}()
  1053  
  1054  	waitUntilTrueOrTimeout(t, func() bool {
  1055  		for _, p := range peersSet {
  1056  			if len(p.g.PeersOfChannel(common.ChannelID("testchannelid"))) != bootstrapSetSize+standardPeerSetSize-1 {
  1057  				t.Log("Peer discovery has not finished yet")
  1058  				return false
  1059  			}
  1060  		}
  1061  		t.Log("All peers discovered each other!")
  1062  		return true
  1063  	}, 30*time.Second)
  1064  
  1065  	t.Log("Waiting for all blocks to arrive.")
  1066  	waitUntilTrueOrTimeout(t, func() bool {
  1067  		t.Log("Trying to see that all authorized peers got all blocks, and that all non-authorized peers did not")
  1068  		for _, p := range peersSet {
  1069  			height, err := p.commit.LedgerHeight()
  1070  			id := fmt.Sprintf("127.0.0.1:%d", p.port)
  1071  			if _, isAuthorized := authorizedPeers[id]; isAuthorized {
  1072  				if height != uint64(msgCount+1) || err != nil {
  1073  					return false
  1074  				}
  1075  			} else {
  1076  				if err == nil && height > 1 {
  1077  					assert.Fail(t, "Peer", id, "got message but isn't authorized! Height:", height)
  1078  				}
  1079  			}
  1080  		}
  1081  		t.Log("All peers have the expected ledger height!")
  1082  		return true
  1083  	}, 60*time.Second)
  1084  }
  1085  
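        // TestNewGossipStateProvider_SendingManyMessages checks that blocks
        // submitted to one bootstrap peer propagate to every peer in the network.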
  1086  func TestNewGossipStateProvider_SendingManyMessages(t *testing.T) {
  1087  	t.Parallel()
  1088  	bootstrapSetSize := 5
  1089  	bootstrapSet := make([]*peerNode, 0)
  1090  
  1091  	var bootPorts []int
  1092  
  1093  	for i := 0; i < bootstrapSetSize; i++ {
  1094  		commit := newCommitter()
  1095  		bootPeer, bootPort := newBootNode(i, commit, noopPeerIdentityAcceptor)
  1096  		bootstrapSet = append(bootstrapSet, bootPeer)
  1097  		bootPorts = append(bootPorts, bootPort)
  1098  	}
  1099  
  1100  	defer func() {
  1101  		for _, p := range bootstrapSet {
  1102  			p.shutdown()
  1103  		}
  1104  	}()
  1105  
  1106  	msgCount := 10
  1107  
  1108  	for i := 1; i <= msgCount; i++ {
  1109  		rawblock := protoutil.NewBlock(uint64(i), []byte{})
  1110  		if b, err := pb.Marshal(rawblock); err == nil {
  1111  			payload := &proto.Payload{
  1112  				SeqNum: uint64(i),
  1113  				Data:   b,
  1114  			}
  1115  			bootstrapSet[0].s.AddPayload(payload)
  1116  		} else {
  1117  			t.Fail()
  1118  		}
  1119  	}
  1120  
  1121  	standardPeersSize := 10
  1122  	peersSet := make([]*peerNode, 0)
  1123  
  1124  	for i := 0; i < standardPeersSize; i++ {
  1125  		commit := newCommitter()
  1126  		peersSet = append(peersSet, newPeerNode(bootstrapSetSize+i, commit, noopPeerIdentityAcceptor, bootPorts...))
  1127  	}
  1128  
  1129  	defer func() {
  1130  		for _, p := range peersSet {
  1131  			p.shutdown()
  1132  		}
  1133  	}()
  1134  
  1135  	waitUntilTrueOrTimeout(t, func() bool {
  1136  		for _, p := range peersSet {
  1137  			if len(p.g.PeersOfChannel(common.ChannelID("testchannelid"))) != bootstrapSetSize+standardPeersSize-1 {
  1138  				t.Log("Peer discovery has not finished yet")
  1139  				return false
  1140  			}
  1141  		}
  1142  		t.Log("All peers discovered each other!")
  1143  		return true
  1144  	}, 30*time.Second)
  1145  
  1146  	t.Log("Waiting for all blocks to arrive.")
  1147  	waitUntilTrueOrTimeout(t, func() bool {
  1148  		t.Log("Trying to see all peers get all blocks")
  1149  		for _, p := range peersSet {
  1150  			height, err := p.commit.LedgerHeight()
  1151  			if height != uint64(msgCount+1) || err != nil {
  1152  				return false
  1153  			}
  1154  		}
  1155  		t.Log("All peers have the same ledger height!")
  1156  		return true
  1157  	}, 60*time.Second)
  1158  }
  1159  
  1160  // Start one bootstrap peer and submit defAntiEntropyBatchSize + 5 messages into
  1161  // the local ledger, then spawn a new peer and wait for the anti-entropy procedure
  1162  // to fetch the missing blocks. Since state transfer messages are now batched, it
  1163  // is expected to see _exactly_ two messages with the state transfer response.
  1164  func TestNewGossipStateProvider_BatchingOfStateRequest(t *testing.T) {
  1165  	t.Parallel()
  1166  	bootPeer, bootPort := newBootNode(0, newCommitter(), noopPeerIdentityAcceptor)
  1167  	defer bootPeer.shutdown()
  1168  
  1169  	msgCount := defAntiEntropyBatchSize + 5
  1170  	expectedMessagesCnt := 2
  1171  
  1172  	for i := 1; i <= msgCount; i++ {
  1173  		rawblock := protoutil.NewBlock(uint64(i), []byte{})
  1174  		if b, err := pb.Marshal(rawblock); err == nil {
  1175  			payload := &proto.Payload{
  1176  				SeqNum: uint64(i),
  1177  				Data:   b,
  1178  			}
  1179  			bootPeer.s.AddPayload(payload)
  1180  		} else {
  1181  			t.Fail()
  1182  		}
  1183  	}
  1184  
  1185  	peer := newPeerNode(1, newCommitter(), noopPeerIdentityAcceptor, bootPort)
  1186  	defer peer.shutdown()
  1187  
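        	// Accept only state transfer messages arriving on the comm channel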
  1188  	naiveStateMsgPredicate := func(message interface{}) bool {
  1189  		return protoext.IsRemoteStateMessage(message.(protoext.ReceivedMessage).GetGossipMessage().GossipMessage)
  1190  	}
  1191  	_, peerCh := peer.g.Accept(naiveStateMsgPredicate, true)
  1192  
  1193  	wg := sync.WaitGroup{}
  1194  	wg.Add(expectedMessagesCnt)
  1195  
  1196  	// The number of submitted messages is defAntiEntropyBatchSize + 5, therefore
  1197  	// the expected number of batches is expectedMessagesCnt = 2. The following
  1198  	// goroutine makes sure it receives the expected number of messages and
  1199  	// signals success so the test can continue.
  1200  	go func() {
  1201  		for count := 0; count < expectedMessagesCnt; count++ {
  1202  			<-peerCh
  1203  			wg.Done()
  1204  		}
  1205  	}()
  1206  
  1207  	// Once we get the messages indicating that both batches were received,
  1208  	// make sure the blocks were indeed committed.
  1209  	waitUntilTrueOrTimeout(t, func() bool {
  1210  		if len(peer.g.PeersOfChannel(common.ChannelID("testchannelid"))) != 1 {
  1211  			t.Log("Peer discovery has not finished yet")
  1212  			return false
  1213  		}
  1214  		t.Log("All peers discovered each other!")
  1215  		return true
  1216  	}, 30*time.Second)
  1217  
  1218  	// Block until the goroutine above has received the expected number of
  1219  	// state transfer response batches.
  1220  	wg.Wait()
  1221  
  1222  	t.Log("Waiting for all blocks to arrive.")
  1223  	waitUntilTrueOrTimeout(t, func() bool {
  1224  		t.Log("Trying to see all peers get all blocks")
  1225  		height, err := peer.commit.LedgerHeight()
  1226  		if height != uint64(msgCount+1) || err != nil {
  1227  			return false
  1228  		}
  1229  		t.Log("All peers have the same ledger height!")
  1230  		return true
  1231  	}, 60*time.Second)
  1232  }
  1233  
  1234  // coordinatorMock is a mock of the privdata coordinator interface,
  1235  // used to simulate the coordinator flow during the test
  1236  type coordinatorMock struct {
  1237  	committer.Committer
  1238  	mock.Mock
  1239  }
  1240  
  1241  func (mock *coordinatorMock) GetPvtDataAndBlockByNum(seqNum uint64, _ protoutil.SignedData) (*pcomm.Block, gutil.PvtDataCollections, error) {
  1242  	args := mock.Called(seqNum)
  1243  	return args.Get(0).(*pcomm.Block), args.Get(1).(gutil.PvtDataCollections), args.Error(2)
  1244  }
  1245  
  1246  func (mock *coordinatorMock) GetBlockByNum(seqNum uint64) (*pcomm.Block, error) {
  1247  	args := mock.Called(seqNum)
  1248  	return args.Get(0).(*pcomm.Block), args.Error(1)
  1249  }
  1250  
  1251  func (mock *coordinatorMock) StoreBlock(block *pcomm.Block, data gutil.PvtDataCollections) error {
  1252  	args := mock.Called(block, data)
  1253  	return args.Error(1)
  1254  }
  1255  
  1256  func (mock *coordinatorMock) LedgerHeight() (uint64, error) {
  1257  	args := mock.Called()
  1258  	return args.Get(0).(uint64), args.Error(1)
  1259  }
  1260  
  1261  func (mock *coordinatorMock) Close() {
  1262  	mock.Called()
  1263  }
  1264  
  1265  // StorePvtData is used to persist private data into the transient store
  1266  func (mock *coordinatorMock) StorePvtData(txid string, privData *tspb.TxPvtReadWriteSetWithConfigInfo, blkHeight uint64) error {
  1267  	return mock.Called().Error(0)
  1268  }
  1269  
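        // receivedMessageMock mocks protoext.ReceivedMessage to drive the
        // request/response flow of state transfer in tests.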
  1270  type receivedMessageMock struct {
  1271  	mock.Mock
  1272  }
  1273  
  1274  // Ack would return an acknowledgement to the sender; it is a no-op in this mock
  1275  func (mock *receivedMessageMock) Ack(err error) {
  1276  
  1277  }
  1278  
  1279  func (mock *receivedMessageMock) Respond(msg *proto.GossipMessage) {
  1280  	mock.Called(msg)
  1281  }
  1282  
  1283  func (mock *receivedMessageMock) GetGossipMessage() *protoext.SignedGossipMessage {
  1284  	args := mock.Called()
  1285  	return args.Get(0).(*protoext.SignedGossipMessage)
  1286  }
  1287  
  1288  func (mock *receivedMessageMock) GetSourceEnvelope() *proto.Envelope {
  1289  	args := mock.Called()
  1290  	return args.Get(0).(*proto.Envelope)
  1291  }
  1292  
  1293  func (mock *receivedMessageMock) GetConnectionInfo() *protoext.ConnectionInfo {
  1294  	args := mock.Called()
  1295  	return args.Get(0).(*protoext.ConnectionInfo)
  1296  }
  1297  
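        // testData couples a block with its private data collections for the
        // state transfer assertions below.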
  1298  type testData struct {
  1299  	block   *pcomm.Block
  1300  	pvtData gutil.PvtDataCollections
  1301  }
  1302  
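        // TestTransferOfPrivateRWSet checks that a state response to a remote
        // state request carries both the blocks and their private read-write sets.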
  1303  func TestTransferOfPrivateRWSet(t *testing.T) {
  1304  	t.Parallel()
  1305  	chainID := "testChainID"
  1306  
  1307  	// First gossip instance
  1308  	g := &mocks.GossipMock{}
  1309  	coord1 := new(coordinatorMock)
  1310  
  1311  	gossipChannel := make(chan *proto.GossipMessage)
  1312  	commChannel := make(chan protoext.ReceivedMessage)
  1313  
  1314  	gossipChannelFactory := func(ch chan *proto.GossipMessage) <-chan *proto.GossipMessage {
  1315  		return ch
  1316  	}
  1317  
  1318  	g.On("Accept", mock.Anything, false).Return(gossipChannelFactory(gossipChannel), nil)
  1319  	g.On("Accept", mock.Anything, true).Return(nil, commChannel)
  1320  
  1321  	g.On("UpdateChannelMetadata", mock.Anything, mock.Anything)
  1322  	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
  1323  	g.On("Close")
  1324  
  1325  	coord1.On("LedgerHeight", mock.Anything).Return(uint64(5), nil)
  1326  
  1327  	var data = map[uint64]*testData{
  1328  		uint64(2): {
  1329  			block: &pcomm.Block{
  1330  				Header: &pcomm.BlockHeader{
  1331  					Number:       2,
  1332  					DataHash:     []byte{0, 1, 1, 1},
  1333  					PreviousHash: []byte{0, 0, 0, 1},
  1334  				},
  1335  				Data: &pcomm.BlockData{
  1336  					Data: [][]byte{{1}, {2}, {3}},
  1337  				},
  1338  			},
  1339  			pvtData: gutil.PvtDataCollections{
  1340  				{
  1341  					SeqInBlock: uint64(0),
  1342  					WriteSet: &rwset.TxPvtReadWriteSet{
  1343  						DataModel: rwset.TxReadWriteSet_KV,
  1344  						NsPvtRwset: []*rwset.NsPvtReadWriteSet{
  1345  							{
  1346  								Namespace: "myCC:v1",
  1347  								CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
  1348  									{
  1349  										CollectionName: "mySecretCollection",
  1350  										Rwset:          []byte{1, 2, 3, 4, 5},
  1351  									},
  1352  								},
  1353  							},
  1354  						},
  1355  					},
  1356  				},
  1357  			},
  1358  		},
  1359  
  1360  		uint64(3): {
  1361  			block: &pcomm.Block{
  1362  				Header: &pcomm.BlockHeader{
  1363  					Number:       3,
  1364  					DataHash:     []byte{1, 1, 1, 1},
  1365  					PreviousHash: []byte{0, 1, 1, 1},
  1366  				},
  1367  				Data: &pcomm.BlockData{
  1368  					Data: [][]byte{{4}, {5}, {6}},
  1369  				},
  1370  			},
  1371  			pvtData: gutil.PvtDataCollections{
  1372  				{
  1373  					SeqInBlock: uint64(2),
  1374  					WriteSet: &rwset.TxPvtReadWriteSet{
  1375  						DataModel: rwset.TxReadWriteSet_KV,
  1376  						NsPvtRwset: []*rwset.NsPvtReadWriteSet{
  1377  							{
  1378  								Namespace: "otherCC:v1",
  1379  								CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
  1380  									{
  1381  										CollectionName: "topClassified",
  1382  										Rwset:          []byte{0, 0, 0, 4, 2},
  1383  									},
  1384  								},
  1385  							},
  1386  						},
  1387  					},
  1388  				},
  1389  			},
  1390  		},
  1391  	}
  1392  
  1393  	for seqNum, each := range data {
  1394  		coord1.On("GetPvtDataAndBlockByNum", seqNum).Return(each.block, each.pvtData, nil /* no error*/)
  1395  	}
  1396  
  1397  	coord1.On("Close")
  1398  
  1399  	servicesAdapter := &ServicesMediator{GossipAdapter: g, MCSAdapter: &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}}
  1400  	stateMetrics := metrics.NewGossipMetrics(&disabled.Provider{}).StateMetrics
  1401  	stateConfig := &StateConfig{
  1402  		StateCheckInterval:   DefStateCheckInterval,
  1403  		StateResponseTimeout: DefStateResponseTimeout,
  1404  		StateBatchSize:       DefStateBatchSize,
  1405  		StateMaxRetries:      DefStateMaxRetries,
  1406  		StateBlockBufferSize: DefStateBlockBufferSize,
  1407  		StateChannelSize:     DefStateChannelSize,
  1408  		StateEnabled:         DefStateEnabled,
  1409  	}
  1410  	logger := flogging.MustGetLogger(gutil.StateLogger)
  1411  	st := NewGossipStateProvider(logger, chainID, servicesAdapter, coord1, stateMetrics, blocking, stateConfig)
  1412  	defer st.Stop()
  1413  
  1414  	// Mocked state request message
  1415  	requestMsg := new(receivedMessageMock)
  1416  
  1417  	// Build a state request message asking for blocks [2...3]
  1418  	requestGossipMsg := &proto.GossipMessage{
  1419  		// Set a nonce so the response can be matched to this request
  1420  		Nonce:   1,
  1421  		Tag:     proto.GossipMessage_CHAN_OR_ORG,
  1422  		Channel: []byte(chainID),
  1423  		Content: &proto.GossipMessage_StateRequest{StateRequest: &proto.RemoteStateRequest{
  1424  			StartSeqNum: 2,
  1425  			EndSeqNum:   3,
  1426  		}},
  1427  	}
  1428  
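        	// NoopSign wraps the message in a signed envelope without computing a
        	// real signature, which is enough here since crypto is mocked out.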
  1429  	msg, _ := protoext.NoopSign(requestGossipMsg)
  1430  
  1431  	requestMsg.On("GetGossipMessage").Return(msg)
  1432  	requestMsg.On("GetConnectionInfo").Return(&protoext.ConnectionInfo{
  1433  		Auth: &protoext.AuthInfo{},
  1434  	})
  1435  
  1436  	// Channel to send responses back
  1437  	responseChannel := make(chan protoext.ReceivedMessage)
  1438  	defer close(responseChannel)
  1439  
  1440  	requestMsg.On("Respond", mock.Anything).Run(func(args mock.Arguments) {
  1441  		// Extract the gossip message produced in response to the state request
  1442  		response := args.Get(0).(*proto.GossipMessage)
  1443  		// Wrap it into a received message
  1444  		receivedMsg := new(receivedMessageMock)
  1445  		// Sign the response with a no-op signer
  1446  		msg, _ := protoext.NoopSign(response)
  1447  		// Mock the received message to return the signed response
  1448  		receivedMsg.On("GetGossipMessage").Return(msg)
  1449  		// Send response
  1450  		responseChannel <- receivedMsg
  1451  	})
  1452  
  1453  	// Send request message via communication channel into state transfer
  1454  	commChannel <- requestMsg
  1455  
  1456  	// The state transfer request should produce a state response
  1457  	response := <-responseChannel
  1458  
  1459  	// Start the assertion section
  1460  	stateResponse := response.GetGossipMessage().GetStateResponse()
  1461  
  1462  	assertion := assert.New(t)
  1463  	// Nonce should be equal to the nonce of the request
  1464  	assertion.Equal(uint64(1), response.GetGossipMessage().Nonce)
  1465  	// Payload should not be nil
  1466  	assertion.NotNil(stateResponse)
  1467  	assertion.NotNil(stateResponse.Payloads)
  1468  	// Exactly two payloads expected
  1469  	assertion.Equal(2, len(stateResponse.Payloads))
  1470  
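        	// Each payload nests its content: Data holds a marshaled Block, and each
        	// PrivateData entry holds a marshaled PvtDataPayload whose Payload field
        	// is in turn a marshaled TxPvtReadWriteSet.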
  1471  	// Assert we received all the data and that it matches the fixtures
  1472  	for _, each := range stateResponse.Payloads {
  1473  		block := &pcomm.Block{}
  1474  		err := pb.Unmarshal(each.Data, block)
  1475  		assertion.NoError(err)
  1476  
  1477  		assertion.NotNil(block.Header)
  1478  
  1479  		testBlock, ok := data[block.Header.Number]
  1480  		assertion.True(ok)
  1481  
  1482  		for i, d := range testBlock.block.Data.Data {
  1483  			assertion.True(bytes.Equal(d, block.Data.Data[i]))
  1484  		}
  1485  
  1486  		for i, p := range testBlock.pvtData {
  1487  			pvtDataPayload := &proto.PvtDataPayload{}
  1488  			err := pb.Unmarshal(each.PrivateData[i], pvtDataPayload)
  1489  			assertion.NoError(err)
  1490  			pvtRWSet := &rwset.TxPvtReadWriteSet{}
  1491  			err = pb.Unmarshal(pvtDataPayload.Payload, pvtRWSet)
  1492  			assertion.NoError(err)
  1493  			assertion.True(pb.Equal(p.WriteSet, pvtRWSet))
  1494  		}
  1495  	}
  1496  }
  1497  
  1498  type testPeer struct {
  1499  	*mocks.GossipMock
  1500  	id            string
  1501  	gossipChannel chan *proto.GossipMessage
  1502  	commChannel   chan protoext.ReceivedMessage
  1503  	coord         *coordinatorMock
  1504  }
  1505  
  1506  func (t testPeer) Gossip() <-chan *proto.GossipMessage {
  1507  	return t.gossipChannel
  1508  }
  1509  
  1510  func (t testPeer) Comm() chan protoext.ReceivedMessage {
  1511  	return t.commChannel
  1512  }
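        // The Gossip and Comm accessors back the two Accept expectations set up in
        // TestTransferOfPvtDataBetweenPeers: Accept(..., false) yields the gossip
        // message channel, while Accept(..., true) yields the ReceivedMessage
        // channel used for direct peer-to-peer replies.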
  1513  
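        // Shared two-peer fixture for TestTransferOfPvtDataBetweenPeers below.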
  1514  var peers = map[string]testPeer{
  1515  	"peer1": {
  1516  		id:            "peer1",
  1517  		gossipChannel: make(chan *proto.GossipMessage),
  1518  		commChannel:   make(chan protoext.ReceivedMessage),
  1519  		GossipMock:    &mocks.GossipMock{},
  1520  		coord:         new(coordinatorMock),
  1521  	},
  1522  	"peer2": {
  1523  		id:            "peer2",
  1524  		gossipChannel: make(chan *proto.GossipMessage),
  1525  		commChannel:   make(chan protoext.ReceivedMessage),
  1526  		GossipMock:    &mocks.GossipMock{},
  1527  		coord:         new(coordinatorMock),
  1528  	},
  1529  }
  1530  
  1531  func TestTransferOfPvtDataBetweenPeers(t *testing.T) {
  1532  	/*
  1533  	   This test covers a basic scenario: there are two peers, "peer1" and "peer2",
  1534  	   where peer2 is missing a block in its ledger and therefore asks to replicate
  1535  	   it from the first peer.
  1536  
  1537  	   The test checks that the block from one peer is replicated to the second one
  1538  	   and that the replicated content is identical.
  1539  	*/
  1540  	t.Parallel()
  1541  	chainID := "testChainID"
  1542  
  1543  	// Initialize the peers' mock expectations
  1544  	for _, peer := range peers {
  1545  		peer.On("Accept", mock.Anything, false).Return(peer.Gossip(), nil)
  1546  
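        		// Only the first Accept(..., true) call receives the channel the test
        		// writes to; any later call gets a fresh, inert channel.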
  1547  		peer.On("Accept", mock.Anything, true).
  1548  			Return(nil, peer.Comm()).
  1549  			Once().
  1550  			On("Accept", mock.Anything, true).
  1551  			Return(nil, make(chan protoext.ReceivedMessage))
  1552  
  1553  		peer.On("UpdateChannelMetadata", mock.Anything, mock.Anything)
  1554  		peer.coord.On("Close")
  1555  		peer.On("Close")
  1556  	}
  1557  
  1558  	// The first peer has a more advanced ledger
  1559  	peers["peer1"].coord.On("LedgerHeight", mock.Anything).Return(uint64(3), nil)
  1560  
  1561  	// The second peer is one block behind, so it will have to replicate that block from the first peer
  1562  	peers["peer2"].coord.On("LedgerHeight", mock.Anything).Return(uint64(2), nil)
  1563  
  1564  	peers["peer1"].coord.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
  1565  	peers["peer2"].coord.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
  1566  
  1567  	peers["peer1"].coord.On("GetPvtDataAndBlockByNum", uint64(2)).Return(&pcomm.Block{
  1568  		Header: &pcomm.BlockHeader{
  1569  			Number:       2,
  1570  			DataHash:     []byte{0, 0, 0, 1},
  1571  			PreviousHash: []byte{0, 1, 1, 1},
  1572  		},
  1573  		Data: &pcomm.BlockData{
  1574  			Data: [][]byte{{4}, {5}, {6}},
  1575  		},
  1576  	}, gutil.PvtDataCollections{&ledger.TxPvtData{
  1577  		SeqInBlock: uint64(1),
  1578  		WriteSet: &rwset.TxPvtReadWriteSet{
  1579  			DataModel: rwset.TxReadWriteSet_KV,
  1580  			NsPvtRwset: []*rwset.NsPvtReadWriteSet{
  1581  				{
  1582  					Namespace: "myCC:v1",
  1583  					CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
  1584  						{
  1585  							CollectionName: "mysecretCollection",
  1586  							Rwset:          []byte{1, 2, 3, 4, 5},
  1587  						},
  1588  					},
  1589  				},
  1590  			},
  1591  		},
  1592  	}}, nil)
  1593  
  1594  	// Return membership of the peers
  1595  	member2 := discovery.NetworkMember{
  1596  		PKIid:            common.PKIidType([]byte{2}),
  1597  		Endpoint:         "peer2:7051",
  1598  		InternalEndpoint: "peer2:7051",
  1599  		Properties: &proto.Properties{
  1600  			LedgerHeight: 2,
  1601  		},
  1602  	}
  1603  
  1604  	member1 := discovery.NetworkMember{
  1605  		PKIid:            common.PKIidType([]byte{1}),
  1606  		Endpoint:         "peer1:7051",
  1607  		InternalEndpoint: "peer1:7051",
  1608  		Properties: &proto.Properties{
  1609  			LedgerHeight: 3,
  1610  		},
  1611  	}
  1612  
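        	// Each peer sees only the other in its membership view; peer2's
        	// anti-entropy can thus observe that peer1 is ahead (ledger height 3
        	// vs. 2) and request the missing block from it.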
  1613  	peers["peer1"].On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{member2})
  1614  	peers["peer2"].On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{member1})
  1615  
  1616  	peers["peer2"].On("Send", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
  1617  		request := args.Get(0).(*proto.GossipMessage)
  1618  		requestMsg := new(receivedMessageMock)
  1619  		msg, _ := protoext.NoopSign(request)
  1620  		requestMsg.On("GetGossipMessage").Return(msg)
  1621  		requestMsg.On("GetConnectionInfo").Return(&protoext.ConnectionInfo{
  1622  			Auth: &protoext.AuthInfo{},
  1623  		})
  1624  
  1625  		requestMsg.On("Respond", mock.Anything).Run(func(args mock.Arguments) {
  1626  			response := args.Get(0).(*proto.GossipMessage)
  1627  			receivedMsg := new(receivedMessageMock)
  1628  			msg, _ := protoext.NoopSign(response)
  1629  			receivedMsg.On("GetGossipMessage").Return(msg)
  1630  			// Send response back to the peer
  1631  			peers["peer2"].commChannel <- receivedMsg
  1632  		})
  1633  
  1634  		peers["peer1"].commChannel <- requestMsg
  1635  	})
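        	// peer2's Send is thus short-circuited in-process: its state request is
        	// delivered straight into peer1's comm channel, and peer1's reply comes
        	// back through peer2's comm channel.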
  1636  
  1637  	wg := sync.WaitGroup{}
  1638  	wg.Add(1)
  1639  	peers["peer2"].coord.On("StoreBlock", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
  1640  		wg.Done() // Done once the second peer commits the block
  1641  	}).Return([]string{}, nil) // No missing pvt data and no error
  1642  
  1643  	cryptoService := &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}
  1644  
  1645  	stateMetrics := metrics.NewGossipMetrics(&disabled.Provider{}).StateMetrics
  1646  
  1647  	mediator := &ServicesMediator{GossipAdapter: peers["peer1"], MCSAdapter: cryptoService}
  1648  	stateConfig := &StateConfig{
  1649  		StateCheckInterval:   DefStateCheckInterval,
  1650  		StateResponseTimeout: DefStateResponseTimeout,
  1651  		StateBatchSize:       DefStateBatchSize,
  1652  		StateMaxRetries:      DefStateMaxRetries,
  1653  		StateBlockBufferSize: DefStateBlockBufferSize,
  1654  		StateChannelSize:     DefStateChannelSize,
  1655  		StateEnabled:         DefStateEnabled,
  1656  	}
  1657  	logger := flogging.MustGetLogger(gutil.StateLogger)
  1658  	peer1State := NewGossipStateProvider(logger, chainID, mediator, peers["peer1"].coord, stateMetrics, blocking, stateConfig)
  1659  	defer peer1State.Stop()
  1660  
  1661  	mediator = &ServicesMediator{GossipAdapter: peers["peer2"], MCSAdapter: cryptoService}
  1662  	logger = flogging.MustGetLogger(gutil.StateLogger)
  1663  	peer2State := NewGossipStateProvider(logger, chainID, mediator, peers["peer2"].coord, stateMetrics, blocking, stateConfig)
  1664  	defer peer2State.Stop()
  1665  
  1666  	// Make sure state was replicated
  1667  	done := make(chan struct{})
  1668  	go func() {
  1669  		wg.Wait()
  1670  		close(done) // close rather than send, so this goroutine cannot leak on timeout
  1671  	}()
  1672  
  1673  	select {
  1674  	case <-done:
  1675  	case <-time.After(30 * time.Second):
  1676  		t.Fatal("Timed out waiting for the second peer to commit the block")
  1677  	}
  1679  }
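        // The done-channel bridge above is the usual way to bound a sync.WaitGroup
        // wait with a timeout. A reusable sketch of the same pattern (a hypothetical
        // helper, not part of this package):
        //
        //	func waitTimeout(wg *sync.WaitGroup, d time.Duration) bool {
        //		done := make(chan struct{})
        //		go func() {
        //			wg.Wait()
        //			close(done)
        //		}()
        //		select {
        //		case <-done:
        //			return true // finished in time
        //		case <-time.After(d):
        //			return false // timed out; the goroutine exits once Wait returns
        //		}
        //	}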
  1680  
  1681  func TestStateRequestValidator(t *testing.T) {
  1682  	validator := &stateRequestValidator{}
  1683  	err := validator.validate(&proto.RemoteStateRequest{
  1684  		StartSeqNum: 10,
  1685  		EndSeqNum:   5,
  1686  	}, defAntiEntropyBatchSize)
  1687  	assert.Error(t, err) // check before dereferencing err.Error() below
  1688  	assert.Contains(t, err.Error(), "Invalid sequence interval [10...5).")
  1689  
  1690  	err = validator.validate(&proto.RemoteStateRequest{
  1691  		StartSeqNum: 10,
  1692  		EndSeqNum:   30,
  1693  	}, defAntiEntropyBatchSize)
  1694  	assert.Error(t, err)
  1695  	assert.Contains(t, err.Error(), "Requesting blocks range [10-30) greater than configured")
  1696  
  1697  	err = validator.validate(&proto.RemoteStateRequest{
  1698  		StartSeqNum: 10,
  1699  		EndSeqNum:   20,
  1700  	}, defAntiEntropyBatchSize)
  1701  	assert.NoError(t, err)
  1702  }
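        // The validated interval is half-open, [StartSeqNum...EndSeqNum): a
        // descending range is rejected outright, a range wider than the configured
        // anti-entropy batch size is rejected, and [10...20) above fits within the
        // default batch size and therefore passes. By the same rules a single-block
        // request should also validate, e.g. (a sketch, not asserted above):
        //
        //	_ = validator.validate(&proto.RemoteStateRequest{StartSeqNum: 5, EndSeqNum: 6}, defAntiEntropyBatchSize)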
  1703  
  1704  func waitUntilTrueOrTimeout(t *testing.T, predicate func() bool, timeout time.Duration) {
  1705  	ch := make(chan struct{})
  1706  	t.Log("Started polling until the predicate is satisfied or the timeout expires.")
  1707  
  1708  	go func() {
  1709  		// Use a distinct name for the ticker so it does not shadow the *testing.T.
  1710  		// If the timeout fires first, this goroutine keeps polling until the
  1711  		// predicate eventually holds.
  1712  		ticker := time.NewTicker(time.Second)
  1713  		defer ticker.Stop()
  1714  		for !predicate() {
  1715  			<-ticker.C
  1716  		}
  1717  		close(ch)
  1718  	}()
  1719  
  1720  	select {
  1721  	case <-ch:
  1722  		t.Log("Done.")
  1723  	case <-time.After(timeout):
  1724  		t.Fatal("Timeout has expired")
  1725  	}
  1726  }
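        // A minimal usage sketch (hypothetical; the predicate and the commits
        // counter are illustrative, not taken from the tests above):
        //
        //	var commits int32 // incremented atomically by a mocked commit callback
        //	waitUntilTrueOrTimeout(t, func() bool {
        //		return atomic.LoadInt32(&commits) >= 2
        //	}, 30*time.Second)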