github.com/hechain20/hechain@v0.0.0-20220316014945-b544036ba106/gossip/state/state_test.go (about)

     1  /*
     2  Copyright hechain. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package state
     8  
     9  import (
    10  	"bytes"
    11  	"errors"
    12  	"fmt"
    13  	"math/rand"
    14  	"net"
    15  	"sync"
    16  	"sync/atomic"
    17  	"testing"
    18  	"time"
    19  
    20  	pb "github.com/golang/protobuf/proto"
    21  	"github.com/hechain20/hechain/bccsp/factory"
    22  	"github.com/hechain20/hechain/common/configtx/test"
    23  	errors2 "github.com/hechain20/hechain/common/errors"
    24  	"github.com/hechain20/hechain/common/flogging"
    25  	"github.com/hechain20/hechain/common/metrics/disabled"
    26  	"github.com/hechain20/hechain/core/committer"
    27  	"github.com/hechain20/hechain/core/committer/txvalidator"
    28  	"github.com/hechain20/hechain/core/ledger"
    29  	"github.com/hechain20/hechain/core/mocks/validator"
    30  	"github.com/hechain20/hechain/core/transientstore"
    31  	"github.com/hechain20/hechain/gossip/api"
    32  	"github.com/hechain20/hechain/gossip/comm"
    33  	"github.com/hechain20/hechain/gossip/common"
    34  	"github.com/hechain20/hechain/gossip/discovery"
    35  	"github.com/hechain20/hechain/gossip/gossip"
    36  	"github.com/hechain20/hechain/gossip/gossip/algo"
    37  	"github.com/hechain20/hechain/gossip/gossip/channel"
    38  	"github.com/hechain20/hechain/gossip/metrics"
    39  	"github.com/hechain20/hechain/gossip/privdata"
    40  	capabilitymock "github.com/hechain20/hechain/gossip/privdata/mocks"
    41  	"github.com/hechain20/hechain/gossip/protoext"
    42  	"github.com/hechain20/hechain/gossip/state/mocks"
    43  	gossiputil "github.com/hechain20/hechain/gossip/util"
    44  	corecomm "github.com/hechain20/hechain/internal/pkg/comm"
    45  	"github.com/hechain20/hechain/protoutil"
    46  	pcomm "github.com/hyperledger/fabric-protos-go/common"
    47  	proto "github.com/hyperledger/fabric-protos-go/gossip"
    48  	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
    49  	tspb "github.com/hyperledger/fabric-protos-go/transientstore"
    50  	"github.com/onsi/gomega"
    51  	"github.com/onsi/gomega/gbytes"
    52  	"github.com/stretchr/testify/mock"
    53  	"github.com/stretchr/testify/require"
    54  	"go.uber.org/zap"
    55  	"go.uber.org/zap/zapcore"
    56  )
    57  
var (
	// orgID is the single organization every test peer belongs to.
	orgID = []byte("ORG1")

	// noopPeerIdentityAcceptor accepts any peer identity unconditionally.
	noopPeerIdentityAcceptor = func(identity api.PeerIdentityType) error {
		return nil
	}
)
    65  
// peerIdentityAcceptor decides whether a peer identity is acceptable;
// a nil return value means the identity is accepted.
type peerIdentityAcceptor func(identity api.PeerIdentityType) error
    67  
// joinChanMsg is a minimal api.JoinChannelMessage implementation used to
// join test peers to the test channel.
type joinChanMsg struct{}
    69  
// init configures test logging and initializes the default BCCSP factories
// before any test in this package runs.
func init() {
	gossiputil.SetupTestLogging()
	factory.InitFactories(nil)
}
    74  
// SequenceNumber returns the sequence number of the block that the message
// is derived from. The current time in nanoseconds is used so that each
// join message appears newer than any previously produced one.
func (*joinChanMsg) SequenceNumber() uint64 {
	return uint64(time.Now().UnixNano())
}
    80  
// Members returns the organizations of the channel — always the single
// test organization orgID.
func (jcm *joinChanMsg) Members() []api.OrgIdentityType {
	return []api.OrgIdentityType{orgID}
}
    85  
// AnchorPeersOf returns the anchor peers of the given organization.
// Tests configure no anchor peers, so the slice is always empty (non-nil).
func (jcm *joinChanMsg) AnchorPeersOf(org api.OrgIdentityType) []api.AnchorPeer {
	return []api.AnchorPeer{}
}
    90  
// orgCryptoService is a stub organization crypto service that maps every
// peer identity to orgID and accepts every join-channel message.
type orgCryptoService struct{}
    92  
// OrgByPeerIdentity returns the OrgIdentityType
// of a given peer identity — unconditionally the test org.
func (*orgCryptoService) OrgByPeerIdentity(identity api.PeerIdentityType) api.OrgIdentityType {
	return orgID
}
    98  
// Verify verifies a JoinChannelMessage, returns nil on success,
// and an error on failure. This stub accepts every message.
func (*orgCryptoService) Verify(joinChanMsg api.JoinChannelMessage) error {
	return nil
}
   104  
// cryptoServiceMock is a stub message crypto service whose channel-scoped
// verification outcome is delegated to the configurable acceptor callback.
type cryptoServiceMock struct {
	// acceptor decides the result of VerifyByChannel.
	acceptor peerIdentityAcceptor
}
   108  
// Expiration reports that every identity expires one hour from now, i.e.
// identities never expire within a test's lifetime.
func (cryptoServiceMock) Expiration(peerIdentity api.PeerIdentityType) (time.Time, error) {
	return time.Now().Add(time.Hour), nil
}
   112  
// GetPKIidOfCert returns the PKI-ID of a peer's identity.
// In tests the identity bytes themselves serve as the PKI-ID.
func (*cryptoServiceMock) GetPKIidOfCert(peerIdentity api.PeerIdentityType) common.PKIidType {
	return common.PKIidType(peerIdentity)
}
   117  
// VerifyBlock returns nil if the block is properly signed,
// else returns error. This stub treats every block as properly signed.
func (*cryptoServiceMock) VerifyBlock(channelID common.ChannelID, seqNum uint64, signedBlock *pcomm.Block) error {
	return nil
}
   123  
   124  // Sign signs msg with this peer's signing key and outputs
   125  // the signature if no error occurred.
   126  func (*cryptoServiceMock) Sign(msg []byte) ([]byte, error) {
   127  	clone := make([]byte, len(msg))
   128  	copy(clone, msg)
   129  	return clone, nil
   130  }
   131  
   132  // Verify checks that signature is a valid signature of message under a peer's verification key.
   133  // If the verification succeeded, Verify returns nil meaning no error occurred.
   134  // If peerCert is nil, then the signature is verified against this peer's verification key.
   135  func (*cryptoServiceMock) Verify(peerIdentity api.PeerIdentityType, signature, message []byte) error {
   136  	equal := bytes.Equal(signature, message)
   137  	if !equal {
   138  		return fmt.Errorf("Wrong signature:%v, %v", signature, message)
   139  	}
   140  	return nil
   141  }
   142  
// VerifyByChannel checks that signature is a valid signature of message
// under a peer's verification key, but also in the context of a specific channel.
// This mock ignores the signature entirely and delegates the verdict to the
// configured acceptor, letting tests simulate per-identity rejection.
func (cs *cryptoServiceMock) VerifyByChannel(channelID common.ChannelID, peerIdentity api.PeerIdentityType, signature, message []byte) error {
	return cs.acceptor(peerIdentity)
}
   150  
// ValidateIdentity accepts every peer identity unconditionally.
func (*cryptoServiceMock) ValidateIdentity(peerIdentity api.PeerIdentityType) error {
	return nil
}
   154  
   155  func bootPeersWithPorts(ports ...int) []string {
   156  	var peers []string
   157  	for _, port := range ports {
   158  		peers = append(peers, fmt.Sprintf("127.0.0.1:%d", port))
   159  	}
   160  	return peers
   161  }
   162  
// peerNodeGossipSupport is the gossip surface a test peer needs: the
// GossipAdapter consumed by the state provider plus channel-join and
// shutdown lifecycle operations.
type peerNodeGossipSupport interface {
	GossipAdapter
	Stop()
	JoinChan(joinMsg api.JoinChannelMessage, channelID common.ChannelID)
}
   168  
// Simple presentation of peer which includes only
// communication module, gossip and state transfer
type peerNode struct {
	port   int                      // gRPC port the peer is bound to
	g      peerNodeGossipSupport    // gossip instance (real or mocked)
	s      *GossipStateProviderImpl // state transfer provider under test
	cs     *cryptoServiceMock       // message crypto service stub
	commit committer.Committer      // ledger committer backing the peer
	grpc   *corecomm.GRPCServer     // underlying gRPC server
}
   179  
// Shutting down all modules used: the state provider first, then gossip,
// and finally the gRPC server they communicate through.
func (node *peerNode) shutdown() {
	node.s.Stop()
	node.g.Stop()
	node.grpc.Stop()
}
   186  
// mockCommitter is a testify-mock-backed committer. The embedded mutex
// guards the Mock pointer itself: several tests swap in a fresh *mock.Mock
// at runtime to change the committer's behavior mid-test.
type mockCommitter struct {
	*mock.Mock
	sync.Mutex
}
   191  
// GetConfigHistoryRetriever returns whatever the mock was programmed with.
func (mc *mockCommitter) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
	args := mc.Called()
	return args.Get(0).(ledger.ConfigHistoryRetriever), args.Error(1)
}
   196  
// GetPvtDataByNum returns the mocked private data for the given block.
func (mc *mockCommitter) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
	args := mc.Called(blockNum, filter)
	return args.Get(0).([]*ledger.TxPvtData), args.Error(1)
}
   201  
// CommitLegacy records the committed block on the mock. The Mock pointer is
// read under the lock because tests may replace it concurrently; the mock
// call itself happens outside the lock to avoid blocking other methods.
func (mc *mockCommitter) CommitLegacy(blockAndPvtData *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
	mc.Lock()
	m := mc.Mock
	mc.Unlock()
	m.Called(blockAndPvtData.Block)
	return nil
}
   209  
// GetPvtDataAndBlockByNum returns the mocked block-and-private-data for the
// sequence number. The Mock pointer is snapshotted under the lock because
// tests may swap it concurrently.
func (mc *mockCommitter) GetPvtDataAndBlockByNum(seqNum uint64) (*ledger.BlockAndPvtData, error) {
	mc.Lock()
	m := mc.Mock
	mc.Unlock()

	args := m.Called(seqNum)
	return args.Get(0).(*ledger.BlockAndPvtData), args.Error(1)
}
   218  
   219  func (mc *mockCommitter) LedgerHeight() (uint64, error) {
   220  	mc.Lock()
   221  	m := mc.Mock
   222  	mc.Unlock()
   223  	args := m.Called()
   224  	if args.Get(1) == nil {
   225  		return args.Get(0).(uint64), nil
   226  	}
   227  	return args.Get(0).(uint64), args.Get(1).(error)
   228  }
   229  
// DoesPvtDataInfoExistInLedger returns the mocked answer for the block.
// The Mock pointer is snapshotted under the lock (tests may swap it).
func (mc *mockCommitter) DoesPvtDataInfoExistInLedger(blkNum uint64) (bool, error) {
	mc.Lock()
	m := mc.Mock
	mc.Unlock()
	args := m.Called(blkNum)
	return args.Get(0).(bool), args.Error(1)
}
   237  
   238  func (mc *mockCommitter) GetBlocks(blockSeqs []uint64) []*pcomm.Block {
   239  	mc.Lock()
   240  	m := mc.Mock
   241  	mc.Unlock()
   242  
   243  	if m.Called(blockSeqs).Get(0) == nil {
   244  		return nil
   245  	}
   246  	return m.Called(blockSeqs).Get(0).([]*pcomm.Block)
   247  }
   248  
// GetMissingPvtDataTracker is not needed by these tests and panics if called.
func (*mockCommitter) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) {
	panic("implement me")
}
   252  
// CommitPvtDataOfOldBlocks is not needed by these tests and panics if called.
func (*mockCommitter) CommitPvtDataOfOldBlocks(
	reconciledPvtdata []*ledger.ReconciledPvtdata,
	unreconciled ledger.MissingPvtDataInfo,
) ([]*ledger.PvtdataHashMismatch, error) {
	panic("implement me")
}
   259  
// Close is a no-op; the mock holds no resources.
func (*mockCommitter) Close() {
}
   262  
// ramLedger is an in-memory ledger keyed by block number, guarded by an
// RWMutex for concurrent reads during commits.
type ramLedger struct {
	ledger map[uint64]*ledger.BlockAndPvtData
	sync.RWMutex
}
   267  
// GetMissingPvtDataTracker is not needed by these tests and panics if called.
func (mock *ramLedger) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) {
	panic("implement me")
}
   271  
// CommitPvtDataOfOldBlocks is not needed by these tests and panics if called.
func (mock *ramLedger) CommitPvtDataOfOldBlocks(
	reconciledPvtdata []*ledger.ReconciledPvtdata,
	unreconciled ledger.MissingPvtDataInfo,
) ([]*ledger.PvtdataHashMismatch, error) {
	panic("implement me")
}
   278  
// GetConfigHistoryRetriever is not needed by these tests and panics if called.
func (mock *ramLedger) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
	panic("implement me")
}
   282  
   283  func (mock *ramLedger) GetPvtDataAndBlockByNum(blockNum uint64, filter ledger.PvtNsCollFilter) (*ledger.BlockAndPvtData, error) {
   284  	mock.RLock()
   285  	defer mock.RUnlock()
   286  
   287  	if block, ok := mock.ledger[blockNum]; !ok {
   288  		return nil, fmt.Errorf("no block with seq = %d found", blockNum)
   289  	} else {
   290  		return block, nil
   291  	}
   292  }
   293  
// GetPvtDataByNum is not needed by these tests and panics if called.
func (mock *ramLedger) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
	panic("implement me")
}
   297  
   298  func (mock *ramLedger) CommitLegacy(blockAndPvtdata *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
   299  	mock.Lock()
   300  	defer mock.Unlock()
   301  
   302  	if blockAndPvtdata != nil && blockAndPvtdata.Block != nil {
   303  		mock.ledger[blockAndPvtdata.Block.Header.Number] = blockAndPvtdata
   304  		return nil
   305  	}
   306  	return errors.New("invalid input parameters for block and private data param")
   307  }
   308  
// GetBlockchainInfo derives the chain info from the highest stored block.
// NOTE(review): indexing with len(ledger)-1 assumes blocks are numbered
// contiguously starting at 0 (genesis); this holds for how the tests
// populate the ledger, but a gap would make this panic on a nil entry.
func (mock *ramLedger) GetBlockchainInfo() (*pcomm.BlockchainInfo, error) {
	mock.RLock()
	defer mock.RUnlock()

	currentBlock := mock.ledger[uint64(len(mock.ledger)-1)].Block
	return &pcomm.BlockchainInfo{
		Height:            currentBlock.Header.Number + 1,
		CurrentBlockHash:  protoutil.BlockHeaderHash(currentBlock.Header),
		PreviousBlockHash: currentBlock.Header.PreviousHash,
	}, nil
}
   320  
// DoesPvtDataInfoExist always reports that no private-data info exists.
func (mock *ramLedger) DoesPvtDataInfoExist(blkNum uint64) (bool, error) {
	return false, nil
}
   324  
   325  func (mock *ramLedger) GetBlockByNumber(blockNumber uint64) (*pcomm.Block, error) {
   326  	mock.RLock()
   327  	defer mock.RUnlock()
   328  
   329  	if blockAndPvtData, ok := mock.ledger[blockNumber]; !ok {
   330  		return nil, fmt.Errorf("no block with seq = %d found", blockNumber)
   331  	} else {
   332  		return blockAndPvtData.Block, nil
   333  	}
   334  }
   335  
// Close is a no-op; the in-memory ledger holds no external resources.
func (mock *ramLedger) Close() {
}
   338  
// Create new instance of KVLedger to be used for testing: an in-memory
// ramLedger seeded with the genesis block of channel "testChain", wrapped
// in a real LedgerCommitter.
func newCommitter() committer.Committer {
	cb, _ := test.MakeGenesisBlock("testChain")
	ldgr := &ramLedger{
		ledger: make(map[uint64]*ledger.BlockAndPvtData),
	}
	// Error intentionally ignored: ramLedger.CommitLegacy only fails for
	// nil input, and cb is presumably non-nil here — TODO confirm that
	// MakeGenesisBlock cannot return a nil block alongside a nil error.
	ldgr.CommitLegacy(&ledger.BlockAndPvtData{Block: cb}, &ledger.CommitOptions{})
	return committer.NewLedgerCommitter(ldgr)
}
   348  
// newPeerNodeWithGossip constructs a test peer with the supplied gossip
// instance (a real one is created when g is nil) and the package's mock
// transaction validator.
func newPeerNodeWithGossip(id int, committer committer.Committer,
	acceptor peerIdentityAcceptor, g peerNodeGossipSupport, bootPorts ...int) *peerNode {
	logger := flogging.MustGetLogger(gossiputil.StateLogger)
	return newPeerNodeWithGossipWithValidator(logger, id, committer, acceptor, g, &validator.MockValidator{}, bootPorts...)
}
   354  
// Constructing pseudo peer node, simulating only gossip and state transfer part.
// If g is nil, a real gossip instance is created and bound to a fresh gRPC
// server; otherwise the supplied (usually mocked) gossip support is used.
// Returns the assembled peer together with the port it is bound to; returns
// a nil node (with the port) if the state provider could not be created.
func newPeerNodeWithGossipWithValidatorWithMetrics(logger gossiputil.Logger, id int, committer committer.Committer,
	acceptor peerIdentityAcceptor, g peerNodeGossipSupport, v txvalidator.Validator,
	gossipMetrics *metrics.GossipMetrics, bootPorts ...int) (node *peerNode, port int) {
	cs := &cryptoServiceMock{acceptor: acceptor}
	port, gRPCServer, certs, secureDialOpts, _ := gossiputil.CreateGRPCLayer()

	if g == nil {
		// Build a real gossip instance with short intervals suitable for tests.
		config := &gossip.Config{
			BindPort:                     port,
			BootstrapPeers:               bootPeersWithPorts(bootPorts...),
			ID:                           fmt.Sprintf("p%d", id),
			MaxBlockCountToStore:         0,
			MaxPropagationBurstLatency:   time.Duration(10) * time.Millisecond,
			MaxPropagationBurstSize:      10,
			PropagateIterations:          1,
			PropagatePeerNum:             3,
			PullInterval:                 time.Duration(4) * time.Second,
			PullPeerNum:                  5,
			InternalEndpoint:             fmt.Sprintf("127.0.0.1:%d", port),
			PublishCertPeriod:            10 * time.Second,
			RequestStateInfoInterval:     4 * time.Second,
			PublishStateInfoInterval:     4 * time.Second,
			TimeForMembershipTracker:     5 * time.Second,
			TLSCerts:                     certs,
			DigestWaitTime:               algo.DefDigestWaitTime,
			RequestWaitTime:              algo.DefRequestWaitTime,
			ResponseWaitTime:             algo.DefResponseWaitTime,
			DialTimeout:                  comm.DefDialTimeout,
			ConnTimeout:                  comm.DefConnTimeout,
			RecvBuffSize:                 comm.DefRecvBuffSize,
			SendBuffSize:                 comm.DefSendBuffSize,
			MsgExpirationTimeout:         channel.DefMsgExpirationTimeout,
			AliveTimeInterval:            discovery.DefAliveTimeInterval,
			AliveExpirationTimeout:       discovery.DefAliveExpirationTimeout,
			AliveExpirationCheckInterval: discovery.DefAliveExpirationCheckInterval,
			ReconnectInterval:            discovery.DefReconnectInterval,
			MaxConnectionAttempts:        discovery.DefMaxConnectionAttempts,
			MsgExpirationFactor:          discovery.DefMsgExpirationFactor,
		}

		// The peer's own identity is its internal endpoint string; the real
		// gossip instance uses a crypto mock that accepts everything.
		selfID := api.PeerIdentityType(config.InternalEndpoint)
		mcs := &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}
		g = gossip.New(config, gRPCServer.Server(), &orgCryptoService{}, mcs, selfID, secureDialOpts, gossipMetrics, nil)
	}

	g.JoinChan(&joinChanMsg{}, common.ChannelID("testchannelid"))

	// Start serving after gossip has registered on the server.
	go func() {
		gRPCServer.Start()
	}()

	// Initialize pseudo peer simulator, which has only three
	// basic parts

	servicesAdapater := &ServicesMediator{GossipAdapter: g, MCSAdapter: cs}
	coordConfig := privdata.CoordinatorConfig{
		PullRetryThreshold:             0,
		TransientBlockRetention:        1000,
		SkipPullingInvalidTransactions: false,
	}

	// Capability mocks: every channel capability query reports that private
	// data of invalid transactions is stored.
	mspID := "Org1MSP"
	capabilityProvider := &capabilitymock.CapabilityProvider{}
	appCapability := &capabilitymock.AppCapabilities{}
	capabilityProvider.On("Capabilities").Return(appCapability)
	appCapability.On("StorePvtDataOfInvalidTx").Return(true)
	coord := privdata.NewCoordinator(mspID, privdata.Support{
		Validator:          v,
		Committer:          committer,
		CapabilityProvider: capabilityProvider,
	}, &transientstore.Store{}, protoutil.SignedData{}, gossipMetrics.PrivdataMetrics, coordConfig, nil)
	stateConfig := &StateConfig{
		StateCheckInterval:   DefStateCheckInterval,
		StateResponseTimeout: DefStateResponseTimeout,
		StateBatchSize:       DefStateBatchSize,
		StateMaxRetries:      DefStateMaxRetries,
		StateBlockBufferSize: DefStateBlockBufferSize,
		StateChannelSize:     DefStateChannelSize,
		StateEnabled:         true,
	}
	sp := NewGossipStateProvider(logger, "testchannelid", servicesAdapater, coord, gossipMetrics.StateMetrics, blocking, stateConfig)
	if sp == nil {
		// State provider creation failed; release the server before bailing out.
		gRPCServer.Stop()
		return nil, port
	}

	return &peerNode{
		port:   port,
		g:      g,
		s:      sp.(*GossipStateProviderImpl),
		commit: committer,
		cs:     cs,
		grpc:   gRPCServer,
	}, port
}
   451  
// add metrics provider for metrics testing:
// newPeerNodeWithGossipWithMetrics builds a peer like newPeerNodeWithGossip
// but with caller-supplied gossip metrics, discarding the bound port.
func newPeerNodeWithGossipWithMetrics(id int, committer committer.Committer,
	acceptor peerIdentityAcceptor, g peerNodeGossipSupport, gossipMetrics *metrics.GossipMetrics) *peerNode {
	logger := flogging.MustGetLogger(gossiputil.StateLogger)
	node, _ := newPeerNodeWithGossipWithValidatorWithMetrics(logger, id, committer, acceptor, g,
		&validator.MockValidator{}, gossipMetrics)
	return node
}
   460  
// Constructing pseudo peer node, simulating only gossip and state transfer part,
// with a caller-supplied validator and disabled (no-op) metrics.
func newPeerNodeWithGossipWithValidator(logger gossiputil.Logger, id int, committer committer.Committer,
	acceptor peerIdentityAcceptor, g peerNodeGossipSupport, v txvalidator.Validator, bootPorts ...int) *peerNode {
	gossipMetrics := metrics.NewGossipMetrics(&disabled.Provider{})
	node, _ := newPeerNodeWithGossipWithValidatorWithMetrics(logger, id, committer, acceptor, g, v, gossipMetrics, bootPorts...)
	return node
}
   468  
// Constructing pseudo peer node, simulating only gossip and state transfer part.
// A real gossip instance is created (g == nil).
func newPeerNode(id int, committer committer.Committer, acceptor peerIdentityAcceptor, bootPorts ...int) *peerNode {
	return newPeerNodeWithGossip(id, committer, acceptor, nil, bootPorts...)
}
   473  
// Constructing pseudo boot node, simulating only gossip and state transfer part, return port.
// Boot nodes get a real gossip instance, a mock validator and no-op metrics.
func newBootNode(id int, committer committer.Committer, acceptor peerIdentityAcceptor) (node *peerNode, port int) {
	v := &validator.MockValidator{}
	gossipMetrics := metrics.NewGossipMetrics(&disabled.Provider{})
	logger := flogging.MustGetLogger(gossiputil.StateLogger)
	return newPeerNodeWithGossipWithValidatorWithMetrics(logger, id, committer, acceptor, nil, v, gossipMetrics)
}
   481  
   482  func TestStraggler(t *testing.T) {
   483  	for _, testCase := range []struct {
   484  		stateEnabled   bool
   485  		orgLeader      bool
   486  		leaderElection bool
   487  		height         uint64
   488  		receivedSeq    uint64
   489  		expected       bool
   490  	}{
   491  		{
   492  			height:         100,
   493  			receivedSeq:    300,
   494  			leaderElection: true,
   495  			expected:       true,
   496  		},
   497  		{
   498  			height:      100,
   499  			receivedSeq: 300,
   500  			expected:    true,
   501  		},
   502  		{
   503  			height:      100,
   504  			receivedSeq: 300,
   505  			orgLeader:   true,
   506  		},
   507  		{
   508  			height:         100,
   509  			receivedSeq:    105,
   510  			leaderElection: true,
   511  		},
   512  		{
   513  			height:         100,
   514  			receivedSeq:    300,
   515  			leaderElection: true,
   516  			stateEnabled:   true,
   517  		},
   518  	} {
   519  		description := fmt.Sprintf("%+v", testCase)
   520  		t.Run(description, func(t *testing.T) {
   521  			s := &GossipStateProviderImpl{
   522  				config: &StateConfig{
   523  					StateEnabled:      testCase.stateEnabled,
   524  					OrgLeader:         testCase.orgLeader,
   525  					UseLeaderElection: testCase.leaderElection,
   526  				},
   527  			}
   528  
   529  			s.straggler(testCase.height, &proto.Payload{
   530  				SeqNum: testCase.receivedSeq,
   531  			})
   532  		})
   533  	}
   534  }
   535  
   536  func TestNilDirectMsg(t *testing.T) {
   537  	mc := &mockCommitter{Mock: &mock.Mock{}}
   538  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   539  	g := &mocks.GossipMock{}
   540  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   541  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   542  	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   543  	defer p.shutdown()
   544  	p.s.handleStateRequest(nil)
   545  	p.s.directMessage(nil)
   546  	sMsg, _ := protoext.NoopSign(p.s.stateRequestMessage(uint64(10), uint64(8)))
   547  	req := &comm.ReceivedMessageImpl{
   548  		SignedGossipMessage: sMsg,
   549  	}
   550  	p.s.directMessage(req)
   551  }
   552  
   553  func TestNilAddPayload(t *testing.T) {
   554  	mc := &mockCommitter{Mock: &mock.Mock{}}
   555  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   556  	g := &mocks.GossipMock{}
   557  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   558  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   559  	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   560  	defer p.shutdown()
   561  	err := p.s.AddPayload(nil)
   562  	require.Error(t, err)
   563  	require.Contains(t, err.Error(), "nil")
   564  }
   565  
   566  func TestAddPayloadLedgerUnavailable(t *testing.T) {
   567  	mc := &mockCommitter{Mock: &mock.Mock{}}
   568  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   569  	g := &mocks.GossipMock{}
   570  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   571  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   572  	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   573  	defer p.shutdown()
   574  	// Simulate a problem in the ledger
   575  	failedLedger := mock.Mock{}
   576  	failedLedger.On("LedgerHeight", mock.Anything).Return(uint64(0), errors.New("cannot query ledger"))
   577  	mc.Lock()
   578  	mc.Mock = &failedLedger
   579  	mc.Unlock()
   580  
   581  	rawblock := protoutil.NewBlock(uint64(1), []byte{})
   582  	b, _ := pb.Marshal(rawblock)
   583  	err := p.s.AddPayload(&proto.Payload{
   584  		SeqNum: uint64(1),
   585  		Data:   b,
   586  	})
   587  	require.Error(t, err)
   588  	require.Contains(t, err.Error(), "Failed obtaining ledger height")
   589  	require.Contains(t, err.Error(), "cannot query ledger")
   590  }
   591  
func TestLargeBlockGap(t *testing.T) {
	// Scenario: the peer knows of a peer who has a ledger height much higher
	// than itself (500 blocks higher).
	// The peer needs to ask blocks in a way such that the size of the payload buffer
	// never rises above a certain threshold.
	mc := &mockCommitter{Mock: &mock.Mock{}}
	blocksPassedToLedger := make(chan uint64, 200)
	mc.On("CommitLegacy", mock.Anything).Run(func(arg mock.Arguments) {
		blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
	})
	msgsFromPeer := make(chan protoext.ReceivedMessage)
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
	g := &mocks.GossipMock{}
	// Advertise a single remote peer whose ledger height (500) dwarfs ours (1).
	membership := []discovery.NetworkMember{
		{
			PKIid:    common.PKIidType("a"),
			Endpoint: "a",
			Properties: &proto.Properties{
				LedgerHeight: 500,
			},
		},
	}
	g.On("PeersOfChannel", mock.Anything).Return(membership)
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, msgsFromPeer)
	// Every outbound state request is answered synthetically with exactly the
	// requested payload range, simulating the imaginary remote peer.
	g.On("Send", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
		msg := arguments.Get(0).(*proto.GossipMessage)
		// The peer requested a state request
		req := msg.GetStateRequest()
		// Construct a skeleton for the response
		res := &proto.GossipMessage{
			Nonce:   msg.Nonce,
			Channel: []byte("testchannelid"),
			Content: &proto.GossipMessage_StateResponse{
				StateResponse: &proto.RemoteStateResponse{},
			},
		}
		// Populate the response with payloads according to what the peer asked
		for seq := req.StartSeqNum; seq <= req.EndSeqNum; seq++ {
			rawblock := protoutil.NewBlock(seq, []byte{})
			b, _ := pb.Marshal(rawblock)
			payload := &proto.Payload{
				SeqNum: seq,
				Data:   b,
			}
			res.GetStateResponse().Payloads = append(res.GetStateResponse().Payloads, payload)
		}
		// Finally, send the response down the channel the peer expects to receive it from
		sMsg, _ := protoext.NoopSign(res)
		msgsFromPeer <- &comm.ReceivedMessageImpl{
			SignedGossipMessage: sMsg,
		}
	})
	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
	defer p.shutdown()

	// Process blocks at a speed of 20 Millisecond for each block.
	// The imaginative peer that responds to state
	// If the payload buffer expands above defMaxBlockDistance*2 + defAntiEntropyBatchSize blocks, fail the test
	blockProcessingTime := 20 * time.Millisecond // 10 seconds for total 500 blocks
	expectedSequence := 1
	for expectedSequence < 500 {
		blockSeq := <-blocksPassedToLedger
		require.Equal(t, expectedSequence, int(blockSeq))
		// Ensure payload buffer isn't over-populated
		require.True(t, p.s.payloads.Size() <= defMaxBlockDistance*2+defAntiEntropyBatchSize, "payload buffer size is %d", p.s.payloads.Size())
		expectedSequence++
		time.Sleep(blockProcessingTime)
	}
}
   663  
   664  func TestOverPopulation(t *testing.T) {
   665  	// Scenario: Add to the state provider blocks
   666  	// with a gap in between, and ensure that the payload buffer
   667  	// rejects blocks starting if the distance between the ledger height to the latest
   668  	// block it contains is bigger than defMaxBlockDistance.
   669  	mc := &mockCommitter{Mock: &mock.Mock{}}
   670  	blocksPassedToLedger := make(chan uint64, 10)
   671  	mc.On("CommitLegacy", mock.Anything).Run(func(arg mock.Arguments) {
   672  		blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
   673  	})
   674  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   675  	mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
   676  	g := &mocks.GossipMock{}
   677  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   678  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   679  	p := newPeerNode(0, mc, noopPeerIdentityAcceptor)
   680  	defer p.shutdown()
   681  
   682  	// Add some blocks in a sequential manner and make sure it works
   683  	for i := 1; i <= 4; i++ {
   684  		rawblock := protoutil.NewBlock(uint64(i), []byte{})
   685  		b, _ := pb.Marshal(rawblock)
   686  		require.NoError(t, p.s.addPayload(&proto.Payload{
   687  			SeqNum: uint64(i),
   688  			Data:   b,
   689  		}, nonBlocking))
   690  	}
   691  
   692  	// Add payloads from 10 to defMaxBlockDistance, while we're missing blocks [5,9]
   693  	// Should succeed
   694  	for i := 10; i <= defMaxBlockDistance; i++ {
   695  		rawblock := protoutil.NewBlock(uint64(i), []byte{})
   696  		b, _ := pb.Marshal(rawblock)
   697  		require.NoError(t, p.s.addPayload(&proto.Payload{
   698  			SeqNum: uint64(i),
   699  			Data:   b,
   700  		}, nonBlocking))
   701  	}
   702  
   703  	// Add payloads from defMaxBlockDistance + 2 to defMaxBlockDistance * 10
   704  	// Should fail.
   705  	for i := defMaxBlockDistance + 1; i <= defMaxBlockDistance*10; i++ {
   706  		rawblock := protoutil.NewBlock(uint64(i), []byte{})
   707  		b, _ := pb.Marshal(rawblock)
   708  		require.Error(t, p.s.addPayload(&proto.Payload{
   709  			SeqNum: uint64(i),
   710  			Data:   b,
   711  		}, nonBlocking))
   712  	}
   713  
   714  	// Ensure only blocks 1-4 were passed to the ledger
   715  	close(blocksPassedToLedger)
   716  	i := 1
   717  	for seq := range blocksPassedToLedger {
   718  		require.Equal(t, uint64(i), seq)
   719  		i++
   720  	}
   721  	require.Equal(t, 5, i)
   722  
   723  	// Ensure we don't store too many blocks in memory
   724  	sp := p.s
   725  	require.True(t, sp.payloads.Size() < defMaxBlockDistance)
   726  }
   727  
func TestBlockingEnqueue(t *testing.T) {
	// Scenario: In parallel, get blocks from gossip and from the orderer.
	// The blocks from the orderer we get are X2 times the amount of blocks from gossip.
	// The blocks we get from gossip are random indices, to maximize disruption.
	mc := &mockCommitter{Mock: &mock.Mock{}}
	blocksPassedToLedger := make(chan uint64, 10)
	mc.On("CommitLegacy", mock.Anything).Run(func(arg mock.Arguments) {
		blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
	})
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
	p := newPeerNode(0, mc, noopPeerIdentityAcceptor)
	defer p.shutdown()

	numBlocksReceived := 500
	receivedBlockCount := 0
	// Get a block from the orderer every 1ms — these use the blocking
	// AddPayload entry point, ordered 1..numBlocksReceived.
	go func() {
		for i := 1; i <= numBlocksReceived; i++ {
			rawblock := protoutil.NewBlock(uint64(i), []byte{})
			b, _ := pb.Marshal(rawblock)
			block := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			p.s.AddPayload(block)
			time.Sleep(time.Millisecond)
		}
	}()

	// Get a block from gossip every 1ms too — random sequence numbers via the
	// non-blocking path, deliberately interfering with the ordered stream.
	go func() {
		rand.Seed(time.Now().UnixNano())
		for i := 1; i <= numBlocksReceived/2; i++ {
			blockSeq := rand.Intn(numBlocksReceived)
			rawblock := protoutil.NewBlock(uint64(blockSeq), []byte{})
			b, _ := pb.Marshal(rawblock)
			block := &proto.Payload{
				SeqNum: uint64(blockSeq),
				Data:   b,
			}
			p.s.addPayload(block, nonBlocking)
			time.Sleep(time.Millisecond)
		}
	}()

	// Drain commits; after each one, swap in a fresh mock whose LedgerHeight
	// reflects the newly committed block, so subsequent payload validation
	// sees the ledger advancing. Commits must arrive strictly in order.
	for {
		receivedBlock := <-blocksPassedToLedger
		receivedBlockCount++
		m := &mock.Mock{}
		m.On("LedgerHeight", mock.Anything).Return(receivedBlock, nil)
		m.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
		m.On("CommitLegacy", mock.Anything).Run(func(arg mock.Arguments) {
			blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
		})
		mc.Lock()
		mc.Mock = m
		mc.Unlock()
		require.Equal(t, receivedBlock, uint64(receivedBlockCount))
		if int(receivedBlockCount) == numBlocksReceived {
			break
		}
		time.Sleep(time.Millisecond * 10)
	}
}
   796  
// TestHaltChainProcessing verifies that when block validation fails with a
// VSCC execution failure, the state provider logs the failure and aborts
// chain processing rather than committing the block.
func TestHaltChainProcessing(t *testing.T) {
	// Convert a bidirectional channel to the receive-only type returned
	// by the GossipMock's Accept.
	gossipChannel := func(c chan *proto.GossipMessage) <-chan *proto.GossipMessage {
		return c
	}
	// makeBlock returns the serialized bytes of a minimal block carrying
	// the given sequence number.
	makeBlock := func(seq int) []byte {
		b := &pcomm.Block{
			Header: &pcomm.BlockHeader{
				Number: uint64(seq),
			},
			Data: &pcomm.BlockData{
				Data: [][]byte{},
			},
			Metadata: &pcomm.BlockMetadata{
				Metadata: [][]byte{
					{}, {}, {}, {},
				},
			},
		}
		data, _ := pb.Marshal(b)
		return data
	}
	// newBlockMsg wraps block i in a gossip data message for the test channel.
	newBlockMsg := func(i int) *proto.GossipMessage {
		return &proto.GossipMessage{
			Channel: []byte("testchannelid"),
			Content: &proto.GossipMessage_DataMsg{
				DataMsg: &proto.DataMessage{
					Payload: &proto.Payload{
						SeqNum: uint64(i),
						Data:   makeBlock(i),
					},
				},
			},
		}
	}

	mc := &mockCommitter{Mock: &mock.Mock{}}
	mc.On("CommitLegacy", mock.Anything)
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	g := &mocks.GossipMock{}
	gossipMsgs := make(chan *proto.GossipMessage)

	g.On("Accept", mock.Anything, false).Return(gossipChannel(gossipMsgs), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})

	// The validator fails exactly once with a VSCC execution failure,
	// which is expected to halt chain processing.
	v := &validator.MockValidator{}
	v.On("Validate").Return(&errors2.VSCCExecutionFailureError{
		Err: errors.New("foobar"),
	}).Once()

	buf := gbytes.NewBuffer()

	// Hook the state logger so every emitted log message is captured into
	// buf for the log-content assertions below.
	logger := flogging.MustGetLogger(gossiputil.StateLogger).WithOptions(zap.Hooks(func(entry zapcore.Entry) error {
		buf.Write([]byte(entry.Message))
		buf.Write([]byte("\n"))
		return nil
	}))
	peerNode := newPeerNodeWithGossipWithValidator(logger, 0, mc, noopPeerIdentityAcceptor, g, v)
	defer peerNode.shutdown()
	gossipMsgs <- newBlockMsg(1)

	gom := gomega.NewGomegaWithT(t)
	gom.Eventually(buf, time.Minute).Should(gbytes.Say("Failed executing VSCC due to foobar"))
	gom.Eventually(buf, time.Minute).Should(gbytes.Say("Aborting chain processing"))
}
   862  
   863  func TestFailures(t *testing.T) {
   864  	mc := &mockCommitter{Mock: &mock.Mock{}}
   865  	mc.On("LedgerHeight", mock.Anything).Return(uint64(0), nil)
   866  	g := &mocks.GossipMock{}
   867  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   868  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   869  	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
   870  	require.Panics(t, func() {
   871  		newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   872  	})
   873  	// Reprogram mock
   874  	mc.Mock = &mock.Mock{}
   875  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), errors.New("Failed accessing ledger"))
   876  	require.Nil(t, newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g))
   877  }
   878  
   879  func TestGossipReception(t *testing.T) {
   880  	signalChan := make(chan struct{})
   881  	rawblock := &pcomm.Block{
   882  		Header: &pcomm.BlockHeader{
   883  			Number: uint64(1),
   884  		},
   885  		Data: &pcomm.BlockData{
   886  			Data: [][]byte{},
   887  		},
   888  		Metadata: &pcomm.BlockMetadata{
   889  			Metadata: [][]byte{
   890  				{}, {}, {}, {},
   891  			},
   892  		},
   893  	}
   894  	b, _ := pb.Marshal(rawblock)
   895  
   896  	newMsg := func(channel string) *proto.GossipMessage {
   897  		{
   898  			return &proto.GossipMessage{
   899  				Channel: []byte(channel),
   900  				Content: &proto.GossipMessage_DataMsg{
   901  					DataMsg: &proto.DataMessage{
   902  						Payload: &proto.Payload{
   903  							SeqNum: 1,
   904  							Data:   b,
   905  						},
   906  					},
   907  				},
   908  			}
   909  		}
   910  	}
   911  
   912  	createChan := func(signalChan chan struct{}) <-chan *proto.GossipMessage {
   913  		c := make(chan *proto.GossipMessage)
   914  
   915  		go func(c chan *proto.GossipMessage) {
   916  			// Wait for Accept() to be called
   917  			<-signalChan
   918  			// Simulate a message reception from the gossip component with an invalid channel
   919  			c <- newMsg("AAA")
   920  			// Simulate a message reception from the gossip component
   921  			c <- newMsg("testchannelid")
   922  		}(c)
   923  		return c
   924  	}
   925  
   926  	g := &mocks.GossipMock{}
   927  	rmc := createChan(signalChan)
   928  	g.On("Accept", mock.Anything, false).Return(rmc, nil).Run(func(_ mock.Arguments) {
   929  		signalChan <- struct{}{}
   930  	})
   931  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   932  	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
   933  	mc := &mockCommitter{Mock: &mock.Mock{}}
   934  	receivedChan := make(chan struct{})
   935  	mc.On("CommitLegacy", mock.Anything).Run(func(arguments mock.Arguments) {
   936  		block := arguments.Get(0).(*pcomm.Block)
   937  		require.Equal(t, uint64(1), block.Header.Number)
   938  		receivedChan <- struct{}{}
   939  	})
   940  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   941  	mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
   942  	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   943  	defer p.shutdown()
   944  	select {
   945  	case <-receivedChan:
   946  	case <-time.After(time.Second * 15):
   947  		require.Fail(t, "Didn't commit a block within a timely manner")
   948  	}
   949  }
   950  
// TestLedgerHeightFromProperties checks that anti-entropy peer selection
// targets only peers that advertise a ledger height via membership Properties.
func TestLedgerHeightFromProperties(t *testing.T) {
	// Scenario: For each test, spawn a peer and supply it
	// with a specific mock of PeersOfChannel from peers that
	// either set both metadata properly, or only the properties, or none, or both.
	// Ensure the logic handles all of the 4 possible cases as needed

	// Returns whether the given networkMember was selected or not
	wasNetworkMemberSelected := func(t *testing.T, networkMember discovery.NetworkMember) bool {
		var wasGivenNetworkMemberSelected int32
		finChan := make(chan struct{})
		g := &mocks.GossipMock{}
		// Intercept the outgoing state request and record whether its target
		// is the member under test.
		g.On("Send", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
			msg := arguments.Get(0).(*proto.GossipMessage)
			require.NotNil(t, msg.GetStateRequest())
			peer := arguments.Get(1).([]*comm.RemotePeer)[0]
			if bytes.Equal(networkMember.PKIid, peer.PKIID) {
				atomic.StoreInt32(&wasGivenNetworkMemberSelected, 1)
			}
			finChan <- struct{}{}
		})
		g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
		g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
		// defaultPeer always advertises a height, so there is always at least
		// one eligible peer to request state from.
		defaultPeer := discovery.NetworkMember{
			InternalEndpoint: "b",
			PKIid:            common.PKIidType("b"),
			Properties: &proto.Properties{
				LedgerHeight: 5,
			},
		}
		g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{
			defaultPeer,
			networkMember,
		})
		mc := &mockCommitter{Mock: &mock.Mock{}}
		mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
		p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
		defer p.shutdown()
		select {
		case <-time.After(time.Second * 20):
			t.Fatal("Didn't send a request within a timely manner")
		case <-finChan:
		}
		return atomic.LoadInt32(&wasGivenNetworkMemberSelected) == 1
	}

	// Advertises its ledger height via Properties — should be selectable.
	peerWithProperties := discovery.NetworkMember{
		PKIid: common.PKIidType("peerWithoutMetadata"),
		Properties: &proto.Properties{
			LedgerHeight: 10,
		},
		InternalEndpoint: "peerWithoutMetadata",
	}

	// Advertises no Properties — must never be selected.
	peerWithoutProperties := discovery.NetworkMember{
		PKIid:            common.PKIidType("peerWithoutProperties"),
		InternalEndpoint: "peerWithoutProperties",
	}

	tests := []struct {
		shouldGivenBeSelected bool
		member                discovery.NetworkMember
	}{
		{member: peerWithProperties, shouldGivenBeSelected: true},
		{member: peerWithoutProperties, shouldGivenBeSelected: false},
	}

	for _, tst := range tests {
		require.Equal(t, tst.shouldGivenBeSelected, wasNetworkMemberSelected(t, tst.member))
	}
}
  1021  
// TestAccessControl verifies the block-pull policy end to end: only peers
// whose identity (their endpoint, in this test) appears in the authorized set
// end up receiving blocks, while unauthorized peers' ledgers remain empty.
func TestAccessControl(t *testing.T) {
	bootstrapSetSize := 5
	bootstrapSet := make([]*peerNode, 0)

	authorizedPeersSize := 4
	var listeners []net.Listener
	var endpoints []string

	// Reserve ports by listening on them; their addresses double as the
	// authorized identities below.
	for i := 0; i < authorizedPeersSize; i++ {
		ll, err := net.Listen("tcp", "127.0.0.1:0")
		require.NoError(t, err)
		listeners = append(listeners, ll)
		endpoint := ll.Addr().String()
		endpoints = append(endpoints, endpoint)
	}

	defer func() {
		for _, ll := range listeners {
			ll.Close()
		}
	}()

	authorizedPeers := map[string]struct{}{
		endpoints[0]: {},
		endpoints[1]: {},
		endpoints[2]: {},
		endpoints[3]: {},
	}

	// Only identities present in authorizedPeers may pull blocks.
	blockPullPolicy := func(identity api.PeerIdentityType) error {
		if _, isAuthorized := authorizedPeers[string(identity)]; isAuthorized {
			return nil
		}
		return errors.New("Not authorized")
	}

	var bootPorts []int

	for i := 0; i < bootstrapSetSize; i++ {
		commit := newCommitter()
		bootPeer, bootPort := newBootNode(i, commit, blockPullPolicy)
		bootstrapSet = append(bootstrapSet, bootPeer)
		bootPorts = append(bootPorts, bootPort)
	}

	defer func() {
		for _, p := range bootstrapSet {
			p.shutdown()
		}
	}()

	msgCount := 5

	// Seed the first bootstrap peer's ledger with msgCount blocks, which the
	// authorized peers are expected to replicate.
	for i := 1; i <= msgCount; i++ {
		rawblock := protoutil.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootstrapSet[0].s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	standardPeerSetSize := 10
	peersSet := make([]*peerNode, 0)

	for i := 0; i < standardPeerSetSize; i++ {
		commit := newCommitter()
		peersSet = append(peersSet, newPeerNode(bootstrapSetSize+i, commit, blockPullPolicy, bootPorts...))
	}

	defer func() {
		for _, p := range peersSet {
			p.shutdown()
		}
	}()

	// Wait until membership converges before checking ledger heights.
	waitUntilTrueOrTimeout(t, func() bool {
		for _, p := range peersSet {
			if len(p.g.PeersOfChannel(common.ChannelID("testchannelid"))) != bootstrapSetSize+standardPeerSetSize-1 {
				t.Log("Peer discovery has not finished yet")
				return false
			}
		}
		t.Log("All peer discovered each other!!!")
		return true
	}, 30*time.Second)

	t.Log("Waiting for all blocks to arrive.")
	waitUntilTrueOrTimeout(t, func() bool {
		t.Log("Trying to see all authorized peers get all blocks, and all non-authorized didn't")
		for _, p := range peersSet {
			height, err := p.commit.LedgerHeight()
			id := fmt.Sprintf("127.0.0.1:%d", p.port)
			if _, isAuthorized := authorizedPeers[id]; isAuthorized {
				// Authorized peers must eventually reach msgCount+1.
				if height != uint64(msgCount+1) || err != nil {
					return false
				}
			} else {
				// Unauthorized peers must never advance past the genesis block.
				if err == nil && height > 1 {
					require.Fail(t, "Peer", id, "got message but isn't authorized! Height:", height)
				}
			}
		}
		t.Log("All peers have same ledger height!!!")
		return true
	}, 60*time.Second)
}
  1133  
  1134  func TestNewGossipStateProvider_SendingManyMessages(t *testing.T) {
  1135  	bootstrapSetSize := 5
  1136  	bootstrapSet := make([]*peerNode, 0)
  1137  
  1138  	var bootPorts []int
  1139  
  1140  	for i := 0; i < bootstrapSetSize; i++ {
  1141  		commit := newCommitter()
  1142  		bootPeer, bootPort := newBootNode(i, commit, noopPeerIdentityAcceptor)
  1143  		bootstrapSet = append(bootstrapSet, bootPeer)
  1144  		bootPorts = append(bootPorts, bootPort)
  1145  	}
  1146  
  1147  	defer func() {
  1148  		for _, p := range bootstrapSet {
  1149  			p.shutdown()
  1150  		}
  1151  	}()
  1152  
  1153  	msgCount := 10
  1154  
  1155  	for i := 1; i <= msgCount; i++ {
  1156  		rawblock := protoutil.NewBlock(uint64(i), []byte{})
  1157  		if b, err := pb.Marshal(rawblock); err == nil {
  1158  			payload := &proto.Payload{
  1159  				SeqNum: uint64(i),
  1160  				Data:   b,
  1161  			}
  1162  			bootstrapSet[0].s.AddPayload(payload)
  1163  		} else {
  1164  			t.Fail()
  1165  		}
  1166  	}
  1167  
  1168  	standartPeersSize := 10
  1169  	peersSet := make([]*peerNode, 0)
  1170  
  1171  	for i := 0; i < standartPeersSize; i++ {
  1172  		commit := newCommitter()
  1173  		peersSet = append(peersSet, newPeerNode(bootstrapSetSize+i, commit, noopPeerIdentityAcceptor, bootPorts...))
  1174  	}
  1175  
  1176  	defer func() {
  1177  		for _, p := range peersSet {
  1178  			p.shutdown()
  1179  		}
  1180  	}()
  1181  
  1182  	waitUntilTrueOrTimeout(t, func() bool {
  1183  		for _, p := range peersSet {
  1184  			if len(p.g.PeersOfChannel(common.ChannelID("testchannelid"))) != bootstrapSetSize+standartPeersSize-1 {
  1185  				t.Log("Peer discovery has not finished yet")
  1186  				return false
  1187  			}
  1188  		}
  1189  		t.Log("All peer discovered each other!!!")
  1190  		return true
  1191  	}, 30*time.Second)
  1192  
  1193  	t.Log("Waiting for all blocks to arrive.")
  1194  	waitUntilTrueOrTimeout(t, func() bool {
  1195  		t.Log("Trying to see all peers get all blocks")
  1196  		for _, p := range peersSet {
  1197  			height, err := p.commit.LedgerHeight()
  1198  			if height != uint64(msgCount+1) || err != nil {
  1199  				return false
  1200  			}
  1201  		}
  1202  		t.Log("All peers have same ledger height!!!")
  1203  		return true
  1204  	}, 60*time.Second)
  1205  }
  1206  
// Start one bootstrap peer and submit defAntiEntropyBatchSize + 5 messages into
// local ledger, next spawning a new peer waiting for anti-entropy procedure to
// complete missing blocks. Since state transfer messages now batched, it is expected
// to see _exactly_ two messages with state transfer response.
func TestNewGossipStateProvider_BatchingOfStateRequest(t *testing.T) {
	bootPeer, bootPort := newBootNode(0, newCommitter(), noopPeerIdentityAcceptor)
	defer bootPeer.shutdown()

	// One full batch plus 5 extra blocks, so two batches are needed in total.
	msgCount := defAntiEntropyBatchSize + 5
	expectedMessagesCnt := 2

	// Seed the bootstrap peer's ledger with msgCount blocks.
	for i := 1; i <= msgCount; i++ {
		rawblock := protoutil.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootPeer.s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	peer := newPeerNode(1, newCommitter(), noopPeerIdentityAcceptor, bootPort)
	defer peer.shutdown()

	// Accept only remote state (request/response) messages on peerCh.
	naiveStateMsgPredicate := func(message interface{}) bool {
		return protoext.IsRemoteStateMessage(message.(protoext.ReceivedMessage).GetGossipMessage().GossipMessage)
	}
	_, peerCh := peer.g.Accept(naiveStateMsgPredicate, true)

	wg := sync.WaitGroup{}
	wg.Add(expectedMessagesCnt)

	// Number of submitted messages is defAntiEntropyBatchSize + 5, therefore
	// expected number of batches is expectedMessagesCnt = 2. Following go routine
	// makes sure it receives expected amount of messages and sends signal of success
	// to continue the test
	go func() {
		for count := 0; count < expectedMessagesCnt; count++ {
			<-peerCh
			wg.Done()
		}
	}()

	// Once we got message which indicate of two batches being received,
	// making sure messages indeed committed.
	waitUntilTrueOrTimeout(t, func() bool {
		if len(peer.g.PeersOfChannel(common.ChannelID("testchannelid"))) != 1 {
			t.Log("Peer discovery has not finished yet")
			return false
		}
		t.Log("All peer discovered each other!!!")
		return true
	}, 30*time.Second)

	// Waits for message which indicates that expected number of message batches received
	// otherwise timeouts after 2 * defAntiEntropyInterval + 1 seconds
	wg.Wait()

	t.Log("Waiting for all blocks to arrive.")
	waitUntilTrueOrTimeout(t, func() bool {
		t.Log("Trying to see all peers get all blocks")
		height, err := peer.commit.LedgerHeight()
		if height != uint64(msgCount+1) || err != nil {
			return false
		}
		t.Log("All peers have same ledger height!!!")
		return true
	}, 60*time.Second)
}
  1279  
// coordinatorMock mocking structure to capture mock interface for
// coord to simulate coord flow during the test.
// It embeds committer.Committer so unmocked methods still satisfy the
// interface, and testify's mock.Mock to program/record invocations.
type coordinatorMock struct {
	committer.Committer
	mock.Mock
}
  1286  
  1287  func (mock *coordinatorMock) GetPvtDataAndBlockByNum(seqNum uint64, _ protoutil.SignedData) (*pcomm.Block, gossiputil.PvtDataCollections, error) {
  1288  	args := mock.Called(seqNum)
  1289  	return args.Get(0).(*pcomm.Block), args.Get(1).(gossiputil.PvtDataCollections), args.Error(2)
  1290  }
  1291  
  1292  func (mock *coordinatorMock) GetBlockByNum(seqNum uint64) (*pcomm.Block, error) {
  1293  	args := mock.Called(seqNum)
  1294  	return args.Get(0).(*pcomm.Block), args.Error(1)
  1295  }
  1296  
  1297  func (mock *coordinatorMock) StoreBlock(block *pcomm.Block, data gossiputil.PvtDataCollections) error {
  1298  	args := mock.Called(block, data)
  1299  	return args.Error(1)
  1300  }
  1301  
  1302  func (mock *coordinatorMock) LedgerHeight() (uint64, error) {
  1303  	args := mock.Called()
  1304  	return args.Get(0).(uint64), args.Error(1)
  1305  }
  1306  
  1307  func (mock *coordinatorMock) Close() {
  1308  	mock.Called()
  1309  }
  1310  
  1311  // StorePvtData used to persist private data into transient store
  1312  func (mock *coordinatorMock) StorePvtData(txid string, privData *tspb.TxPvtReadWriteSetWithConfigInfo, blkHeight uint64) error {
  1313  	return mock.Called().Error(0)
  1314  }
  1315  
// receivedMessageMock is a testify-based mock of protoext.ReceivedMessage,
// used to simulate state-transfer requests and responses in tests.
type receivedMessageMock struct {
	mock.Mock
}
  1319  
// Ack returns to the sender an acknowledgement for the message.
// The mock deliberately does nothing and records nothing here.
func (mock *receivedMessageMock) Ack(err error) {
}
  1323  
  1324  func (mock *receivedMessageMock) Respond(msg *proto.GossipMessage) {
  1325  	mock.Called(msg)
  1326  }
  1327  
  1328  func (mock *receivedMessageMock) GetGossipMessage() *protoext.SignedGossipMessage {
  1329  	args := mock.Called()
  1330  	return args.Get(0).(*protoext.SignedGossipMessage)
  1331  }
  1332  
  1333  func (mock *receivedMessageMock) GetSourceEnvelope() *proto.Envelope {
  1334  	args := mock.Called()
  1335  	return args.Get(0).(*proto.Envelope)
  1336  }
  1337  
  1338  func (mock *receivedMessageMock) GetConnectionInfo() *protoext.ConnectionInfo {
  1339  	args := mock.Called()
  1340  	return args.Get(0).(*protoext.ConnectionInfo)
  1341  }
  1342  
// testData couples a block with its associated private data collections,
// serving as the expected fixture for state-transfer assertions.
type testData struct {
	block   *pcomm.Block
	pvtData gossiputil.PvtDataCollections
}
  1347  
  1348  func TestTransferOfPrivateRWSet(t *testing.T) {
  1349  	chainID := "testChainID"
  1350  
  1351  	// First gossip instance
  1352  	g := &mocks.GossipMock{}
  1353  	coord1 := new(coordinatorMock)
  1354  
  1355  	gossipChannel := make(chan *proto.GossipMessage)
  1356  	commChannel := make(chan protoext.ReceivedMessage)
  1357  
  1358  	gossipChannelFactory := func(ch chan *proto.GossipMessage) <-chan *proto.GossipMessage {
  1359  		return ch
  1360  	}
  1361  
  1362  	g.On("Accept", mock.Anything, false).Return(gossipChannelFactory(gossipChannel), nil)
  1363  	g.On("Accept", mock.Anything, true).Return(nil, commChannel)
  1364  
  1365  	g.On("UpdateChannelMetadata", mock.Anything, mock.Anything)
  1366  	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
  1367  	g.On("Close")
  1368  
  1369  	coord1.On("LedgerHeight", mock.Anything).Return(uint64(5), nil)
  1370  
  1371  	data := map[uint64]*testData{
  1372  		uint64(2): {
  1373  			block: &pcomm.Block{
  1374  				Header: &pcomm.BlockHeader{
  1375  					Number:       2,
  1376  					DataHash:     []byte{0, 1, 1, 1},
  1377  					PreviousHash: []byte{0, 0, 0, 1},
  1378  				},
  1379  				Data: &pcomm.BlockData{
  1380  					Data: [][]byte{{1}, {2}, {3}},
  1381  				},
  1382  			},
  1383  			pvtData: gossiputil.PvtDataCollections{
  1384  				{
  1385  					SeqInBlock: uint64(0),
  1386  					WriteSet: &rwset.TxPvtReadWriteSet{
  1387  						DataModel: rwset.TxReadWriteSet_KV,
  1388  						NsPvtRwset: []*rwset.NsPvtReadWriteSet{
  1389  							{
  1390  								Namespace: "myCC:v1",
  1391  								CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
  1392  									{
  1393  										CollectionName: "mysecrectCollection",
  1394  										Rwset:          []byte{1, 2, 3, 4, 5},
  1395  									},
  1396  								},
  1397  							},
  1398  						},
  1399  					},
  1400  				},
  1401  			},
  1402  		},
  1403  
  1404  		uint64(3): {
  1405  			block: &pcomm.Block{
  1406  				Header: &pcomm.BlockHeader{
  1407  					Number:       3,
  1408  					DataHash:     []byte{1, 1, 1, 1},
  1409  					PreviousHash: []byte{0, 1, 1, 1},
  1410  				},
  1411  				Data: &pcomm.BlockData{
  1412  					Data: [][]byte{{4}, {5}, {6}},
  1413  				},
  1414  			},
  1415  			pvtData: gossiputil.PvtDataCollections{
  1416  				{
  1417  					SeqInBlock: uint64(2),
  1418  					WriteSet: &rwset.TxPvtReadWriteSet{
  1419  						DataModel: rwset.TxReadWriteSet_KV,
  1420  						NsPvtRwset: []*rwset.NsPvtReadWriteSet{
  1421  							{
  1422  								Namespace: "otherCC:v1",
  1423  								CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
  1424  									{
  1425  										CollectionName: "topClassified",
  1426  										Rwset:          []byte{0, 0, 0, 4, 2},
  1427  									},
  1428  								},
  1429  							},
  1430  						},
  1431  					},
  1432  				},
  1433  			},
  1434  		},
  1435  	}
  1436  
  1437  	for seqNum, each := range data {
  1438  		coord1.On("GetPvtDataAndBlockByNum", seqNum).Return(each.block, each.pvtData, nil /* no error*/)
  1439  	}
  1440  
  1441  	coord1.On("Close")
  1442  
  1443  	servicesAdapater := &ServicesMediator{GossipAdapter: g, MCSAdapter: &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}}
  1444  	stateMetrics := metrics.NewGossipMetrics(&disabled.Provider{}).StateMetrics
  1445  	stateConfig := &StateConfig{
  1446  		StateCheckInterval:   DefStateCheckInterval,
  1447  		StateResponseTimeout: DefStateResponseTimeout,
  1448  		StateBatchSize:       DefStateBatchSize,
  1449  		StateMaxRetries:      DefStateMaxRetries,
  1450  		StateBlockBufferSize: DefStateBlockBufferSize,
  1451  		StateChannelSize:     DefStateChannelSize,
  1452  		StateEnabled:         true,
  1453  	}
  1454  	logger := flogging.MustGetLogger(gossiputil.StateLogger)
  1455  	st := NewGossipStateProvider(logger, chainID, servicesAdapater, coord1, stateMetrics, blocking, stateConfig)
  1456  	defer st.Stop()
  1457  
  1458  	// Mocked state request message
  1459  	requestMsg := new(receivedMessageMock)
  1460  
  1461  	// Get state request message, blocks [2...3]
  1462  	requestGossipMsg := &proto.GossipMessage{
  1463  		// Copy nonce field from the request, so it will be possible to match response
  1464  		Nonce:   1,
  1465  		Tag:     proto.GossipMessage_CHAN_OR_ORG,
  1466  		Channel: []byte(chainID),
  1467  		Content: &proto.GossipMessage_StateRequest{StateRequest: &proto.RemoteStateRequest{
  1468  			StartSeqNum: 2,
  1469  			EndSeqNum:   3,
  1470  		}},
  1471  	}
  1472  
  1473  	msg, _ := protoext.NoopSign(requestGossipMsg)
  1474  
  1475  	requestMsg.On("GetGossipMessage").Return(msg)
  1476  	requestMsg.On("GetConnectionInfo").Return(&protoext.ConnectionInfo{
  1477  		Auth: &protoext.AuthInfo{},
  1478  	})
  1479  
  1480  	// Channel to send responses back
  1481  	responseChannel := make(chan protoext.ReceivedMessage)
  1482  	defer close(responseChannel)
  1483  
  1484  	requestMsg.On("Respond", mock.Anything).Run(func(args mock.Arguments) {
  1485  		// Get gossip response to respond back on state request
  1486  		response := args.Get(0).(*proto.GossipMessage)
  1487  		// Wrap it up into received response
  1488  		receivedMsg := new(receivedMessageMock)
  1489  		// Create sign response
  1490  		msg, _ := protoext.NoopSign(response)
  1491  		// Mock to respond
  1492  		receivedMsg.On("GetGossipMessage").Return(msg)
  1493  		// Send response
  1494  		responseChannel <- receivedMsg
  1495  	})
  1496  
  1497  	// Send request message via communication channel into state transfer
  1498  	commChannel <- requestMsg
  1499  
  1500  	// State transfer request should result in state response back
  1501  	response := <-responseChannel
  1502  
  1503  	// Start the assertion section
  1504  	stateResponse := response.GetGossipMessage().GetStateResponse()
  1505  
  1506  	assertion := require.New(t)
  1507  	// Nonce should be equal to Nonce of the request
  1508  	assertion.Equal(response.GetGossipMessage().Nonce, uint64(1))
  1509  	// Payload should not need be nil
  1510  	assertion.NotNil(stateResponse)
  1511  	assertion.NotNil(stateResponse.Payloads)
  1512  	// Exactly two messages expected
  1513  	assertion.Equal(len(stateResponse.Payloads), 2)
  1514  
  1515  	// Assert we have all data and it's same as we expected it
  1516  	for _, each := range stateResponse.Payloads {
  1517  		block := &pcomm.Block{}
  1518  		err := pb.Unmarshal(each.Data, block)
  1519  		assertion.NoError(err)
  1520  
  1521  		assertion.NotNil(block.Header)
  1522  
  1523  		testBlock, ok := data[block.Header.Number]
  1524  		assertion.True(ok)
  1525  
  1526  		for i, d := range testBlock.block.Data.Data {
  1527  			assertion.True(bytes.Equal(d, block.Data.Data[i]))
  1528  		}
  1529  
  1530  		for i, p := range testBlock.pvtData {
  1531  			pvtDataPayload := &proto.PvtDataPayload{}
  1532  			err := pb.Unmarshal(each.PrivateData[i], pvtDataPayload)
  1533  			assertion.NoError(err)
  1534  			pvtRWSet := &rwset.TxPvtReadWriteSet{}
  1535  			err = pb.Unmarshal(pvtDataPayload.Payload, pvtRWSet)
  1536  			assertion.NoError(err)
  1537  			assertion.True(pb.Equal(p.WriteSet, pvtRWSet))
  1538  		}
  1539  	}
  1540  }
  1541  
// testPeer bundles a gossip mock with its message channels and a coordinator
// mock, representing one peer in peer-to-peer state-transfer tests.
type testPeer struct {
	*mocks.GossipMock
	id            string                        // peer identifier, e.g. "peer1"
	gossipChannel chan *proto.GossipMessage     // delivers gossiped messages
	commChannel   chan protoext.ReceivedMessage // delivers point-to-point messages
	coord         *coordinatorMock
}
  1549  
  1550  func (t testPeer) Gossip() <-chan *proto.GossipMessage {
  1551  	return t.gossipChannel
  1552  }
  1553  
  1554  func (t testPeer) Comm() chan protoext.ReceivedMessage {
  1555  	return t.commChannel
  1556  }
  1557  
// peers holds the two fixture peers used by TestTransferOfPvtDataBetweenPeers,
// each with its own gossip/comm channels and coordinator mock.
var peers = map[string]testPeer{
	"peer1": {
		id:            "peer1",
		gossipChannel: make(chan *proto.GossipMessage),
		commChannel:   make(chan protoext.ReceivedMessage),
		GossipMock:    &mocks.GossipMock{},
		coord:         new(coordinatorMock),
	},
	"peer2": {
		id:            "peer2",
		gossipChannel: make(chan *proto.GossipMessage),
		commChannel:   make(chan protoext.ReceivedMessage),
		GossipMock:    &mocks.GossipMock{},
		coord:         new(coordinatorMock),
	},
}
  1574  
  1575  func TestTransferOfPvtDataBetweenPeers(t *testing.T) {
  1576  	/*
  1577  	   This test covers pretty basic scenario, there are two peers: "peer1" and "peer2",
  1578  	   while peer2 missing a few blocks in the ledger therefore asking to replicate those
  1579  	   blocks from the first peers.
  1580  
  1581  	   Test going to check that block from one peer will be replicated into second one and
  1582  	   have identical content.
  1583  	*/
  1584  	chainID := "testChainID"
  1585  
  1586  	// Initialize peer
  1587  	for _, peer := range peers {
  1588  		peer.On("Accept", mock.Anything, false).Return(peer.Gossip(), nil)
  1589  
  1590  		peer.On("Accept", mock.Anything, true).
  1591  			Return(nil, peer.Comm()).
  1592  			Once().
  1593  			On("Accept", mock.Anything, true).
  1594  			Return(nil, make(chan protoext.ReceivedMessage))
  1595  
  1596  		peer.On("UpdateChannelMetadata", mock.Anything, mock.Anything)
  1597  		peer.coord.On("Close")
  1598  		peer.On("Close")
  1599  	}
  1600  
  1601  	// First peer going to have more advanced ledger
  1602  	peers["peer1"].coord.On("LedgerHeight", mock.Anything).Return(uint64(3), nil)
  1603  
  1604  	// Second peer has a gap of one block, hence it will have to replicate it from previous
  1605  	peers["peer2"].coord.On("LedgerHeight", mock.Anything).Return(uint64(2), nil)
  1606  
  1607  	peers["peer1"].coord.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
  1608  	peers["peer2"].coord.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
  1609  
  1610  	peers["peer1"].coord.On("GetPvtDataAndBlockByNum", uint64(2)).Return(&pcomm.Block{
  1611  		Header: &pcomm.BlockHeader{
  1612  			Number:       2,
  1613  			DataHash:     []byte{0, 0, 0, 1},
  1614  			PreviousHash: []byte{0, 1, 1, 1},
  1615  		},
  1616  		Data: &pcomm.BlockData{
  1617  			Data: [][]byte{{4}, {5}, {6}},
  1618  		},
  1619  	}, gossiputil.PvtDataCollections{&ledger.TxPvtData{
  1620  		SeqInBlock: uint64(1),
  1621  		WriteSet: &rwset.TxPvtReadWriteSet{
  1622  			DataModel: rwset.TxReadWriteSet_KV,
  1623  			NsPvtRwset: []*rwset.NsPvtReadWriteSet{
  1624  				{
  1625  					Namespace: "myCC:v1",
  1626  					CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
  1627  						{
  1628  							CollectionName: "mysecrectCollection",
  1629  							Rwset:          []byte{1, 2, 3, 4, 5},
  1630  						},
  1631  					},
  1632  				},
  1633  			},
  1634  		},
  1635  	}}, nil)
  1636  
  1637  	// Return membership of the peers
  1638  	member2 := discovery.NetworkMember{
  1639  		PKIid:            common.PKIidType([]byte{2}),
  1640  		Endpoint:         "peer2:7051",
  1641  		InternalEndpoint: "peer2:7051",
  1642  		Properties: &proto.Properties{
  1643  			LedgerHeight: 2,
  1644  		},
  1645  	}
  1646  
  1647  	member1 := discovery.NetworkMember{
  1648  		PKIid:            common.PKIidType([]byte{1}),
  1649  		Endpoint:         "peer1:7051",
  1650  		InternalEndpoint: "peer1:7051",
  1651  		Properties: &proto.Properties{
  1652  			LedgerHeight: 3,
  1653  		},
  1654  	}
  1655  
  1656  	peers["peer1"].On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{member2})
  1657  	peers["peer2"].On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{member1})
  1658  
  1659  	peers["peer2"].On("Send", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
  1660  		request := args.Get(0).(*proto.GossipMessage)
  1661  		requestMsg := new(receivedMessageMock)
  1662  		msg, _ := protoext.NoopSign(request)
  1663  		requestMsg.On("GetGossipMessage").Return(msg)
  1664  		requestMsg.On("GetConnectionInfo").Return(&protoext.ConnectionInfo{
  1665  			Auth: &protoext.AuthInfo{},
  1666  		})
  1667  
  1668  		requestMsg.On("Respond", mock.Anything).Run(func(args mock.Arguments) {
  1669  			response := args.Get(0).(*proto.GossipMessage)
  1670  			receivedMsg := new(receivedMessageMock)
  1671  			msg, _ := protoext.NoopSign(response)
  1672  			receivedMsg.On("GetGossipMessage").Return(msg)
  1673  			// Send response back to the peer
  1674  			peers["peer2"].commChannel <- receivedMsg
  1675  		})
  1676  
  1677  		peers["peer1"].commChannel <- requestMsg
  1678  	})
  1679  
  1680  	wg := sync.WaitGroup{}
  1681  	wg.Add(1)
  1682  	peers["peer2"].coord.On("StoreBlock", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
  1683  		wg.Done() // Done once second peer hits commit of the block
  1684  	}).Return([]string{}, nil) // No pvt data to complete and no error
  1685  
  1686  	cryptoService := &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}
  1687  
  1688  	stateMetrics := metrics.NewGossipMetrics(&disabled.Provider{}).StateMetrics
  1689  
  1690  	mediator := &ServicesMediator{GossipAdapter: peers["peer1"], MCSAdapter: cryptoService}
  1691  	stateConfig := &StateConfig{
  1692  		StateCheckInterval:   DefStateCheckInterval,
  1693  		StateResponseTimeout: DefStateResponseTimeout,
  1694  		StateBatchSize:       DefStateBatchSize,
  1695  		StateMaxRetries:      DefStateMaxRetries,
  1696  		StateBlockBufferSize: DefStateBlockBufferSize,
  1697  		StateChannelSize:     DefStateChannelSize,
  1698  		StateEnabled:         true,
  1699  	}
  1700  	logger := flogging.MustGetLogger(gossiputil.StateLogger)
  1701  	peer1State := NewGossipStateProvider(logger, chainID, mediator, peers["peer1"].coord, stateMetrics, blocking, stateConfig)
  1702  	defer peer1State.Stop()
  1703  
  1704  	mediator = &ServicesMediator{GossipAdapter: peers["peer2"], MCSAdapter: cryptoService}
  1705  	logger = flogging.MustGetLogger(gossiputil.StateLogger)
  1706  	peer2State := NewGossipStateProvider(logger, chainID, mediator, peers["peer2"].coord, stateMetrics, blocking, stateConfig)
  1707  	defer peer2State.Stop()
  1708  
  1709  	// Make sure state was replicated
  1710  	done := make(chan struct{})
  1711  	go func() {
  1712  		wg.Wait()
  1713  		done <- struct{}{}
  1714  	}()
  1715  
  1716  	select {
  1717  	case <-done:
  1718  		break
  1719  	case <-time.After(30 * time.Second):
  1720  		t.Fail()
  1721  	}
  1722  }
  1723  
  1724  func TestStateRequestValidator(t *testing.T) {
  1725  	validator := &stateRequestValidator{}
  1726  	err := validator.validate(&proto.RemoteStateRequest{
  1727  		StartSeqNum: 10,
  1728  		EndSeqNum:   5,
  1729  	}, defAntiEntropyBatchSize)
  1730  	require.Contains(t, err.Error(), "Invalid sequence interval [10...5).")
  1731  	require.Error(t, err)
  1732  
  1733  	err = validator.validate(&proto.RemoteStateRequest{
  1734  		StartSeqNum: 10,
  1735  		EndSeqNum:   30,
  1736  	}, defAntiEntropyBatchSize)
  1737  	require.Contains(t, err.Error(), "Requesting blocks range [10-30) greater than configured")
  1738  	require.Error(t, err)
  1739  
  1740  	err = validator.validate(&proto.RemoteStateRequest{
  1741  		StartSeqNum: 10,
  1742  		EndSeqNum:   20,
  1743  	}, defAntiEntropyBatchSize)
  1744  	require.NoError(t, err)
  1745  }
  1746  
  1747  func waitUntilTrueOrTimeout(t *testing.T, predicate func() bool, timeout time.Duration) {
  1748  	ch := make(chan struct{})
  1749  	t.Log("Started to spin off, until predicate will be satisfied.")
  1750  
  1751  	go func() {
  1752  		t := time.NewTicker(time.Second)
  1753  		for !predicate() {
  1754  			select {
  1755  			case <-ch:
  1756  				t.Stop()
  1757  				return
  1758  			case <-t.C:
  1759  			}
  1760  		}
  1761  		t.Stop()
  1762  		close(ch)
  1763  	}()
  1764  
  1765  	select {
  1766  	case <-ch:
  1767  		t.Log("Done.")
  1768  		break
  1769  	case <-time.After(timeout):
  1770  		t.Fatal("Timeout has expired")
  1771  		close(ch)
  1772  		break
  1773  	}
  1774  	t.Log("Stop waiting until timeout or true")
  1775  }