github.com/kaituanwang/hyperledger@v2.0.1+incompatible/gossip/state/state_test.go

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package state

import (
	"bytes"
	"errors"
	"fmt"
	"math/rand"
	"net"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	pb "github.com/golang/protobuf/proto"
	pcomm "github.com/hyperledger/fabric-protos-go/common"
	proto "github.com/hyperledger/fabric-protos-go/gossip"
	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
	tspb "github.com/hyperledger/fabric-protos-go/transientstore"
	"github.com/hyperledger/fabric/bccsp/factory"
	"github.com/hyperledger/fabric/common/configtx/test"
	errors2 "github.com/hyperledger/fabric/common/errors"
	"github.com/hyperledger/fabric/common/flogging"
	"github.com/hyperledger/fabric/common/metrics/disabled"
	corecomm "github.com/hyperledger/fabric/core/comm"
	"github.com/hyperledger/fabric/core/committer"
	"github.com/hyperledger/fabric/core/committer/txvalidator"
	"github.com/hyperledger/fabric/core/ledger"
	"github.com/hyperledger/fabric/core/mocks/validator"
	"github.com/hyperledger/fabric/core/transientstore"
	"github.com/hyperledger/fabric/gossip/api"
	"github.com/hyperledger/fabric/gossip/comm"
	"github.com/hyperledger/fabric/gossip/common"
	"github.com/hyperledger/fabric/gossip/discovery"
	"github.com/hyperledger/fabric/gossip/gossip"
	"github.com/hyperledger/fabric/gossip/gossip/algo"
	"github.com/hyperledger/fabric/gossip/gossip/channel"
	"github.com/hyperledger/fabric/gossip/metrics"
	"github.com/hyperledger/fabric/gossip/privdata"
	capabilitymock "github.com/hyperledger/fabric/gossip/privdata/mocks"
	"github.com/hyperledger/fabric/gossip/protoext"
	"github.com/hyperledger/fabric/gossip/state/mocks"
	gossiputil "github.com/hyperledger/fabric/gossip/util"
	gutil "github.com/hyperledger/fabric/gossip/util"
	"github.com/hyperledger/fabric/protoutil"
	"github.com/onsi/gomega/gbytes"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

var (
	orgID = []byte("ORG1")

	noopPeerIdentityAcceptor = func(identity api.PeerIdentityType) error {
		return nil
	}
)

type peerIdentityAcceptor func(identity api.PeerIdentityType) error

type joinChanMsg struct {
}

func init() {
	gutil.SetupTestLogging()
	factory.InitFactories(nil)
}

// SequenceNumber returns the sequence number of the block that the message
// is derived from
func (*joinChanMsg) SequenceNumber() uint64 {
	return uint64(time.Now().UnixNano())
}

// Members returns the organizations of the channel
func (jcm *joinChanMsg) Members() []api.OrgIdentityType {
	return []api.OrgIdentityType{orgID}
}

// AnchorPeersOf returns the anchor peers of the given organization
func (jcm *joinChanMsg) AnchorPeersOf(org api.OrgIdentityType) []api.AnchorPeer {
	return []api.AnchorPeer{}
}

type orgCryptoService struct {
}

// OrgByPeerIdentity returns the OrgIdentityType
// of a given peer identity
func (*orgCryptoService) OrgByPeerIdentity(identity api.PeerIdentityType) api.OrgIdentityType {
	return orgID
}

// Verify verifies a JoinChannelMessage, returns nil on success,
// and an error on failure
func (*orgCryptoService) Verify(joinChanMsg api.JoinChannelMessage) error {
	return nil
}

type cryptoServiceMock struct {
	acceptor peerIdentityAcceptor
}

func (cryptoServiceMock) Expiration(peerIdentity api.PeerIdentityType) (time.Time, error) {
	return time.Now().Add(time.Hour), nil
}

// GetPKIidOfCert returns the PKI-ID of a peer's identity
func (*cryptoServiceMock) GetPKIidOfCert(peerIdentity api.PeerIdentityType) common.PKIidType {
	return common.PKIidType(peerIdentity)
}

// VerifyBlock returns nil if the block is properly signed,
// else returns error
func (*cryptoServiceMock) VerifyBlock(channelID common.ChannelID, seqNum uint64, signedBlock *pcomm.Block) error {
	return nil
}

// Sign signs msg with this peer's signing key and outputs
// the signature if no error occurred.
func (*cryptoServiceMock) Sign(msg []byte) ([]byte, error) {
	clone := make([]byte, len(msg))
	copy(clone, msg)
	return clone, nil
}
// Verify checks that signature is a valid signature of message under a peer's verification key.
// If the verification succeeded, Verify returns nil meaning no error occurred.
// If peerIdentity is nil, then the signature is verified against this peer's verification key.
func (*cryptoServiceMock) Verify(peerIdentity api.PeerIdentityType, signature, message []byte) error {
	equal := bytes.Equal(signature, message)
	if !equal {
		return fmt.Errorf("Wrong signature:%v, %v", signature, message)
	}
	return nil
}

// VerifyByChannel checks that signature is a valid signature of message
// under a peer's verification key, but also in the context of a specific channel.
// If the verification succeeded, Verify returns nil meaning no error occurred.
// If peerIdentity is nil, then the signature is verified against this peer's verification key.
func (cs *cryptoServiceMock) VerifyByChannel(channelID common.ChannelID, peerIdentity api.PeerIdentityType, signature, message []byte) error {
	return cs.acceptor(peerIdentity)
}

func (*cryptoServiceMock) ValidateIdentity(peerIdentity api.PeerIdentityType) error {
	return nil
}

func bootPeersWithPorts(ports ...int) []string {
	var peers []string
	for _, port := range ports {
		peers = append(peers, fmt.Sprintf("127.0.0.1:%d", port))
	}
	return peers
}
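
// For example (illustrative only), bootPeersWithPorts(7051, 7052) yields
// []string{"127.0.0.1:7051", "127.0.0.1:7052"}, the endpoint format the
// gossip BootstrapPeers configuration below expects.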

type peerNodeGossipSupport interface {
	GossipAdapter
	Stop()
	JoinChan(joinMsg api.JoinChannelMessage, channelID common.ChannelID)
}

// Simple representation of a peer which includes only the
// communication module, gossip, and state transfer
type peerNode struct {
	port   int
	g      peerNodeGossipSupport
	s      *GossipStateProviderImpl
	cs     *cryptoServiceMock
	commit committer.Committer
	grpc   *corecomm.GRPCServer
}

// shutdown stops all modules used by the peer node
func (node *peerNode) shutdown() {
	node.s.Stop()
	node.g.Stop()
	node.grpc.Stop()
}

type mockCommitter struct {
	*mock.Mock
	sync.Mutex
}

func (mc *mockCommitter) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
	args := mc.Called()
	return args.Get(0).(ledger.ConfigHistoryRetriever), args.Error(1)
}

func (mc *mockCommitter) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
	args := mc.Called(blockNum, filter)
	return args.Get(0).([]*ledger.TxPvtData), args.Error(1)
}

func (mc *mockCommitter) CommitLegacy(blockAndPvtData *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
	mc.Lock()
	m := mc.Mock
	mc.Unlock()
	m.Called(blockAndPvtData.Block)
	return nil
}

func (mc *mockCommitter) GetPvtDataAndBlockByNum(seqNum uint64) (*ledger.BlockAndPvtData, error) {
	args := mc.Called(seqNum)
	return args.Get(0).(*ledger.BlockAndPvtData), args.Error(1)
}

func (mc *mockCommitter) LedgerHeight() (uint64, error) {
	mc.Lock()
	m := mc.Mock
	mc.Unlock()
	args := m.Called()
	if args.Get(1) == nil {
		return args.Get(0).(uint64), nil
	}
	return args.Get(0).(uint64), args.Get(1).(error)
}

func (mc *mockCommitter) DoesPvtDataInfoExistInLedger(blkNum uint64) (bool, error) {
	args := mc.Called(blkNum)
	return args.Get(0).(bool), args.Error(1)
}

func (mc *mockCommitter) GetBlocks(blockSeqs []uint64) []*pcomm.Block {
	if mc.Called(blockSeqs).Get(0) == nil {
		return nil
	}
	return mc.Called(blockSeqs).Get(0).([]*pcomm.Block)
}

func (*mockCommitter) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) {
	panic("implement me")
}

func (*mockCommitter) CommitPvtDataOfOldBlocks(reconciledPvtdata []*ledger.ReconciledPvtdata) ([]*ledger.PvtdataHashMismatch, error) {
	panic("implement me")
}

func (*mockCommitter) Close() {
}

type ramLedger struct {
	ledger map[uint64]*ledger.BlockAndPvtData
	sync.RWMutex
}

func (mock *ramLedger) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) {
	panic("implement me")
}

func (mock *ramLedger) CommitPvtDataOfOldBlocks(reconciledPvtdata []*ledger.ReconciledPvtdata) ([]*ledger.PvtdataHashMismatch, error) {
	panic("implement me")
}

func (mock *ramLedger) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
	panic("implement me")
}

func (mock *ramLedger) GetPvtDataAndBlockByNum(blockNum uint64, filter ledger.PvtNsCollFilter) (*ledger.BlockAndPvtData, error) {
	mock.RLock()
	defer mock.RUnlock()

	block, ok := mock.ledger[blockNum]
	if !ok {
		return nil, fmt.Errorf("no block with seq = %d found", blockNum)
	}
	return block, nil
}

func (mock *ramLedger) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
	panic("implement me")
}

func (mock *ramLedger) CommitLegacy(blockAndPvtdata *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
	mock.Lock()
	defer mock.Unlock()

	if blockAndPvtdata != nil && blockAndPvtdata.Block != nil {
		mock.ledger[blockAndPvtdata.Block.Header.Number] = blockAndPvtdata
		return nil
	}
	return errors.New("invalid input parameters for block and private data")
}

func (mock *ramLedger) GetBlockchainInfo() (*pcomm.BlockchainInfo, error) {
	mock.RLock()
	defer mock.RUnlock()

	currentBlock := mock.ledger[uint64(len(mock.ledger)-1)].Block
	return &pcomm.BlockchainInfo{
		Height:            currentBlock.Header.Number + 1,
		CurrentBlockHash:  protoutil.BlockHeaderHash(currentBlock.Header),
		PreviousBlockHash: currentBlock.Header.PreviousHash,
	}, nil
}

func (mock *ramLedger) DoesPvtDataInfoExist(blkNum uint64) (bool, error) {
	return false, nil
}

func (mock *ramLedger) GetBlockByNumber(blockNumber uint64) (*pcomm.Block, error) {
	mock.RLock()
	defer mock.RUnlock()

	blockAndPvtData, ok := mock.ledger[blockNumber]
	if !ok {
		return nil, fmt.Errorf("no block with seq = %d found", blockNumber)
	}
	return blockAndPvtData.Block, nil
}

func (mock *ramLedger) Close() {
}
// newCommitter creates a committer backed by a simple in-memory ledger,
// to be used for testing
func newCommitter() committer.Committer {
	cb, _ := test.MakeGenesisBlock("testChain")
	ldgr := &ramLedger{
		ledger: make(map[uint64]*ledger.BlockAndPvtData),
	}
	ldgr.CommitLegacy(&ledger.BlockAndPvtData{Block: cb}, &ledger.CommitOptions{})
	return committer.NewLedgerCommitter(ldgr)
}
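
// Illustrative sketch (not used by the tests in this file): the tests below
// repeatedly build gossip payloads by marshaling a freshly created block. A
// helper like the following captures that pattern; the name makeTestPayload is
// hypothetical, and errors from Marshal are ignored exactly as the tests do.
func makeTestPayload(seq uint64) *proto.Payload {
	rawblock := protoutil.NewBlock(seq, []byte{})
	b, _ := pb.Marshal(rawblock)
	return &proto.Payload{
		SeqNum: seq,
		Data:   b,
	}
}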

func newPeerNodeWithGossip(id int, committer committer.Committer,
	acceptor peerIdentityAcceptor, g peerNodeGossipSupport, bootPorts ...int) *peerNode {
	return newPeerNodeWithGossipWithValidator(id, committer, acceptor, g, &validator.MockValidator{}, bootPorts...)
}

// Constructs a pseudo peer node, simulating only the gossip and state transfer parts
func newPeerNodeWithGossipWithValidatorWithMetrics(id int, committer committer.Committer,
	acceptor peerIdentityAcceptor, g peerNodeGossipSupport, v txvalidator.Validator,
	gossipMetrics *metrics.GossipMetrics, bootPorts ...int) (node *peerNode, port int) {
	cs := &cryptoServiceMock{acceptor: acceptor}
	port, gRPCServer, certs, secureDialOpts, _ := gossiputil.CreateGRPCLayer()

	if g == nil {
		config := &gossip.Config{
			BindPort:                     port,
			BootstrapPeers:               bootPeersWithPorts(bootPorts...),
			ID:                           fmt.Sprintf("p%d", id),
			MaxBlockCountToStore:         0,
			MaxPropagationBurstLatency:   10 * time.Millisecond,
			MaxPropagationBurstSize:      10,
			PropagateIterations:          1,
			PropagatePeerNum:             3,
			PullInterval:                 4 * time.Second,
			PullPeerNum:                  5,
			InternalEndpoint:             fmt.Sprintf("127.0.0.1:%d", port),
			PublishCertPeriod:            10 * time.Second,
			RequestStateInfoInterval:     4 * time.Second,
			PublishStateInfoInterval:     4 * time.Second,
			TimeForMembershipTracker:     5 * time.Second,
			TLSCerts:                     certs,
			DigestWaitTime:               algo.DefDigestWaitTime,
			RequestWaitTime:              algo.DefRequestWaitTime,
			ResponseWaitTime:             algo.DefResponseWaitTime,
			DialTimeout:                  comm.DefDialTimeout,
			ConnTimeout:                  comm.DefConnTimeout,
			RecvBuffSize:                 comm.DefRecvBuffSize,
			SendBuffSize:                 comm.DefSendBuffSize,
			MsgExpirationTimeout:         channel.DefMsgExpirationTimeout,
			AliveTimeInterval:            discovery.DefAliveTimeInterval,
			AliveExpirationTimeout:       discovery.DefAliveExpirationTimeout,
			AliveExpirationCheckInterval: discovery.DefAliveExpirationCheckInterval,
			ReconnectInterval:            discovery.DefReconnectInterval,
		}

		selfID := api.PeerIdentityType(config.InternalEndpoint)
		mcs := &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}
		g = gossip.New(config, gRPCServer.Server(), &orgCryptoService{}, mcs, selfID, secureDialOpts, gossipMetrics)
	}

	g.JoinChan(&joinChanMsg{}, common.ChannelID("testchannelid"))

	go func() {
		gRPCServer.Start()
	}()

	// Initialize the pseudo peer simulator, which has only three
	// basic parts

	servicesAdapter := &ServicesMediator{GossipAdapter: g, MCSAdapter: cs}
	coordConfig := privdata.CoordinatorConfig{
		PullRetryThreshold:             0,
		TransientBlockRetention:        1000,
		SkipPullingInvalidTransactions: false,
	}

	mspID := "Org1MSP"
	capabilityProvider := &capabilitymock.CapabilityProvider{}
	appCapability := &capabilitymock.AppCapabilities{}
	capabilityProvider.On("Capabilities").Return(appCapability)
	appCapability.On("StorePvtDataOfInvalidTx").Return(true)
	coord := privdata.NewCoordinator(mspID, privdata.Support{
		Validator:          v,
		Committer:          committer,
		CapabilityProvider: capabilityProvider,
	}, &transientstore.Store{}, protoutil.SignedData{}, gossipMetrics.PrivdataMetrics, coordConfig, nil)
	stateConfig := &StateConfig{
		StateCheckInterval:   DefStateCheckInterval,
		StateResponseTimeout: DefStateResponseTimeout,
		StateBatchSize:       DefStateBatchSize,
		StateMaxRetries:      DefStateMaxRetries,
		StateBlockBufferSize: DefStateBlockBufferSize,
		StateChannelSize:     DefStateChannelSize,
		StateEnabled:         DefStateEnabled,
	}
	sp := NewGossipStateProvider("testchannelid", servicesAdapter, coord, gossipMetrics.StateMetrics, blocking, stateConfig)
	if sp == nil {
		gRPCServer.Stop()
		return nil, port
	}

	return &peerNode{
		port:   port,
		g:      g,
		s:      sp.(*GossipStateProviderImpl),
		commit: committer,
		cs:     cs,
		grpc:   gRPCServer,
	}, port
}

// adds a metrics provider, for metrics testing
func newPeerNodeWithGossipWithMetrics(id int, committer committer.Committer,
	acceptor peerIdentityAcceptor, g peerNodeGossipSupport, gossipMetrics *metrics.GossipMetrics) *peerNode {
	node, _ := newPeerNodeWithGossipWithValidatorWithMetrics(id, committer, acceptor, g,
		&validator.MockValidator{}, gossipMetrics)
	return node
}

// Constructs a pseudo peer node, simulating only the gossip and state transfer parts
func newPeerNodeWithGossipWithValidator(id int, committer committer.Committer,
	acceptor peerIdentityAcceptor, g peerNodeGossipSupport, v txvalidator.Validator, bootPorts ...int) *peerNode {
	gossipMetrics := metrics.NewGossipMetrics(&disabled.Provider{})
	node, _ := newPeerNodeWithGossipWithValidatorWithMetrics(id, committer, acceptor, g, v, gossipMetrics, bootPorts...)
	return node
}

// Constructs a pseudo peer node, simulating only the gossip and state transfer parts
func newPeerNode(id int, committer committer.Committer, acceptor peerIdentityAcceptor, bootPorts ...int) *peerNode {
	return newPeerNodeWithGossip(id, committer, acceptor, nil, bootPorts...)
}

// Constructs a pseudo boot node, simulating only the gossip and state transfer parts, and returns its port
func newBootNode(id int, committer committer.Committer, acceptor peerIdentityAcceptor) (node *peerNode, port int) {
	v := &validator.MockValidator{}
	gossipMetrics := metrics.NewGossipMetrics(&disabled.Provider{})
	return newPeerNodeWithGossipWithValidatorWithMetrics(id, committer, acceptor, nil, v, gossipMetrics)
}
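
// For orientation: the constructors above layer as
// newPeerNode -> newPeerNodeWithGossip -> newPeerNodeWithGossipWithValidator ->
// newPeerNodeWithGossipWithValidatorWithMetrics, with each layer filling in a
// default validator, metrics provider, or gossip instance.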

func TestNilDirectMsg(t *testing.T) {
	t.Parallel()
	mc := &mockCommitter{Mock: &mock.Mock{}}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
	defer p.shutdown()
	p.s.handleStateRequest(nil)
	p.s.directMessage(nil)
	sMsg, _ := protoext.NoopSign(p.s.stateRequestMessage(uint64(10), uint64(8)))
	req := &comm.ReceivedMessageImpl{
		SignedGossipMessage: sMsg,
	}
	p.s.directMessage(req)
}

func TestNilAddPayload(t *testing.T) {
	t.Parallel()
	mc := &mockCommitter{Mock: &mock.Mock{}}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
	defer p.shutdown()
	err := p.s.AddPayload(nil)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "nil")
}

func TestAddPayloadLedgerUnavailable(t *testing.T) {
	t.Parallel()
	mc := &mockCommitter{Mock: &mock.Mock{}}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
	defer p.shutdown()
	// Simulate a problem in the ledger
	failedLedger := mock.Mock{}
	failedLedger.On("LedgerHeight", mock.Anything).Return(uint64(0), errors.New("cannot query ledger"))
	mc.Lock()
	mc.Mock = &failedLedger
	mc.Unlock()

	rawblock := protoutil.NewBlock(uint64(1), []byte{})
	b, _ := pb.Marshal(rawblock)
	err := p.s.AddPayload(&proto.Payload{
		SeqNum: uint64(1),
		Data:   b,
	})
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "Failed obtaining ledger height")
	assert.Contains(t, err.Error(), "cannot query ledger")
}
func TestLargeBlockGap(t *testing.T) {
	// Scenario: the peer knows of a peer whose ledger height is much higher
	// than its own (500 blocks higher).
	// The peer needs to request blocks in a way such that the size of the
	// payload buffer never rises above a certain threshold.
	t.Parallel()
	mc := &mockCommitter{Mock: &mock.Mock{}}
	blocksPassedToLedger := make(chan uint64, 200)
	mc.On("CommitLegacy", mock.Anything).Run(func(arg mock.Arguments) {
		blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
	})
	msgsFromPeer := make(chan protoext.ReceivedMessage)
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
	g := &mocks.GossipMock{}
	membership := []discovery.NetworkMember{
		{
			PKIid:    common.PKIidType("a"),
			Endpoint: "a",
			Properties: &proto.Properties{
				LedgerHeight: 500,
			},
		}}
	g.On("PeersOfChannel", mock.Anything).Return(membership)
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, msgsFromPeer)
	g.On("Send", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
		msg := arguments.Get(0).(*proto.GossipMessage)
		// The peer sent a state request
		req := msg.GetStateRequest()
		// Construct a skeleton for the response
		res := &proto.GossipMessage{
			Nonce:   msg.Nonce,
			Channel: []byte("testchannelid"),
			Content: &proto.GossipMessage_StateResponse{
				StateResponse: &proto.RemoteStateResponse{},
			},
		}
		// Populate the response with payloads according to what the peer asked
		for seq := req.StartSeqNum; seq <= req.EndSeqNum; seq++ {
			rawblock := protoutil.NewBlock(seq, []byte{})
			b, _ := pb.Marshal(rawblock)
			payload := &proto.Payload{
				SeqNum: seq,
				Data:   b,
			}
			res.GetStateResponse().Payloads = append(res.GetStateResponse().Payloads, payload)
		}
		// Finally, send the response down the channel the peer expects to receive it from
		sMsg, _ := protoext.NoopSign(res)
		msgsFromPeer <- &comm.ReceivedMessageImpl{
			SignedGossipMessage: sMsg,
		}
	})
	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
	defer p.shutdown()

	// Process blocks at a rate of one block per 20 milliseconds, simulating
	// the remote peer that responds to state requests.
	// If the payload buffer expands above defMaxBlockDistance*2 + defAntiEntropyBatchSize blocks, fail the test
	blockProcessingTime := 20 * time.Millisecond // 10 seconds for all 500 blocks
	expectedSequence := 1
	for expectedSequence < 500 {
		blockSeq := <-blocksPassedToLedger
		assert.Equal(t, expectedSequence, int(blockSeq))
		// Ensure payload buffer isn't over-populated
		assert.True(t, p.s.payloads.Size() <= defMaxBlockDistance*2+defAntiEntropyBatchSize, "payload buffer size is %d", p.s.payloads.Size())
		expectedSequence++
		time.Sleep(blockProcessingTime)
	}
}
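
// For orientation (assuming this package's defaults of defMaxBlockDistance = 100
// and defAntiEntropyBatchSize = 10): the assertion above caps the payload buffer
// at 100*2 + 10 = 210 payloads, even though the remote peer is 500 blocks ahead.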

func TestOverPopulation(t *testing.T) {
	// Scenario: Add blocks to the state provider with a gap in between,
	// and ensure that the payload buffer rejects blocks once the distance
	// between the ledger height and the latest block it contains is bigger
	// than defMaxBlockDistance.
	t.Parallel()
	mc := &mockCommitter{Mock: &mock.Mock{}}
	blocksPassedToLedger := make(chan uint64, 10)
	mc.On("CommitLegacy", mock.Anything).Run(func(arg mock.Arguments) {
		blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
	})
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
	p := newPeerNode(0, mc, noopPeerIdentityAcceptor)
	defer p.shutdown()

	// Add some blocks in a sequential manner and make sure it works
	for i := 1; i <= 4; i++ {
		rawblock := protoutil.NewBlock(uint64(i), []byte{})
		b, _ := pb.Marshal(rawblock)
		assert.NoError(t, p.s.addPayload(&proto.Payload{
			SeqNum: uint64(i),
			Data:   b,
		}, nonBlocking))
	}

	// Add payloads from 10 to defMaxBlockDistance, while we're missing blocks [5,9]
	// Should succeed
	for i := 10; i <= defMaxBlockDistance; i++ {
		rawblock := protoutil.NewBlock(uint64(i), []byte{})
		b, _ := pb.Marshal(rawblock)
		assert.NoError(t, p.s.addPayload(&proto.Payload{
			SeqNum: uint64(i),
			Data:   b,
		}, nonBlocking))
	}

	// Add payloads from defMaxBlockDistance + 1 to defMaxBlockDistance * 10
	// Should fail.
	for i := defMaxBlockDistance + 1; i <= defMaxBlockDistance*10; i++ {
		rawblock := protoutil.NewBlock(uint64(i), []byte{})
		b, _ := pb.Marshal(rawblock)
		assert.Error(t, p.s.addPayload(&proto.Payload{
			SeqNum: uint64(i),
			Data:   b,
		}, nonBlocking))
	}

	// Ensure only blocks 1-4 were passed to the ledger
	close(blocksPassedToLedger)
	i := 1
	for seq := range blocksPassedToLedger {
		assert.Equal(t, uint64(i), seq)
		i++
	}
	assert.Equal(t, 5, i)

	// Ensure we don't store too many blocks in memory
	sp := p.s
	assert.True(t, sp.payloads.Size() < defMaxBlockDistance)
}
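
// Note (summary, not part of the original test): addPayload in nonBlocking mode
// is expected to reject payloads whose sequence number is more than
// defMaxBlockDistance ahead of the ledger height, whereas the blocking mode used
// by AddPayload waits for the buffer to drain instead of returning an error.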

func TestBlockingEnqueue(t *testing.T) {
	// Scenario: In parallel, get blocks from gossip and from the orderer.
	// We get twice as many blocks from the orderer as from gossip.
	// The blocks we get from gossip have random sequence numbers, to maximize disruption.
	t.Parallel()
	mc := &mockCommitter{Mock: &mock.Mock{}}
	blocksPassedToLedger := make(chan uint64, 10)
	mc.On("CommitLegacy", mock.Anything).Run(func(arg mock.Arguments) {
		blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
	})
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
	p := newPeerNode(0, mc, noopPeerIdentityAcceptor)
	defer p.shutdown()

	numBlocksReceived := 500
	receivedBlockCount := 0
	// Get a block from the orderer every 1ms
	go func() {
		for i := 1; i <= numBlocksReceived; i++ {
			rawblock := protoutil.NewBlock(uint64(i), []byte{})
			b, _ := pb.Marshal(rawblock)
			block := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			p.s.AddPayload(block)
			time.Sleep(time.Millisecond)
		}
	}()

	// Get a block from gossip every 1ms too
	go func() {
		rand.Seed(time.Now().UnixNano())
		for i := 1; i <= numBlocksReceived/2; i++ {
			blockSeq := rand.Intn(numBlocksReceived)
			rawblock := protoutil.NewBlock(uint64(blockSeq), []byte{})
			b, _ := pb.Marshal(rawblock)
			block := &proto.Payload{
				SeqNum: uint64(blockSeq),
				Data:   b,
			}
			p.s.addPayload(block, nonBlocking)
			time.Sleep(time.Millisecond)
		}
	}()

	for {
		receivedBlock := <-blocksPassedToLedger
		receivedBlockCount++
		m := &mock.Mock{}
		m.On("LedgerHeight", mock.Anything).Return(receivedBlock, nil)
		m.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
		m.On("CommitLegacy", mock.Anything).Run(func(arg mock.Arguments) {
			blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
		})
		mc.Lock()
		mc.Mock = m
		mc.Unlock()
		assert.Equal(t, receivedBlock, uint64(receivedBlockCount))
		if receivedBlockCount == numBlocksReceived {
			break
		}
		time.Sleep(time.Millisecond * 10)
	}
}

func TestHaltChainProcessing(t *testing.T) {
	gossipChannel := func(c chan *proto.GossipMessage) <-chan *proto.GossipMessage {
		return c
	}
	makeBlock := func(seq int) []byte {
		b := &pcomm.Block{
			Header: &pcomm.BlockHeader{
				Number: uint64(seq),
			},
			Data: &pcomm.BlockData{
				Data: [][]byte{},
			},
			Metadata: &pcomm.BlockMetadata{
				Metadata: [][]byte{
					{}, {}, {}, {},
				},
			},
		}
		data, _ := pb.Marshal(b)
		return data
	}
	newBlockMsg := func(i int) *proto.GossipMessage {
		return &proto.GossipMessage{
			Channel: []byte("testchannelid"),
			Content: &proto.GossipMessage_DataMsg{
				DataMsg: &proto.DataMessage{
					Payload: &proto.Payload{
						SeqNum: uint64(i),
						Data:   makeBlock(i),
					},
				},
			},
		}
	}

	buf := gbytes.NewBuffer()
	logging, err := flogging.New(flogging.Config{
		LogSpec: "debug",
		Writer:  buf,
	})
	assert.NoError(t, err, "failed to create logging")

	defer func(l gossiputil.Logger) { logger = l }(logger)
	l := logging.Logger("state_test")
	logger = l

	mc := &mockCommitter{Mock: &mock.Mock{}}
	mc.On("CommitLegacy", mock.Anything)
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	g := &mocks.GossipMock{}
	gossipMsgs := make(chan *proto.GossipMessage)

	g.On("Accept", mock.Anything, false).Return(gossipChannel(gossipMsgs), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})

	v := &validator.MockValidator{}
	v.On("Validate").Return(&errors2.VSCCExecutionFailureError{
		Err: errors.New("foobar"),
	}).Once()
	peerNode := newPeerNodeWithGossipWithValidator(0, mc, noopPeerIdentityAcceptor, g, v)
	defer peerNode.shutdown()
	gossipMsgs <- newBlockMsg(1)

	assertLogged(t, buf, "Got error while committing")
	assertLogged(t, buf, "Aborting chain processing")
	assertLogged(t, buf, "foobar")
}

func TestFailures(t *testing.T) {
	t.Parallel()
	mc := &mockCommitter{Mock: &mock.Mock{}}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(0), nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
	assert.Panics(t, func() {
		newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
	})
	// Reprogram mock
	mc.Mock = &mock.Mock{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), errors.New("Failed accessing ledger"))
	assert.Nil(t, newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g))
}

func TestGossipReception(t *testing.T) {
	t.Parallel()
	signalChan := make(chan struct{})
	rawblock := &pcomm.Block{
		Header: &pcomm.BlockHeader{
			Number: uint64(1),
		},
		Data: &pcomm.BlockData{
			Data: [][]byte{},
		},
		Metadata: &pcomm.BlockMetadata{
			Metadata: [][]byte{
				{}, {}, {}, {},
			},
		},
	}
	b, _ := pb.Marshal(rawblock)

	newMsg := func(channel string) *proto.GossipMessage {
		return &proto.GossipMessage{
			Channel: []byte(channel),
			Content: &proto.GossipMessage_DataMsg{
				DataMsg: &proto.DataMessage{
					Payload: &proto.Payload{
						SeqNum: 1,
						Data:   b,
					},
				},
			},
		}
	}

	createChan := func(signalChan chan struct{}) <-chan *proto.GossipMessage {
		c := make(chan *proto.GossipMessage)

		go func(c chan *proto.GossipMessage) {
			// Wait for Accept() to be called
			<-signalChan
			// Simulate a message reception from the gossip component with an invalid channel
			c <- newMsg("AAA")
			// Simulate a message reception from the gossip component
			c <- newMsg("testchannelid")
		}(c)
		return c
	}

	g := &mocks.GossipMock{}
	rmc := createChan(signalChan)
	g.On("Accept", mock.Anything, false).Return(rmc, nil).Run(func(_ mock.Arguments) {
		signalChan <- struct{}{}
	})
	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
	mc := &mockCommitter{Mock: &mock.Mock{}}
	receivedChan := make(chan struct{})
	mc.On("CommitLegacy", mock.Anything).Run(func(arguments mock.Arguments) {
		block := arguments.Get(0).(*pcomm.Block)
		assert.Equal(t, uint64(1), block.Header.Number)
		receivedChan <- struct{}{}
	})
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
	defer p.shutdown()
	select {
	case <-receivedChan:
	case <-time.After(time.Second * 15):
		assert.Fail(t, "Didn't commit a block in a timely manner")
	}
}

func TestLedgerHeightFromProperties(t *testing.T) {
	// Scenario: For each test, spawn a peer and supply it
	// with a specific mock of PeersOfChannel from peers that
	// either set the ledger-height properties or don't.
	// Ensure the selection logic handles both cases as needed.

	t.Parallel()
	// Returns whether the given networkMember was selected or not
	wasNetworkMemberSelected := func(t *testing.T, networkMember discovery.NetworkMember, wg *sync.WaitGroup) bool {
		var wasGivenNetworkMemberSelected int32
		finChan := make(chan struct{})
		g := &mocks.GossipMock{}
		g.On("Send", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
			defer wg.Done()
			msg := arguments.Get(0).(*proto.GossipMessage)
			assert.NotNil(t, msg.GetStateRequest())
			peer := arguments.Get(1).([]*comm.RemotePeer)[0]
			if bytes.Equal(networkMember.PKIid, peer.PKIID) {
				atomic.StoreInt32(&wasGivenNetworkMemberSelected, 1)
			}
			finChan <- struct{}{}
		})
		g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
		g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
		defaultPeer := discovery.NetworkMember{
			InternalEndpoint: "b",
			PKIid:            common.PKIidType("b"),
			Properties: &proto.Properties{
				LedgerHeight: 5,
			},
		}
		g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{
			defaultPeer,
			networkMember,
		})
		mc := &mockCommitter{Mock: &mock.Mock{}}
		mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
		p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
		defer p.shutdown()
		select {
		case <-time.After(time.Second * 20):
			t.Fatal("Didn't send a request in a timely manner")
		case <-finChan:
		}
		return atomic.LoadInt32(&wasGivenNetworkMemberSelected) == 1
	}

	peerWithProperties := discovery.NetworkMember{
		PKIid: common.PKIidType("peerWithProperties"),
		Properties: &proto.Properties{
			LedgerHeight: 10,
		},
		InternalEndpoint: "peerWithProperties",
	}

	peerWithoutProperties := discovery.NetworkMember{
		PKIid:            common.PKIidType("peerWithoutProperties"),
		InternalEndpoint: "peerWithoutProperties",
	}

	tests := []struct {
		shouldGivenBeSelected bool
		member                discovery.NetworkMember
	}{
		{member: peerWithProperties, shouldGivenBeSelected: true},
		{member: peerWithoutProperties, shouldGivenBeSelected: false},
	}

	var wg sync.WaitGroup
	wg.Add(len(tests))
	for _, tst := range tests {
		go func(shouldGivenBeSelected bool, member discovery.NetworkMember) {
			assert.Equal(t, shouldGivenBeSelected, wasNetworkMemberSelected(t, member, &wg))
		}(tst.shouldGivenBeSelected, tst.member)
	}
	wg.Wait()
}

func TestAccessControl(t *testing.T) {
	t.Parallel()
	bootstrapSetSize := 5
	bootstrapSet := make([]*peerNode, 0)

	authorizedPeersSize := 4
	var listeners []net.Listener
	var endpoints []string

	for i := 0; i < authorizedPeersSize; i++ {
		ll, err := net.Listen("tcp", "127.0.0.1:0")
		assert.NoError(t, err)
		listeners = append(listeners, ll)
		endpoint := ll.Addr().String()
		endpoints = append(endpoints, endpoint)
	}

	defer func() {
		for _, ll := range listeners {
			ll.Close()
		}
	}()

	authorizedPeers := map[string]struct{}{
		endpoints[0]: {},
		endpoints[1]: {},
		endpoints[2]: {},
		endpoints[3]: {},
	}

	blockPullPolicy := func(identity api.PeerIdentityType) error {
		if _, isAuthorized := authorizedPeers[string(identity)]; isAuthorized {
			return nil
		}
		return errors.New("Not authorized")
	}

	var bootPorts []int

	for i := 0; i < bootstrapSetSize; i++ {
		commit := newCommitter()
		bootPeer, bootPort := newBootNode(i, commit, blockPullPolicy)
		bootstrapSet = append(bootstrapSet, bootPeer)
		bootPorts = append(bootPorts, bootPort)
	}

	defer func() {
		for _, p := range bootstrapSet {
			p.shutdown()
		}
	}()

	msgCount := 5

	for i := 1; i <= msgCount; i++ {
		rawblock := protoutil.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootstrapSet[0].s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	standardPeerSetSize := 10
	peersSet := make([]*peerNode, 0)

	for i := 0; i < standardPeerSetSize; i++ {
		commit := newCommitter()
		peersSet = append(peersSet, newPeerNode(bootstrapSetSize+i, commit, blockPullPolicy, bootPorts...))
	}

	defer func() {
		for _, p := range peersSet {
			p.shutdown()
		}
	}()

	waitUntilTrueOrTimeout(t, func() bool {
		for _, p := range peersSet {
			if len(p.g.PeersOfChannel(common.ChannelID("testchannelid"))) != bootstrapSetSize+standardPeerSetSize-1 {
				t.Log("Peer discovery has not finished yet")
				return false
			}
		}
		t.Log("All peers discovered each other!")
		return true
	}, 30*time.Second)

	t.Log("Waiting for all blocks to arrive.")
	waitUntilTrueOrTimeout(t, func() bool {
		t.Log("Trying to see that all authorized peers got all the blocks, and that all non-authorized peers didn't")
		for _, p := range peersSet {
			height, err := p.commit.LedgerHeight()
			id := fmt.Sprintf("127.0.0.1:%d", p.port)
			if _, isAuthorized := authorizedPeers[id]; isAuthorized {
				if height != uint64(msgCount+1) || err != nil {
					return false
				}
			} else {
				if err == nil && height > 1 {
					assert.Fail(t, "Peer", id, "got message but isn't authorized! Height:", height)
				}
			}
		}
		t.Log("All peers have the same ledger height!")
		return true
	}, 60*time.Second)
}

func TestNewGossipStateProvider_SendingManyMessages(t *testing.T) {
	t.Parallel()
	bootstrapSetSize := 5
	bootstrapSet := make([]*peerNode, 0)

	var bootPorts []int

	for i := 0; i < bootstrapSetSize; i++ {
		commit := newCommitter()
		bootPeer, bootPort := newBootNode(i, commit, noopPeerIdentityAcceptor)
		bootstrapSet = append(bootstrapSet, bootPeer)
		bootPorts = append(bootPorts, bootPort)
	}

	defer func() {
		for _, p := range bootstrapSet {
			p.shutdown()
		}
	}()

	msgCount := 10

	for i := 1; i <= msgCount; i++ {
		rawblock := protoutil.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootstrapSet[0].s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	standardPeersSize := 10
	peersSet := make([]*peerNode, 0)

	for i := 0; i < standardPeersSize; i++ {
		commit := newCommitter()
		peersSet = append(peersSet, newPeerNode(bootstrapSetSize+i, commit, noopPeerIdentityAcceptor, bootPorts...))
	}

	defer func() {
		for _, p := range peersSet {
			p.shutdown()
		}
	}()

	waitUntilTrueOrTimeout(t, func() bool {
		for _, p := range peersSet {
			if len(p.g.PeersOfChannel(common.ChannelID("testchannelid"))) != bootstrapSetSize+standardPeersSize-1 {
				t.Log("Peer discovery has not finished yet")
				return false
			}
		}
		t.Log("All peers discovered each other!")
		return true
	}, 30*time.Second)

	t.Log("Waiting for all blocks to arrive.")
	waitUntilTrueOrTimeout(t, func() bool {
		t.Log("Trying to see all peers get all blocks")
		for _, p := range peersSet {
			height, err := p.commit.LedgerHeight()
			if height != uint64(msgCount+1) || err != nil {
				return false
			}
		}
		t.Log("All peers have the same ledger height!")
		return true
	}, 60*time.Second)
}

// Start one bootstrap peer and submit defAntiEntropyBatchSize + 5 messages into
// the local ledger, then spawn a new peer waiting for the anti-entropy procedure
// to complete the missing blocks. Since state transfer messages are now batched,
// it is expected to see _exactly_ two messages with a state transfer response.
func TestNewGossipStateProvider_BatchingOfStateRequest(t *testing.T) {
	t.Parallel()
	bootPeer, bootPort := newBootNode(0, newCommitter(), noopPeerIdentityAcceptor)
	defer bootPeer.shutdown()

	msgCount := defAntiEntropyBatchSize + 5
	expectedMessagesCnt := 2

	for i := 1; i <= msgCount; i++ {
		rawblock := protoutil.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootPeer.s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	peer := newPeerNode(1, newCommitter(), noopPeerIdentityAcceptor, bootPort)
	defer peer.shutdown()

	naiveStateMsgPredicate := func(message interface{}) bool {
		return protoext.IsRemoteStateMessage(message.(protoext.ReceivedMessage).GetGossipMessage().GossipMessage)
	}
	_, peerCh := peer.g.Accept(naiveStateMsgPredicate, true)

	wg := sync.WaitGroup{}
	wg.Add(expectedMessagesCnt)

	// The number of submitted messages is defAntiEntropyBatchSize + 5, therefore
	// the expected number of batches is expectedMessagesCnt = 2. The following
	// goroutine makes sure it receives the expected number of messages and
	// signals success so the test can continue
	go func() {
		for count := 0; count < expectedMessagesCnt; count++ {
			<-peerCh
			wg.Done()
		}
	}()

	// Once we have an indication that both batches were received,
	// make sure the messages were indeed committed.
	waitUntilTrueOrTimeout(t, func() bool {
		if len(peer.g.PeersOfChannel(common.ChannelID("testchannelid"))) != 1 {
			t.Log("Peer discovery has not finished yet")
			return false
		}
		t.Log("All peers discovered each other!")
		return true
	}, 30*time.Second)

	// Wait for the messages indicating that the expected number of message
	// batches was received; otherwise the test times out after
	// 2 * defAntiEntropyInterval + 1 seconds
	wg.Wait()

	t.Log("Waiting for all blocks to arrive.")
	waitUntilTrueOrTimeout(t, func() bool {
		t.Log("Trying to see all peers get all blocks")
		height, err := peer.commit.LedgerHeight()
		if height != uint64(msgCount+1) || err != nil {
			return false
		}
		t.Log("All peers have the same ledger height!")
		return true
	}, 60*time.Second)
}
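
// For orientation (assuming defAntiEntropyBatchSize is 10, as in this package's
// defaults): msgCount = 10 + 5 = 15 blocks are transferred in batches of at most
// 10 blocks each, i.e. ceil(15/10) = 2 state response messages, which is why
// expectedMessagesCnt above is 2.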

// coordinatorMock is a mock structure capturing the coordinator interface,
// used to simulate the coordinator flow during the test
type coordinatorMock struct {
	committer.Committer
	mock.Mock
}

func (mock *coordinatorMock) GetPvtDataAndBlockByNum(seqNum uint64, _ protoutil.SignedData) (*pcomm.Block, gutil.PvtDataCollections, error) {
	args := mock.Called(seqNum)
	return args.Get(0).(*pcomm.Block), args.Get(1).(gutil.PvtDataCollections), args.Error(2)
}

func (mock *coordinatorMock) GetBlockByNum(seqNum uint64) (*pcomm.Block, error) {
	args := mock.Called(seqNum)
	return args.Get(0).(*pcomm.Block), args.Error(1)
}

func (mock *coordinatorMock) StoreBlock(block *pcomm.Block, data gutil.PvtDataCollections) error {
	args := mock.Called(block, data)
	return args.Error(1)
}

func (mock *coordinatorMock) LedgerHeight() (uint64, error) {
	args := mock.Called()
	return args.Get(0).(uint64), args.Error(1)
}

func (mock *coordinatorMock) Close() {
	mock.Called()
}

// StorePvtData is used to persist private data into the transient store
func (mock *coordinatorMock) StorePvtData(txid string, privData *tspb.TxPvtReadWriteSetWithConfigInfo, blkHeight uint64) error {
	return mock.Called().Error(0)
}

type receivedMessageMock struct {
	mock.Mock
}

// Ack returns to the sender an acknowledgement for the message
func (mock *receivedMessageMock) Ack(err error) {
	// no-op in the mock
}

func (mock *receivedMessageMock) Respond(msg *proto.GossipMessage) {
	mock.Called(msg)
}

func (mock *receivedMessageMock) GetGossipMessage() *protoext.SignedGossipMessage {
	args := mock.Called()
	return args.Get(0).(*protoext.SignedGossipMessage)
}

func (mock *receivedMessageMock) GetSourceEnvelope() *proto.Envelope {
	args := mock.Called()
	return args.Get(0).(*proto.Envelope)
}

func (mock *receivedMessageMock) GetConnectionInfo() *protoext.ConnectionInfo {
	args := mock.Called()
	return args.Get(0).(*protoext.ConnectionInfo)
}

type testData struct {
	block   *pcomm.Block
	pvtData gutil.PvtDataCollections
}
  1295  
  1296  func TestTransferOfPrivateRWSet(t *testing.T) {
  1297  	t.Parallel()
  1298  	chainID := "testChainID"
  1299  
  1300  	// First gossip instance
  1301  	g := &mocks.GossipMock{}
  1302  	coord1 := new(coordinatorMock)
  1303  
  1304  	gossipChannel := make(chan *proto.GossipMessage)
  1305  	commChannel := make(chan protoext.ReceivedMessage)
  1306  
  1307  	gossipChannelFactory := func(ch chan *proto.GossipMessage) <-chan *proto.GossipMessage {
  1308  		return ch
  1309  	}
  1310  
  1311  	g.On("Accept", mock.Anything, false).Return(gossipChannelFactory(gossipChannel), nil)
  1312  	g.On("Accept", mock.Anything, true).Return(nil, commChannel)
  1313  
  1314  	g.On("UpdateChannelMetadata", mock.Anything, mock.Anything)
  1315  	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
  1316  	g.On("Close")
  1317  
  1318  	coord1.On("LedgerHeight", mock.Anything).Return(uint64(5), nil)
  1319  
  1320  	var data = map[uint64]*testData{
  1321  		uint64(2): {
  1322  			block: &pcomm.Block{
  1323  				Header: &pcomm.BlockHeader{
  1324  					Number:       2,
  1325  					DataHash:     []byte{0, 1, 1, 1},
  1326  					PreviousHash: []byte{0, 0, 0, 1},
  1327  				},
  1328  				Data: &pcomm.BlockData{
  1329  					Data: [][]byte{{1}, {2}, {3}},
  1330  				},
  1331  			},
  1332  			pvtData: gutil.PvtDataCollections{
  1333  				{
  1334  					SeqInBlock: uint64(0),
  1335  					WriteSet: &rwset.TxPvtReadWriteSet{
  1336  						DataModel: rwset.TxReadWriteSet_KV,
  1337  						NsPvtRwset: []*rwset.NsPvtReadWriteSet{
  1338  							{
  1339  								Namespace: "myCC:v1",
  1340  								CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
  1341  									{
  1342  										CollectionName: "mysecrectCollection",
  1343  										Rwset:          []byte{1, 2, 3, 4, 5},
  1344  									},
  1345  								},
  1346  							},
  1347  						},
  1348  					},
  1349  				},
  1350  			},
  1351  		},
  1352  
  1353  		uint64(3): {
  1354  			block: &pcomm.Block{
  1355  				Header: &pcomm.BlockHeader{
  1356  					Number:       3,
  1357  					DataHash:     []byte{1, 1, 1, 1},
  1358  					PreviousHash: []byte{0, 1, 1, 1},
  1359  				},
  1360  				Data: &pcomm.BlockData{
  1361  					Data: [][]byte{{4}, {5}, {6}},
  1362  				},
  1363  			},
  1364  			pvtData: gutil.PvtDataCollections{
  1365  				{
  1366  					SeqInBlock: uint64(2),
  1367  					WriteSet: &rwset.TxPvtReadWriteSet{
  1368  						DataModel: rwset.TxReadWriteSet_KV,
  1369  						NsPvtRwset: []*rwset.NsPvtReadWriteSet{
  1370  							{
  1371  								Namespace: "otherCC:v1",
  1372  								CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
  1373  									{
  1374  										CollectionName: "topClassified",
  1375  										Rwset:          []byte{0, 0, 0, 4, 2},
  1376  									},
  1377  								},
  1378  							},
  1379  						},
  1380  					},
  1381  				},
  1382  			},
  1383  		},
  1384  	}
  1385  
  1386  	for seqNum, each := range data {
  1387  		coord1.On("GetPvtDataAndBlockByNum", seqNum).Return(each.block, each.pvtData, nil /* no error*/)
  1388  	}
  1389  
  1390  	coord1.On("Close")
  1391  
  1392  	servicesAdapater := &ServicesMediator{GossipAdapter: g, MCSAdapter: &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}}
  1393  	stateMetrics := metrics.NewGossipMetrics(&disabled.Provider{}).StateMetrics
  1394  	stateConfig := &StateConfig{
  1395  		StateCheckInterval:   DefStateCheckInterval,
  1396  		StateResponseTimeout: DefStateResponseTimeout,
  1397  		StateBatchSize:       DefStateBatchSize,
  1398  		StateMaxRetries:      DefStateMaxRetries,
  1399  		StateBlockBufferSize: DefStateBlockBufferSize,
  1400  		StateChannelSize:     DefStateChannelSize,
  1401  		StateEnabled:         DefStateEnabled,
  1402  	}
  1403  	st := NewGossipStateProvider(chainID, servicesAdapater, coord1, stateMetrics, blocking, stateConfig)
  1404  	defer st.Stop()
  1405  
  1406  	// Mocked state request message
  1407  	requestMsg := new(receivedMessageMock)
  1408  
  1409  	// Get state request message, blocks [2...3]
  1410  	requestGossipMsg := &proto.GossipMessage{
  1411  		// Copy nonce field from the request, so it will be possible to match response
  1412  		Nonce:   1,
  1413  		Tag:     proto.GossipMessage_CHAN_OR_ORG,
  1414  		Channel: []byte(chainID),
  1415  		Content: &proto.GossipMessage_StateRequest{StateRequest: &proto.RemoteStateRequest{
  1416  			StartSeqNum: 2,
  1417  			EndSeqNum:   3,
  1418  		}},
  1419  	}
  1420  
  1421  	msg, _ := protoext.NoopSign(requestGossipMsg)
  1422  
  1423  	requestMsg.On("GetGossipMessage").Return(msg)
  1424  	requestMsg.On("GetConnectionInfo").Return(&protoext.ConnectionInfo{
  1425  		Auth: &protoext.AuthInfo{},
  1426  	})
  1427  
  1428  	// Channel to send responses back
  1429  	responseChannel := make(chan protoext.ReceivedMessage)
  1430  	defer close(responseChannel)
  1431  
  1432  	requestMsg.On("Respond", mock.Anything).Run(func(args mock.Arguments) {
  1433  		// Get the gossip response produced for the state request
  1434  		response := args.Get(0).(*proto.GossipMessage)
  1435  		// Wrap it into a received message
  1436  		receivedMsg := new(receivedMessageMock)
  1437  		// Sign the response
  1438  		msg, _ := protoext.NoopSign(response)
  1439  		// Mock the received message to return the signed response
  1440  		receivedMsg.On("GetGossipMessage").Return(msg)
  1441  		// Send the response back
  1442  		responseChannel <- receivedMsg
  1443  	})
  1444  
  1445  	// Send the request message into the state transfer layer via the communication channel
  1446  	commChannel <- requestMsg
  1447  
  1448  	// The state transfer request should result in a state response
  1449  	response := <-responseChannel
  1450  
  1451  	// Start the assertion section
  1452  	stateResponse := response.GetGossipMessage().GetStateResponse()
  1453  
  1454  	assertion := assert.New(t)
  1455  	// Nonce should equal the nonce of the request
  1456  	assertion.Equal(uint64(1), response.GetGossipMessage().Nonce)
  1457  	// Payload should not be nil
  1458  	assertion.NotNil(stateResponse)
  1459  	assertion.NotNil(stateResponse.Payloads)
  1460  	// Exactly two payloads are expected
  1461  	assertion.Len(stateResponse.Payloads, 2)
  1462  
  1463  	// Assert we have all the data and that it matches what we expected
  1464  	for _, each := range stateResponse.Payloads {
  1465  		block := &pcomm.Block{}
  1466  		err := pb.Unmarshal(each.Data, block)
  1467  		assertion.NoError(err)
  1468  
  1469  		assertion.NotNil(block.Header)
  1470  
  1471  		testBlock, ok := data[block.Header.Number]
  1472  		assertion.True(ok)
  1473  
  1474  		for i, d := range testBlock.block.Data.Data {
  1475  			assertion.True(bytes.Equal(d, block.Data.Data[i]))
  1476  		}
  1477  
  1478  		for i, p := range testBlock.pvtData {
  1479  			pvtDataPayload := &proto.PvtDataPayload{}
  1480  			err := pb.Unmarshal(each.PrivateData[i], pvtDataPayload)
  1481  			assertion.NoError(err)
  1482  			pvtRWSet := &rwset.TxPvtReadWriteSet{}
  1483  			err = pb.Unmarshal(pvtDataPayload.Payload, pvtRWSet)
  1484  			assertion.NoError(err)
  1485  			assertion.True(pb.Equal(p.WriteSet, pvtRWSet))
  1486  		}
  1487  	}
  1488  }
  1489  
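        // testPeer embeds a GossipMock and exposes dedicated gossip and direct
        // communication channels, letting tests inject messages into a single
        // peer's state transfer layer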
  1490  type testPeer struct {
  1491  	*mocks.GossipMock
  1492  	id            string
  1493  	gossipChannel chan *proto.GossipMessage
  1494  	commChannel   chan protoext.ReceivedMessage
  1495  	coord         *coordinatorMock
  1496  }
  1497  
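        // Gossip returns the channel from which this peer reads gossiped messages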
  1498  func (t testPeer) Gossip() <-chan *proto.GossipMessage {
  1499  	return t.gossipChannel
  1500  }
  1501  
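        // Comm returns the channel from which this peer reads direct (point-to-point) messages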
  1502  func (t testPeer) Comm() chan protoext.ReceivedMessage {
  1503  	return t.commChannel
  1504  }
  1505  
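        // peers holds the two in-memory peers used by TestTransferOfPvtDataBetweenPeers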
  1506  var peers = map[string]testPeer{
  1507  	"peer1": {
  1508  		id:            "peer1",
  1509  		gossipChannel: make(chan *proto.GossipMessage),
  1510  		commChannel:   make(chan protoext.ReceivedMessage),
  1511  		GossipMock:    &mocks.GossipMock{},
  1512  		coord:         new(coordinatorMock),
  1513  	},
  1514  	"peer2": {
  1515  		id:            "peer2",
  1516  		gossipChannel: make(chan *proto.GossipMessage),
  1517  		commChannel:   make(chan protoext.ReceivedMessage),
  1518  		GossipMock:    &mocks.GossipMock{},
  1519  		coord:         new(coordinatorMock),
  1520  	},
  1521  }
  1522  
  1523  func TestTransferOfPvtDataBetweenPeers(t *testing.T) {
  1524  	/*
  1525  	   This test covers a basic scenario with two peers, "peer1" and "peer2",
  1526  	   where peer2 is missing a block in its ledger and therefore asks to
  1527  	   replicate it from the first peer.
  1528  
  1529  	   The test checks that the block replicated from one peer to the other
  1530  	   arrives with identical content.
  1531  	*/
  1532  	t.Parallel()
  1533  	chainID := "testChainID"
  1534  
  1535  	// Initialize the peers' mocks
  1536  	for _, peer := range peers {
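        		// Accept(_, false) yields the gossip message channel; Accept(_, true)
        		// yields the direct-communication channel once, then an idle channel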
  1537  		peer.On("Accept", mock.Anything, false).Return(peer.Gossip(), nil)
  1538  
  1539  		peer.On("Accept", mock.Anything, true).
  1540  			Return(nil, peer.Comm()).
  1541  			Once().
  1542  			On("Accept", mock.Anything, true).
  1543  			Return(nil, make(chan protoext.ReceivedMessage))
  1544  
  1545  		peer.On("UpdateChannelMetadata", mock.Anything, mock.Anything)
  1546  		peer.coord.On("Close")
  1547  		peer.On("Close")
  1548  	}
  1549  
  1550  	// The first peer has the more advanced ledger
  1551  	peers["peer1"].coord.On("LedgerHeight", mock.Anything).Return(uint64(3), nil)
  1552  
  1553  	// The second peer is one block behind, so it will have to replicate that block from the first peer
  1554  	peers["peer2"].coord.On("LedgerHeight", mock.Anything).Return(uint64(2), nil)
  1555  
  1556  	peers["peer1"].coord.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
  1557  	peers["peer2"].coord.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
  1558  
  1559  	peers["peer1"].coord.On("GetPvtDataAndBlockByNum", uint64(2)).Return(&pcomm.Block{
  1560  		Header: &pcomm.BlockHeader{
  1561  			Number:       2,
  1562  			DataHash:     []byte{0, 0, 0, 1},
  1563  			PreviousHash: []byte{0, 1, 1, 1},
  1564  		},
  1565  		Data: &pcomm.BlockData{
  1566  			Data: [][]byte{{4}, {5}, {6}},
  1567  		},
  1568  	}, gutil.PvtDataCollections{&ledger.TxPvtData{
  1569  		SeqInBlock: uint64(1),
  1570  		WriteSet: &rwset.TxPvtReadWriteSet{
  1571  			DataModel: rwset.TxReadWriteSet_KV,
  1572  			NsPvtRwset: []*rwset.NsPvtReadWriteSet{
  1573  				{
  1574  					Namespace: "myCC:v1",
  1575  					CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
  1576  						{
  1577  							CollectionName: "mysecretCollection",
  1578  							Rwset:          []byte{1, 2, 3, 4, 5},
  1579  						},
  1580  					},
  1581  				},
  1582  			},
  1583  		},
  1584  	}}, nil)
  1585  
  1586  	// Define the peers' membership views; each peer sees only the other
  1587  	member2 := discovery.NetworkMember{
  1588  		PKIid:            common.PKIidType([]byte{2}),
  1589  		Endpoint:         "peer2:7051",
  1590  		InternalEndpoint: "peer2:7051",
  1591  		Properties: &proto.Properties{
  1592  			LedgerHeight: 2,
  1593  		},
  1594  	}
  1595  
  1596  	member1 := discovery.NetworkMember{
  1597  		PKIid:            common.PKIidType([]byte{1}),
  1598  		Endpoint:         "peer1:7051",
  1599  		InternalEndpoint: "peer1:7051",
  1600  		Properties: &proto.Properties{
  1601  			LedgerHeight: 3,
  1602  		},
  1603  	}
  1604  
  1605  	peers["peer1"].On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{member2})
  1606  	peers["peer2"].On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{member1})
  1607  
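        	// Intercept peer2's outbound request and re-inject it into peer1's
        	// comm channel; peer1's response is routed back into peer2's comm
        	// channel, emulating the network between the two peers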
  1608  	peers["peer2"].On("Send", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
  1609  		request := args.Get(0).(*proto.GossipMessage)
  1610  		requestMsg := new(receivedMessageMock)
  1611  		msg, _ := protoext.NoopSign(request)
  1612  		requestMsg.On("GetGossipMessage").Return(msg)
  1613  		requestMsg.On("GetConnectionInfo").Return(&protoext.ConnectionInfo{
  1614  			Auth: &protoext.AuthInfo{},
  1615  		})
  1616  
  1617  		requestMsg.On("Respond", mock.Anything).Run(func(args mock.Arguments) {
  1618  			response := args.Get(0).(*proto.GossipMessage)
  1619  			receivedMsg := new(receivedMessageMock)
  1620  			msg, _ := protoext.NoopSign(response)
  1621  			receivedMsg.On("GetGossipMessage").Return(msg)
  1622  			// Send response back to the peer
  1623  			peers["peer2"].commChannel <- receivedMsg
  1624  		})
  1625  
  1626  		peers["peer1"].commChannel <- requestMsg
  1627  	})
  1628  
  1629  	wg := sync.WaitGroup{}
  1630  	wg.Add(1)
  1631  	peers["peer2"].coord.On("StoreBlock", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
  1632  		wg.Done() // Signal once the second peer commits the block
  1633  	}).Return([]string{}, nil) // No missing private data and no error
  1634  
  1635  	cryptoService := &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}
  1636  
  1637  	stateMetrics := metrics.NewGossipMetrics(&disabled.Provider{}).StateMetrics
  1638  
  1639  	mediator := &ServicesMediator{GossipAdapter: peers["peer1"], MCSAdapter: cryptoService}
  1640  	stateConfig := &StateConfig{
  1641  		StateCheckInterval:   DefStateCheckInterval,
  1642  		StateResponseTimeout: DefStateResponseTimeout,
  1643  		StateBatchSize:       DefStateBatchSize,
  1644  		StateMaxRetries:      DefStateMaxRetries,
  1645  		StateBlockBufferSize: DefStateBlockBufferSize,
  1646  		StateChannelSize:     DefStateChannelSize,
  1647  		StateEnabled:         DefStateEnabled,
  1648  	}
  1649  	peer1State := NewGossipStateProvider(chainID, mediator, peers["peer1"].coord, stateMetrics, blocking, stateConfig)
  1650  	defer peer1State.Stop()
  1651  
  1652  	mediator = &ServicesMediator{GossipAdapter: peers["peer2"], MCSAdapter: cryptoService}
  1653  	peer2State := NewGossipStateProvider(chainID, mediator, peers["peer2"].coord, stateMetrics, blocking, stateConfig)
  1654  	defer peer2State.Stop()
  1655  
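        	// Both providers now run their anti-entropy loops; peer2 should detect
        	// the one-block height gap and pull block 2 from peer1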
  1656  	// Make sure state was replicated
  1657  	done := make(chan struct{})
  1658  	go func() {
  1659  		wg.Wait()
  1660  		done <- struct{}{}
  1661  	}()
  1662  
  1663  	select {
  1664  	case <-done:
  1665  		// State was replicated within the timeout
  1666  	case <-time.After(30 * time.Second):
  1667  		t.Fatal("State wasn't replicated within the timeout")
  1668  	}
  1669  }
  1670  
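        // TestStateRequestValidator exercises the [start, end) bounds checks that
        // a remote state request must pass, including the configured batch size limit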
  1671  func TestStateRequestValidator(t *testing.T) {
  1672  	validator := &stateRequestValidator{}
  1673  	err := validator.validate(&proto.RemoteStateRequest{
  1674  		StartSeqNum: 10,
  1675  		EndSeqNum:   5,
  1676  	}, defAntiEntropyBatchSize)
  1677  	assert.Error(t, err)
  1678  	assert.Contains(t, err.Error(), "Invalid sequence interval [10...5).")
  1679  
  1680  	err = validator.validate(&proto.RemoteStateRequest{
  1681  		StartSeqNum: 10,
  1682  		EndSeqNum:   30,
  1683  	}, defAntiEntropyBatchSize)
  1684  	assert.Error(t, err)
  1685  	assert.Contains(t, err.Error(), "Requesting blocks range [10-30) greater than configured")
  1686  
  1687  	err = validator.validate(&proto.RemoteStateRequest{
  1688  		StartSeqNum: 10,
  1689  		EndSeqNum:   20,
  1690  	}, defAntiEntropyBatchSize)
  1691  	assert.NoError(t, err)
  1692  }
  1693  
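        // waitUntilTrueOrTimeout polls predicate once per second until it returns
        // true, failing the test if timeout expires first. An illustrative use,
        // assuming a hypothetical atomic flag set by the code under test:
        //
        //	var committed int32
        //	waitUntilTrueOrTimeout(t, func() bool {
        //		return atomic.LoadInt32(&committed) == 1
        //	}, 30*time.Second)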
  1694  func waitUntilTrueOrTimeout(t *testing.T, predicate func() bool, timeout time.Duration) {
  1695  	ch := make(chan struct{})
  1696  	t.Log("Started to poll until the predicate is satisfied.")
  1697  
  1698  	go func() {
  1699  		ticker := time.NewTicker(time.Second)
  1700  		defer ticker.Stop()
  1701  		for !predicate() {
  1702  			select {
  1703  			case <-ch:
  1704  				return
  1705  			case <-ticker.C:
  1706  			}
  1707  		}
  1708  		close(ch)
  1709  	}()
  1710  
  1711  	select {
  1712  	case <-ch:
  1713  		t.Log("Done.")
  1714  	case <-time.After(timeout):
  1715  		t.Fatal("Timeout has expired")
  1716  	}
  1717  	t.Log("Stopped waiting; the predicate was satisfied.")
  1718  }
  1723  
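        // assertLogged waits up to 30 seconds for msg to appear in the captured
        // log buffer, failing the test if it never shows up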
  1724  func assertLogged(t *testing.T, buf *gbytes.Buffer, msg string) {
  1725  	observed := func() bool { return strings.Contains(string(buf.Contents()), msg) }
  1726  	waitUntilTrueOrTimeout(t, observed, 30*time.Second)
  1727  }