github.com/yous1230/fabric@v2.0.0-beta.0.20191224111736-74345bee6ac2+incompatible/gossip/state/state_test.go

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package state
     8  
     9  import (
    10  	"bytes"
    11  	"errors"
    12  	"fmt"
    13  	"math/rand"
    14  	"net"
    15  	"strings"
    16  	"sync"
    17  	"sync/atomic"
    18  	"testing"
    19  	"time"
    20  
    21  	pb "github.com/golang/protobuf/proto"
    22  	pcomm "github.com/hyperledger/fabric-protos-go/common"
    23  	proto "github.com/hyperledger/fabric-protos-go/gossip"
    24  	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
    25  	tspb "github.com/hyperledger/fabric-protos-go/transientstore"
    26  	"github.com/hyperledger/fabric/bccsp/factory"
    27  	"github.com/hyperledger/fabric/common/configtx/test"
    28  	errors2 "github.com/hyperledger/fabric/common/errors"
    29  	"github.com/hyperledger/fabric/common/flogging"
    30  	"github.com/hyperledger/fabric/common/metrics/disabled"
    31  	corecomm "github.com/hyperledger/fabric/core/comm"
    32  	"github.com/hyperledger/fabric/core/committer"
    33  	"github.com/hyperledger/fabric/core/committer/txvalidator"
    34  	"github.com/hyperledger/fabric/core/ledger"
    35  	"github.com/hyperledger/fabric/core/mocks/validator"
    36  	"github.com/hyperledger/fabric/core/transientstore"
    37  	"github.com/hyperledger/fabric/gossip/api"
    38  	"github.com/hyperledger/fabric/gossip/comm"
    39  	"github.com/hyperledger/fabric/gossip/common"
    40  	"github.com/hyperledger/fabric/gossip/discovery"
    41  	"github.com/hyperledger/fabric/gossip/gossip"
    42  	"github.com/hyperledger/fabric/gossip/gossip/algo"
    43  	"github.com/hyperledger/fabric/gossip/gossip/channel"
    44  	"github.com/hyperledger/fabric/gossip/metrics"
    45  	"github.com/hyperledger/fabric/gossip/privdata"
    46  	capabilitymock "github.com/hyperledger/fabric/gossip/privdata/mocks"
    47  	"github.com/hyperledger/fabric/gossip/protoext"
    48  	"github.com/hyperledger/fabric/gossip/state/mocks"
    49  	gossiputil "github.com/hyperledger/fabric/gossip/util"
    50  	gutil "github.com/hyperledger/fabric/gossip/util"
    51  	"github.com/hyperledger/fabric/protoutil"
    52  	"github.com/onsi/gomega/gbytes"
    53  	"github.com/stretchr/testify/assert"
    54  	"github.com/stretchr/testify/mock"
    55  )
    56  
    57  var (
    58  	orgID = []byte("ORG1")
    59  
    60  	noopPeerIdentityAcceptor = func(identity api.PeerIdentityType) error {
    61  		return nil
    62  	}
    63  )
    64  
    65  type peerIdentityAcceptor func(identity api.PeerIdentityType) error
    66  
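        // joinChanMsg is a trivial api.JoinChannelMessage implementation used to join the test channel.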
    67  type joinChanMsg struct {
    68  }
    69  
    70  func init() {
    71  	gutil.SetupTestLogging()
    72  	factory.InitFactories(nil)
    73  }
    74  
    75  // SequenceNumber returns the sequence number of the block that the message
    76  // is derived from
    77  func (*joinChanMsg) SequenceNumber() uint64 {
    78  	return uint64(time.Now().UnixNano())
    79  }
    80  
    81  // Members returns the organizations of the channel
    82  func (jcm *joinChanMsg) Members() []api.OrgIdentityType {
    83  	return []api.OrgIdentityType{orgID}
    84  }
    85  
    86  // AnchorPeersOf returns the anchor peers of the given organization
    87  func (jcm *joinChanMsg) AnchorPeersOf(org api.OrgIdentityType) []api.AnchorPeer {
    88  	return []api.AnchorPeer{}
    89  }
    90  
    91  type orgCryptoService struct {
    92  }
    93  
    94  // OrgByPeerIdentity returns the OrgIdentityType
    95  // of a given peer identity
    96  func (*orgCryptoService) OrgByPeerIdentity(identity api.PeerIdentityType) api.OrgIdentityType {
    97  	return orgID
    98  }
    99  
   100  // Verify verifies a JoinChannelMessage, returns nil on success,
   101  // and an error on failure
   102  func (*orgCryptoService) Verify(joinChanMsg api.JoinChannelMessage) error {
   103  	return nil
   104  }
   105  
   106  type cryptoServiceMock struct {
   107  	acceptor peerIdentityAcceptor
   108  }
   109  
   110  func (cryptoServiceMock) Expiration(peerIdentity api.PeerIdentityType) (time.Time, error) {
   111  	return time.Now().Add(time.Hour), nil
   112  }
   113  
   114  // GetPKIidOfCert returns the PKI-ID of a peer's identity
   115  func (*cryptoServiceMock) GetPKIidOfCert(peerIdentity api.PeerIdentityType) common.PKIidType {
   116  	return common.PKIidType(peerIdentity)
   117  }
   118  
   119  // VerifyBlock returns nil if the block is properly signed,
   120  // else returns error
   121  func (*cryptoServiceMock) VerifyBlock(channelID common.ChannelID, seqNum uint64, signedBlock *pcomm.Block) error {
   122  	return nil
   123  }
   124  
   125  // Sign signs msg with this peer's signing key and outputs
   126  // the signature if no error occurred.
   127  func (*cryptoServiceMock) Sign(msg []byte) ([]byte, error) {
   128  	clone := make([]byte, len(msg))
   129  	copy(clone, msg)
   130  	return clone, nil
   131  }
   132  
   133  // Verify checks that signature is a valid signature of message under a peer's verification key.
   134  // If the verification succeeded, Verify returns nil meaning no error occurred.
   135  // If peerCert is nil, then the signature is verified against this peer's verification key.
   136  func (*cryptoServiceMock) Verify(peerIdentity api.PeerIdentityType, signature, message []byte) error {
   137  	equal := bytes.Equal(signature, message)
   138  	if !equal {
   139  		return fmt.Errorf("wrong signature: %v, %v", signature, message)
   140  	}
   141  	return nil
   142  }
   143  
   144  // VerifyByChannel checks that signature is a valid signature of message
   145  // under a peer's verification key, but also in the context of a specific channel.
   146  // If the verification succeeded, Verify returns nil meaning no error occurred.
   147  // If peerIdentity is nil, then the signature is verified against this peer's verification key.
   148  func (cs *cryptoServiceMock) VerifyByChannel(channelID common.ChannelID, peerIdentity api.PeerIdentityType, signature, message []byte) error {
   149  	return cs.acceptor(peerIdentity)
   150  }
   151  
   152  func (*cryptoServiceMock) ValidateIdentity(peerIdentity api.PeerIdentityType) error {
   153  	return nil
   154  }
   155  
   156  func bootPeersWithPorts(ports ...int) []string {
   157  	var peers []string
   158  	for _, port := range ports {
   159  		peers = append(peers, fmt.Sprintf("127.0.0.1:%d", port))
   160  	}
   161  	return peers
   162  }
   163  
   164  type peerNodeGossipSupport interface {
   165  	GossipAdapter
   166  	Stop()
   167  	JoinChan(joinMsg api.JoinChannelMessage, channelID common.ChannelID)
   168  }
   169  
   170  // Simple representation of a peer, which includes only the
   171  // communication module, gossip and state transfer
   172  type peerNode struct {
   173  	port   int
   174  	g      peerNodeGossipSupport
   175  	s      *GossipStateProviderImpl
   176  	cs     *cryptoServiceMock
   177  	commit committer.Committer
   178  	grpc   *corecomm.GRPCServer
   179  }
   180  
   181  // shutdown stops all modules used by the peer node
   182  func (node *peerNode) shutdown() {
   183  	node.s.Stop()
   184  	node.g.Stop()
   185  	node.grpc.Stop()
   186  }
   187  
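        // mockCommitter is a thread-safe committer mock; tests may swap its embedded
        // *mock.Mock under the mutex to reprogram expectations at runtime.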
   188  type mockCommitter struct {
   189  	*mock.Mock
   190  	sync.Mutex
   191  }
   192  
   193  func (mc *mockCommitter) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
   194  	args := mc.Called()
   195  	return args.Get(0).(ledger.ConfigHistoryRetriever), args.Error(1)
   196  }
   197  
   198  func (mc *mockCommitter) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
   199  	args := mc.Called(blockNum, filter)
   200  	return args.Get(0).([]*ledger.TxPvtData), args.Error(1)
   201  }
   202  
   203  func (mc *mockCommitter) CommitLegacy(blockAndPvtData *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
   204  	mc.Lock()
   205  	m := mc.Mock
   206  	mc.Unlock()
   207  	m.Called(blockAndPvtData.Block)
   208  	return nil
   209  }
   210  
   211  func (mc *mockCommitter) GetPvtDataAndBlockByNum(seqNum uint64) (*ledger.BlockAndPvtData, error) {
   212  	args := mc.Called(seqNum)
   213  	return args.Get(0).(*ledger.BlockAndPvtData), args.Error(1)
   214  }
   215  
   216  func (mc *mockCommitter) LedgerHeight() (uint64, error) {
   217  	mc.Lock()
   218  	m := mc.Mock
   219  	mc.Unlock()
   220  	args := m.Called()
   221  	if args.Get(1) == nil {
   222  		return args.Get(0).(uint64), nil
   223  	}
   224  	return args.Get(0).(uint64), args.Get(1).(error)
   225  }
   226  
   227  func (mc *mockCommitter) DoesPvtDataInfoExistInLedger(blkNum uint64) (bool, error) {
   228  	args := mc.Called(blkNum)
   229  	return args.Get(0).(bool), args.Error(1)
   230  }
   231  
   232  func (mc *mockCommitter) GetBlocks(blockSeqs []uint64) []*pcomm.Block {
   233  	if args := mc.Called(blockSeqs); args.Get(0) != nil {
   234  		return args.Get(0).([]*pcomm.Block)
   235  	}
   236  	return nil
   237  }
   238  
   239  func (*mockCommitter) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) {
   240  	panic("implement me")
   241  }
   242  
   243  func (*mockCommitter) CommitPvtDataOfOldBlocks(reconciledPvtdata []*ledger.ReconciledPvtdata) ([]*ledger.PvtdataHashMismatch, error) {
   244  	panic("implement me")
   245  }
   246  
   247  func (*mockCommitter) Close() {
   248  }
   249  
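        // ramLedger is a minimal in-memory ledger, keyed by block sequence number,
        // that backs the committer used in the multi-peer tests.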
   250  type ramLedger struct {
   251  	ledger map[uint64]*ledger.BlockAndPvtData
   252  	sync.RWMutex
   253  }
   254  
   255  func (mock *ramLedger) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) {
   256  	panic("implement me")
   257  }
   258  
   259  func (mock *ramLedger) CommitPvtDataOfOldBlocks(reconciledPvtdata []*ledger.ReconciledPvtdata) ([]*ledger.PvtdataHashMismatch, error) {
   260  	panic("implement me")
   261  }
   262  
   263  func (mock *ramLedger) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
   264  	panic("implement me")
   265  }
   266  
   267  func (mock *ramLedger) GetPvtDataAndBlockByNum(blockNum uint64, filter ledger.PvtNsCollFilter) (*ledger.BlockAndPvtData, error) {
   268  	mock.RLock()
   269  	defer mock.RUnlock()
   270  
   271  	if block, ok := mock.ledger[blockNum]; !ok {
   272  		return nil, fmt.Errorf("no block with seq = %d found", blockNum)
   273  	} else {
   274  		return block, nil
   275  	}
   276  }
   277  
   278  func (mock *ramLedger) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
   279  	panic("implement me")
   280  }
   281  
   282  func (mock *ramLedger) CommitLegacy(blockAndPvtdata *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
   283  	mock.Lock()
   284  	defer mock.Unlock()
   285  
   286  	if blockAndPvtdata != nil && blockAndPvtdata.Block != nil {
   287  		mock.ledger[blockAndPvtdata.Block.Header.Number] = blockAndPvtdata
   288  		return nil
   289  	}
   290  	return errors.New("invalid input: block and private data must not be nil")
   291  }
   292  
   293  func (mock *ramLedger) GetBlockchainInfo() (*pcomm.BlockchainInfo, error) {
   294  	mock.RLock()
   295  	defer mock.RUnlock()
   296  
   297  	currentBlock := mock.ledger[uint64(len(mock.ledger)-1)].Block
   298  	return &pcomm.BlockchainInfo{
   299  		Height:            currentBlock.Header.Number + 1,
   300  		CurrentBlockHash:  protoutil.BlockHeaderHash(currentBlock.Header),
   301  		PreviousBlockHash: currentBlock.Header.PreviousHash,
   302  	}, nil
   303  }
   304  
   305  func (mock *ramLedger) DoesPvtDataInfoExist(blkNum uint64) (bool, error) {
   306  	return false, nil
   307  }
   308  
   309  func (mock *ramLedger) GetBlockByNumber(blockNumber uint64) (*pcomm.Block, error) {
   310  	mock.RLock()
   311  	defer mock.RUnlock()
   312  
   313  	if blockAndPvtData, ok := mock.ledger[blockNumber]; !ok {
   314  		return nil, fmt.Errorf("no block with seq = %d found", blockNumber)
   315  	} else {
   316  		return blockAndPvtData.Block, nil
   317  	}
   318  }
   319  
   320  func (mock *ramLedger) Close() {
   321  
   322  }
   323  
   324  // Create new instance of KVLedger to be used for testing
   325  func newCommitter() committer.Committer {
   326  	cb, _ := test.MakeGenesisBlock("testChain")
   327  	ldgr := &ramLedger{
   328  		ledger: make(map[uint64]*ledger.BlockAndPvtData),
   329  	}
   330  	ldgr.CommitLegacy(&ledger.BlockAndPvtData{Block: cb}, &ledger.CommitOptions{})
   331  	return committer.NewLedgerCommitter(ldgr)
   332  }
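
        // assertGenesisCommitterHeight is an added sketch, not used by the original tests:
        // a committer created by newCommitter holds only the genesis block, so its
        // reported ledger height should be 1 (block number 0 plus one).
        func assertGenesisCommitterHeight(t *testing.T) {
        	c := newCommitter()
        	height, err := c.LedgerHeight()
        	assert.NoError(t, err)
        	assert.Equal(t, uint64(1), height)
        }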
   333  
   334  func newPeerNodeWithGossip(id int, committer committer.Committer,
   335  	acceptor peerIdentityAcceptor, g peerNodeGossipSupport, bootPorts ...int) *peerNode {
   336  	return newPeerNodeWithGossipWithValidator(id, committer, acceptor, g, &validator.MockValidator{}, bootPorts...)
   337  }
   338  
   339  // Constructs a pseudo peer node, simulating only the gossip and state transfer parts
   340  func newPeerNodeWithGossipWithValidatorWithMetrics(id int, committer committer.Committer,
   341  	acceptor peerIdentityAcceptor, g peerNodeGossipSupport, v txvalidator.Validator,
   342  	gossipMetrics *metrics.GossipMetrics, bootPorts ...int) (node *peerNode, port int) {
   343  	cs := &cryptoServiceMock{acceptor: acceptor}
   344  	port, gRPCServer, certs, secureDialOpts, _ := gossiputil.CreateGRPCLayer()
   345  
   346  	if g == nil {
   347  		config := &gossip.Config{
   348  			BindPort:                     port,
   349  			BootstrapPeers:               bootPeersWithPorts(bootPorts...),
   350  			ID:                           fmt.Sprintf("p%d", id),
   351  			MaxBlockCountToStore:         0,
   352  			MaxPropagationBurstLatency:   time.Duration(10) * time.Millisecond,
   353  			MaxPropagationBurstSize:      10,
   354  			PropagateIterations:          1,
   355  			PropagatePeerNum:             3,
   356  			PullInterval:                 time.Duration(4) * time.Second,
   357  			PullPeerNum:                  5,
   358  			InternalEndpoint:             fmt.Sprintf("127.0.0.1:%d", port),
   359  			PublishCertPeriod:            10 * time.Second,
   360  			RequestStateInfoInterval:     4 * time.Second,
   361  			PublishStateInfoInterval:     4 * time.Second,
   362  			TimeForMembershipTracker:     5 * time.Second,
   363  			TLSCerts:                     certs,
   364  			DigestWaitTime:               algo.DefDigestWaitTime,
   365  			RequestWaitTime:              algo.DefRequestWaitTime,
   366  			ResponseWaitTime:             algo.DefResponseWaitTime,
   367  			DialTimeout:                  comm.DefDialTimeout,
   368  			ConnTimeout:                  comm.DefConnTimeout,
   369  			RecvBuffSize:                 comm.DefRecvBuffSize,
   370  			SendBuffSize:                 comm.DefSendBuffSize,
   371  			MsgExpirationTimeout:         channel.DefMsgExpirationTimeout,
   372  			AliveTimeInterval:            discovery.DefAliveTimeInterval,
   373  			AliveExpirationTimeout:       discovery.DefAliveExpirationTimeout,
   374  			AliveExpirationCheckInterval: discovery.DefAliveExpirationCheckInterval,
   375  			ReconnectInterval:            discovery.DefReconnectInterval,
   376  		}
   377  
   378  		selfID := api.PeerIdentityType(config.InternalEndpoint)
   379  		mcs := &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}
   380  		g = gossip.New(config, gRPCServer.Server(), &orgCryptoService{}, mcs, selfID, secureDialOpts, gossipMetrics)
   381  	}
   382  
   383  	g.JoinChan(&joinChanMsg{}, common.ChannelID("testchannelid"))
   384  
   385  	go func() {
   386  		gRPCServer.Start()
   387  	}()
   388  
   389  	// Initialize the pseudo peer simulator, which has only three
   390  	// basic parts: gossip, the private data coordinator and the state provider
   391  
   392  	servicesAdapter := &ServicesMediator{GossipAdapter: g, MCSAdapter: cs}
   393  	coordConfig := privdata.CoordinatorConfig{
   394  		PullRetryThreshold:             0,
   395  		TransientBlockRetention:        1000,
   396  		SkipPullingInvalidTransactions: false,
   397  	}
   398  	capabilityProvider := &capabilitymock.CapabilityProvider{}
   399  	appCapability := &capabilitymock.AppCapabilities{}
   400  	capabilityProvider.On("Capabilities").Return(appCapability)
   401  	appCapability.On("StorePvtDataOfInvalidTx").Return(true)
   402  	coord := privdata.NewCoordinator(privdata.Support{
   403  		Validator:          v,
   404  		Committer:          committer,
   405  		CapabilityProvider: capabilityProvider,
   406  	}, &transientstore.Store{}, protoutil.SignedData{}, gossipMetrics.PrivdataMetrics, coordConfig, nil)
   407  	stateConfig := &StateConfig{
   408  		StateCheckInterval:   DefStateCheckInterval,
   409  		StateResponseTimeout: DefStateResponseTimeout,
   410  		StateBatchSize:       DefStateBatchSize,
   411  		StateMaxRetries:      DefStateMaxRetries,
   412  		StateBlockBufferSize: DefStateBlockBufferSize,
   413  		StateChannelSize:     DefStateChannelSize,
   414  		StateEnabled:         DefStateEnabled,
   415  	}
   416  	sp := NewGossipStateProvider("testchannelid", servicesAdapter, coord, gossipMetrics.StateMetrics, blocking, stateConfig)
   417  	if sp == nil {
   418  		gRPCServer.Stop()
   419  		return nil, port
   420  	}
   421  
   422  	return &peerNode{
   423  		port:   port,
   424  		g:      g,
   425  		s:      sp.(*GossipStateProviderImpl),
   426  		commit: committer,
   427  		cs:     cs,
   428  		grpc:   gRPCServer,
   429  	}, port
   430  
   431  }
   432  
   433  // Constructs a pseudo peer node with a metrics provider, for metrics testing
   434  func newPeerNodeWithGossipWithMetrics(id int, committer committer.Committer,
   435  	acceptor peerIdentityAcceptor, g peerNodeGossipSupport, gossipMetrics *metrics.GossipMetrics) *peerNode {
   436  	node, _ := newPeerNodeWithGossipWithValidatorWithMetrics(id, committer, acceptor, g,
   437  		&validator.MockValidator{}, gossipMetrics)
   438  	return node
   439  }
   440  
   441  // Constructs a pseudo peer node with a custom validator, simulating only the gossip and state transfer parts
   442  func newPeerNodeWithGossipWithValidator(id int, committer committer.Committer,
   443  	acceptor peerIdentityAcceptor, g peerNodeGossipSupport, v txvalidator.Validator, bootPorts ...int) *peerNode {
   444  	gossipMetrics := metrics.NewGossipMetrics(&disabled.Provider{})
   445  	node, _ := newPeerNodeWithGossipWithValidatorWithMetrics(id, committer, acceptor, g, v, gossipMetrics, bootPorts...)
   446  	return node
   447  }
   448  
   449  // Constructs a pseudo peer node, simulating only the gossip and state transfer parts
   450  func newPeerNode(id int, committer committer.Committer, acceptor peerIdentityAcceptor, bootPorts ...int) *peerNode {
   451  	return newPeerNodeWithGossip(id, committer, acceptor, nil, bootPorts...)
   452  }
   453  
   454  // Constructs a pseudo boot node, simulating only the gossip and state transfer parts, and returns its port
   455  func newBootNode(id int, committer committer.Committer, acceptor peerIdentityAcceptor) (node *peerNode, port int) {
   456  	v := &validator.MockValidator{}
   457  	gossipMetrics := metrics.NewGossipMetrics(&disabled.Provider{})
   458  	return newPeerNodeWithGossipWithValidatorWithMetrics(id, committer, acceptor, nil, v, gossipMetrics)
   459  }
   460  
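        // TestNilDirectMsg verifies that nil direct messages and a state request
        // with an inverted range are tolerated without panicking.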
   461  func TestNilDirectMsg(t *testing.T) {
   462  	t.Parallel()
   463  	mc := &mockCommitter{Mock: &mock.Mock{}}
   464  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   465  	g := &mocks.GossipMock{}
   466  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   467  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   468  	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   469  	defer p.shutdown()
   470  	p.s.handleStateRequest(nil)
   471  	p.s.directMessage(nil)
   472  	sMsg, _ := protoext.NoopSign(p.s.stateRequestMessage(uint64(10), uint64(8)))
   473  	req := &comm.ReceivedMessageImpl{
   474  		SignedGossipMessage: sMsg,
   475  	}
   476  	p.s.directMessage(req)
   477  }
   478  
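        // TestNilAddPayload verifies that AddPayload rejects a nil payload with an error.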
   479  func TestNilAddPayload(t *testing.T) {
   480  	t.Parallel()
   481  	mc := &mockCommitter{Mock: &mock.Mock{}}
   482  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   483  	g := &mocks.GossipMock{}
   484  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   485  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   486  	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   487  	defer p.shutdown()
   488  	err := p.s.AddPayload(nil)
   489  	assert.Error(t, err)
   490  	assert.Contains(t, err.Error(), "nil")
   491  }
   492  
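        // TestAddPayloadLedgerUnavailable verifies that AddPayload propagates an error
        // when the ledger height cannot be queried.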
   493  func TestAddPayloadLedgerUnavailable(t *testing.T) {
   494  	t.Parallel()
   495  	mc := &mockCommitter{Mock: &mock.Mock{}}
   496  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   497  	g := &mocks.GossipMock{}
   498  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   499  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   500  	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   501  	defer p.shutdown()
   502  	// Simulate a problem in the ledger
   503  	failedLedger := mock.Mock{}
   504  	failedLedger.On("LedgerHeight", mock.Anything).Return(uint64(0), errors.New("cannot query ledger"))
   505  	mc.Lock()
   506  	mc.Mock = &failedLedger
   507  	mc.Unlock()
   508  
   509  	rawblock := protoutil.NewBlock(uint64(1), []byte{})
   510  	b, _ := pb.Marshal(rawblock)
   511  	err := p.s.AddPayload(&proto.Payload{
   512  		SeqNum: uint64(1),
   513  		Data:   b,
   514  	})
   515  	assert.Error(t, err)
   516  	assert.Contains(t, err.Error(), "Failed obtaining ledger height")
   517  	assert.Contains(t, err.Error(), "cannot query ledger")
   518  }
   519  
   520  func TestLargeBlockGap(t *testing.T) {
   521  	// Scenario: the peer knows of a peer whose ledger height is much higher
   522  	// than its own (500 blocks higher).
   523  	// The peer needs to request blocks in a way such that the size of the payload buffer
   524  	// never rises above a certain threshold.
   525  	t.Parallel()
   526  	mc := &mockCommitter{Mock: &mock.Mock{}}
   527  	blocksPassedToLedger := make(chan uint64, 200)
   528  	mc.On("CommitLegacy", mock.Anything).Run(func(arg mock.Arguments) {
   529  		blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
   530  	})
   531  	msgsFromPeer := make(chan protoext.ReceivedMessage)
   532  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   533  	mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
   534  	g := &mocks.GossipMock{}
   535  	membership := []discovery.NetworkMember{
   536  		{
   537  			PKIid:    common.PKIidType("a"),
   538  			Endpoint: "a",
   539  			Properties: &proto.Properties{
   540  				LedgerHeight: 500,
   541  			},
   542  		}}
   543  	g.On("PeersOfChannel", mock.Anything).Return(membership)
   544  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   545  	g.On("Accept", mock.Anything, true).Return(nil, msgsFromPeer)
   546  	g.On("Send", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
   547  		msg := arguments.Get(0).(*proto.GossipMessage)
   548  		// The peer sent a state request
   549  		req := msg.GetStateRequest()
   550  		// Construct a skeleton for the response
   551  		res := &proto.GossipMessage{
   552  			Nonce:   msg.Nonce,
   553  			Channel: []byte("testchannelid"),
   554  			Content: &proto.GossipMessage_StateResponse{
   555  				StateResponse: &proto.RemoteStateResponse{},
   556  			},
   557  		}
   558  		// Populate the response with payloads according to what the peer asked
   559  		for seq := req.StartSeqNum; seq <= req.EndSeqNum; seq++ {
   560  			rawblock := protoutil.NewBlock(seq, []byte{})
   561  			b, _ := pb.Marshal(rawblock)
   562  			payload := &proto.Payload{
   563  				SeqNum: seq,
   564  				Data:   b,
   565  			}
   566  			res.GetStateResponse().Payloads = append(res.GetStateResponse().Payloads, payload)
   567  		}
   568  		// Finally, send the response down the channel the peer expects to receive it from
   569  		sMsg, _ := protoext.NoopSign(res)
   570  		msgsFromPeer <- &comm.ReceivedMessageImpl{
   571  			SignedGossipMessage: sMsg,
   572  		}
   573  	})
   574  	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   575  	defer p.shutdown()
   576  
   577  	// Process blocks at a rate of 20 milliseconds per block, while the mocked
   578  	// peer above responds to the state requests.
   579  	// If the payload buffer expands above defMaxBlockDistance*2 + defAntiEntropyBatchSize blocks, fail the test
   580  	blockProcessingTime := 20 * time.Millisecond // 10 seconds for total 500 blocks
   581  	expectedSequence := 1
   582  	for expectedSequence < 500 {
   583  		blockSeq := <-blocksPassedToLedger
   584  		assert.Equal(t, expectedSequence, int(blockSeq))
   585  		// Ensure payload buffer isn't over-populated
   586  		assert.True(t, p.s.payloads.Size() <= defMaxBlockDistance*2+defAntiEntropyBatchSize, "payload buffer size is %d", p.s.payloads.Size())
   587  		expectedSequence++
   588  		time.Sleep(blockProcessingTime)
   589  	}
   590  }
   591  
   592  func TestOverPopulation(t *testing.T) {
   593  	// Scenario: Add blocks to the state provider
   594  	// with a gap in between, and ensure that the payload buffer
   595  	// starts rejecting blocks once the distance between the ledger height and the latest
   596  	// block it contains grows bigger than defMaxBlockDistance.
   597  	t.Parallel()
   598  	mc := &mockCommitter{Mock: &mock.Mock{}}
   599  	blocksPassedToLedger := make(chan uint64, 10)
   600  	mc.On("CommitLegacy", mock.Anything).Run(func(arg mock.Arguments) {
   601  		blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
   602  	})
   603  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   604  	mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
   605  	g := &mocks.GossipMock{}
   606  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   607  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   608  	p := newPeerNode(0, mc, noopPeerIdentityAcceptor)
   609  	defer p.shutdown()
   610  
   611  	// Add some blocks in a sequential manner and make sure it works
   612  	for i := 1; i <= 4; i++ {
   613  		rawblock := protoutil.NewBlock(uint64(i), []byte{})
   614  		b, _ := pb.Marshal(rawblock)
   615  		assert.NoError(t, p.s.addPayload(&proto.Payload{
   616  			SeqNum: uint64(i),
   617  			Data:   b,
   618  		}, nonBlocking))
   619  	}
   620  
   621  	// Add payloads from 10 to defMaxBlockDistance, while we're missing blocks [5,9]
   622  	// Should succeed
   623  	for i := 10; i <= defMaxBlockDistance; i++ {
   624  		rawblock := protoutil.NewBlock(uint64(i), []byte{})
   625  		b, _ := pb.Marshal(rawblock)
   626  		assert.NoError(t, p.s.addPayload(&proto.Payload{
   627  			SeqNum: uint64(i),
   628  			Data:   b,
   629  		}, nonBlocking))
   630  	}
   631  
   632  	// Add payloads from defMaxBlockDistance + 1 to defMaxBlockDistance * 10
   633  	// Should fail.
   634  	for i := defMaxBlockDistance + 1; i <= defMaxBlockDistance*10; i++ {
   635  		rawblock := protoutil.NewBlock(uint64(i), []byte{})
   636  		b, _ := pb.Marshal(rawblock)
   637  		assert.Error(t, p.s.addPayload(&proto.Payload{
   638  			SeqNum: uint64(i),
   639  			Data:   b,
   640  		}, nonBlocking))
   641  	}
   642  
   643  	// Ensure only blocks 1-4 were passed to the ledger
   644  	close(blocksPassedToLedger)
   645  	i := 1
   646  	for seq := range blocksPassedToLedger {
   647  		assert.Equal(t, uint64(i), seq)
   648  		i++
   649  	}
   650  	assert.Equal(t, 5, i)
   651  
   652  	// Ensure we don't store too many blocks in memory
   653  	sp := p.s
   654  	assert.True(t, sp.payloads.Size() < defMaxBlockDistance)
   655  }
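
        // withinBlockDistance is an added sketch of the admission rule the test above
        // exercises; the authoritative check lives in addPayload. With ledger height
        // height, a payload with sequence number seq is accepted in non-blocking mode
        // only while seq-height stays below defMaxBlockDistance.
        func withinBlockDistance(seq, height uint64) bool {
        	return seq-height < defMaxBlockDistance
        }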
   656  
   657  func TestBlockingEnqueue(t *testing.T) {
   658  	// Scenario: In parallel, get blocks from gossip and from the orderer.
   659  	// We get twice as many blocks from the orderer as from gossip.
   660  	// The blocks we get from gossip have random sequence numbers, to maximize disruption.
   661  	t.Parallel()
   662  	mc := &mockCommitter{Mock: &mock.Mock{}}
   663  	blocksPassedToLedger := make(chan uint64, 10)
   664  	mc.On("CommitLegacy", mock.Anything).Run(func(arg mock.Arguments) {
   665  		blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
   666  	})
   667  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   668  	mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
   669  	g := &mocks.GossipMock{}
   670  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   671  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   672  	p := newPeerNode(0, mc, noopPeerIdentityAcceptor)
   673  	defer p.shutdown()
   674  
   675  	numBlocksReceived := 500
   676  	receivedBlockCount := 0
   677  	// Get a block from the orderer every 1ms
   678  	go func() {
   679  		for i := 1; i <= numBlocksReceived; i++ {
   680  			rawblock := protoutil.NewBlock(uint64(i), []byte{})
   681  			b, _ := pb.Marshal(rawblock)
   682  			block := &proto.Payload{
   683  				SeqNum: uint64(i),
   684  				Data:   b,
   685  			}
   686  			p.s.AddPayload(block)
   687  			time.Sleep(time.Millisecond)
   688  		}
   689  	}()
   690  
   691  	// Get a block from gossip every 1ms too
   692  	go func() {
   693  		rand.Seed(time.Now().UnixNano())
   694  		for i := 1; i <= numBlocksReceived/2; i++ {
   695  			blockSeq := rand.Intn(numBlocksReceived)
   696  			rawblock := protoutil.NewBlock(uint64(blockSeq), []byte{})
   697  			b, _ := pb.Marshal(rawblock)
   698  			block := &proto.Payload{
   699  				SeqNum: uint64(blockSeq),
   700  				Data:   b,
   701  			}
   702  			p.s.addPayload(block, nonBlocking)
   703  			time.Sleep(time.Millisecond)
   704  		}
   705  	}()
   706  
   707  	for {
   708  		receivedBlock := <-blocksPassedToLedger
   709  		receivedBlockCount++
   710  		m := &mock.Mock{}
   711  		m.On("LedgerHeight", mock.Anything).Return(receivedBlock, nil)
   712  		m.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
   713  		m.On("CommitLegacy", mock.Anything).Run(func(arg mock.Arguments) {
   714  			blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
   715  		})
   716  		mc.Lock()
   717  		mc.Mock = m
   718  		mc.Unlock()
   719  		assert.Equal(t, receivedBlock, uint64(receivedBlockCount))
   720  		if receivedBlockCount == numBlocksReceived {
   721  			break
   722  		}
   723  		time.Sleep(time.Millisecond * 10)
   724  	}
   725  }
   726  
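        // TestHaltChainProcessing verifies that a VSCC execution failure during block
        // validation aborts chain processing and is surfaced in the log.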
   727  func TestHaltChainProcessing(t *testing.T) {
   728  	gossipChannel := func(c chan *proto.GossipMessage) <-chan *proto.GossipMessage {
   729  		return c
   730  	}
   731  	makeBlock := func(seq int) []byte {
   732  		b := &pcomm.Block{
   733  			Header: &pcomm.BlockHeader{
   734  				Number: uint64(seq),
   735  			},
   736  			Data: &pcomm.BlockData{
   737  				Data: [][]byte{},
   738  			},
   739  			Metadata: &pcomm.BlockMetadata{
   740  				Metadata: [][]byte{
   741  					{}, {}, {}, {},
   742  				},
   743  			},
   744  		}
   745  		data, _ := pb.Marshal(b)
   746  		return data
   747  	}
   748  	newBlockMsg := func(i int) *proto.GossipMessage {
   749  		return &proto.GossipMessage{
   750  			Channel: []byte("testchannelid"),
   751  			Content: &proto.GossipMessage_DataMsg{
   752  				DataMsg: &proto.DataMessage{
   753  					Payload: &proto.Payload{
   754  						SeqNum: uint64(i),
   755  						Data:   makeBlock(i),
   756  					},
   757  				},
   758  			},
   759  		}
   760  	}
   761  
   762  	buf := gbytes.NewBuffer()
   763  	logging, err := flogging.New(flogging.Config{
   764  		LogSpec: "debug",
   765  		Writer:  buf,
   766  	})
   767  	assert.NoError(t, err, "failed to create logging")
   768  
   769  	defer func(l gossiputil.Logger) { logger = l }(logger)
   770  	l := logging.Logger("state_test")
   771  	logger = l
   772  
   773  	mc := &mockCommitter{Mock: &mock.Mock{}}
   774  	mc.On("CommitLegacy", mock.Anything)
   775  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   776  	g := &mocks.GossipMock{}
   777  	gossipMsgs := make(chan *proto.GossipMessage)
   778  
   779  	g.On("Accept", mock.Anything, false).Return(gossipChannel(gossipMsgs), nil)
   780  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   781  	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
   782  
   783  	v := &validator.MockValidator{}
   784  	v.On("Validate").Return(&errors2.VSCCExecutionFailureError{
   785  		Err: errors.New("foobar"),
   786  	}).Once()
   787  	peerNode := newPeerNodeWithGossipWithValidator(0, mc, noopPeerIdentityAcceptor, g, v)
   788  	defer peerNode.shutdown()
   789  	gossipMsgs <- newBlockMsg(1)
   790  
   791  	assertLogged(t, buf, "Got error while committing")
   792  	assertLogged(t, buf, "Aborting chain processing")
   793  	assertLogged(t, buf, "foobar")
   794  }
   795  
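        // TestFailures verifies that constructing the state provider panics when the
        // ledger reports height 0 and returns nil when the ledger is unavailable.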
   796  func TestFailures(t *testing.T) {
   797  	t.Parallel()
   798  	mc := &mockCommitter{Mock: &mock.Mock{}}
   799  	mc.On("LedgerHeight", mock.Anything).Return(uint64(0), nil)
   800  	g := &mocks.GossipMock{}
   801  	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   802  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   803  	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
   804  	assert.Panics(t, func() {
   805  		newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   806  	})
   807  	// Reprogram mock
   808  	mc.Mock = &mock.Mock{}
   809  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), errors.New("Failed accessing ledger"))
   810  	assert.Nil(t, newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g))
   811  }
   812  
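        // TestGossipReception verifies that a block gossiped on the test channel is
        // committed, whereas a block arriving on a foreign channel is not.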
   813  func TestGossipReception(t *testing.T) {
   814  	t.Parallel()
   815  	signalChan := make(chan struct{})
   816  	rawblock := &pcomm.Block{
   817  		Header: &pcomm.BlockHeader{
   818  			Number: uint64(1),
   819  		},
   820  		Data: &pcomm.BlockData{
   821  			Data: [][]byte{},
   822  		},
   823  		Metadata: &pcomm.BlockMetadata{
   824  			Metadata: [][]byte{
   825  				{}, {}, {}, {},
   826  			},
   827  		},
   828  	}
   829  	b, _ := pb.Marshal(rawblock)
   830  
   831  	newMsg := func(channel string) *proto.GossipMessage {
   832  		return &proto.GossipMessage{
   833  			Channel: []byte(channel),
   834  			Content: &proto.GossipMessage_DataMsg{
   835  				DataMsg: &proto.DataMessage{
   836  					Payload: &proto.Payload{
   837  						SeqNum: 1,
   838  						Data:   b,
   839  					},
   840  				},
   841  			},
   842  		}
   843  	}
   846  
   847  	createChan := func(signalChan chan struct{}) <-chan *proto.GossipMessage {
   848  		c := make(chan *proto.GossipMessage)
   849  
   850  		go func(c chan *proto.GossipMessage) {
   851  			// Wait for Accept() to be called
   852  			<-signalChan
   853  			// Simulate a message reception from the gossip component with an invalid channel
   854  			c <- newMsg("AAA")
   855  			// Simulate a message reception from the gossip component
   856  			c <- newMsg("testchannelid")
   857  		}(c)
   858  		return c
   859  	}
   860  
   861  	g := &mocks.GossipMock{}
   862  	rmc := createChan(signalChan)
   863  	g.On("Accept", mock.Anything, false).Return(rmc, nil).Run(func(_ mock.Arguments) {
   864  		signalChan <- struct{}{}
   865  	})
   866  	g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   867  	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
   868  	mc := &mockCommitter{Mock: &mock.Mock{}}
   869  	receivedChan := make(chan struct{})
   870  	mc.On("CommitLegacy", mock.Anything).Run(func(arguments mock.Arguments) {
   871  		block := arguments.Get(0).(*pcomm.Block)
   872  		assert.Equal(t, uint64(1), block.Header.Number)
   873  		receivedChan <- struct{}{}
   874  	})
   875  	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   876  	mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
   877  	p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   878  	defer p.shutdown()
   879  	select {
   880  	case <-receivedChan:
   881  	case <-time.After(time.Second * 15):
   882  		assert.Fail(t, "Didn't commit a block in a timely manner")
   883  	}
   884  }
   885  
   886  func TestLedgerHeightFromProperties(t *testing.T) {
   887  	// Scenario: For each test, spawn a peer and supply it
   888  	// with a specific mock of PeersOfChannel, returning peers that
   889  	// either set the ledger height in their properties or don't.
   890  	// Ensure the logic handles all of the possible cases as needed
   891  
   892  	t.Parallel()
   893  	// Returns whether the given networkMember was selected or not
   894  	wasNetworkMemberSelected := func(t *testing.T, networkMember discovery.NetworkMember, wg *sync.WaitGroup) bool {
   895  		var wasGivenNetworkMemberSelected int32
   896  		finChan := make(chan struct{})
   897  		g := &mocks.GossipMock{}
   898  		g.On("Send", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
   899  			defer wg.Done()
   900  			msg := arguments.Get(0).(*proto.GossipMessage)
   901  			assert.NotNil(t, msg.GetStateRequest())
   902  			peer := arguments.Get(1).([]*comm.RemotePeer)[0]
   903  			if bytes.Equal(networkMember.PKIid, peer.PKIID) {
   904  				atomic.StoreInt32(&wasGivenNetworkMemberSelected, 1)
   905  			}
   906  			finChan <- struct{}{}
   907  		})
   908  		g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
   909  		g.On("Accept", mock.Anything, true).Return(nil, make(chan protoext.ReceivedMessage))
   910  		defaultPeer := discovery.NetworkMember{
   911  			InternalEndpoint: "b",
   912  			PKIid:            common.PKIidType("b"),
   913  			Properties: &proto.Properties{
   914  				LedgerHeight: 5,
   915  			},
   916  		}
   917  		g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{
   918  			defaultPeer,
   919  			networkMember,
   920  		})
   921  		mc := &mockCommitter{Mock: &mock.Mock{}}
   922  		mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
   923  		p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
   924  		defer p.shutdown()
   925  		select {
   926  		case <-time.After(time.Second * 20):
   927  			t.Fatal("Didn't send a request in a timely manner")
   928  		case <-finChan:
   929  		}
   930  		return atomic.LoadInt32(&wasGivenNetworkMemberSelected) == 1
   931  	}
   932  
   933  	peerWithProperties := discovery.NetworkMember{
   934  		PKIid: common.PKIidType("peerWithProperties"),
   935  		Properties: &proto.Properties{
   936  			LedgerHeight: 10,
   937  		},
   938  		InternalEndpoint: "peerWithProperties",
   939  	}
   940  
   941  	peerWithoutProperties := discovery.NetworkMember{
   942  		PKIid:            common.PKIidType("peerWithoutProperties"),
   943  		InternalEndpoint: "peerWithoutProperties",
   944  	}
   945  
   946  	tests := []struct {
   947  		shouldGivenBeSelected bool
   948  		member                discovery.NetworkMember
   949  	}{
   950  		{member: peerWithProperties, shouldGivenBeSelected: true},
   951  		{member: peerWithoutProperties, shouldGivenBeSelected: false},
   952  	}
   953  
   954  	var wg sync.WaitGroup
   955  	wg.Add(len(tests))
   956  	for _, tst := range tests {
   957  		go func(shouldGivenBeSelected bool, member discovery.NetworkMember) {
   958  			assert.Equal(t, shouldGivenBeSelected, wasNetworkMemberSelected(t, member, &wg))
   959  		}(tst.shouldGivenBeSelected, tst.member)
   960  	}
   961  	wg.Wait()
   962  }
   963  
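        // TestAccessControl verifies that only peers authorized by the block pull policy
        // end up receiving blocks, while unauthorized peers stay at the genesis height.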
   964  func TestAccessControl(t *testing.T) {
   965  	t.Parallel()
   966  	bootstrapSetSize := 5
   967  	bootstrapSet := make([]*peerNode, 0)
   968  
   969  	authorizedPeersSize := 4
   970  	var listeners []net.Listener
   971  	var endpoints []string
   972  
   973  	for i := 0; i < authorizedPeersSize; i++ {
   974  		ll, err := net.Listen("tcp", "127.0.0.1:0")
   975  		assert.NoError(t, err)
   976  		listeners = append(listeners, ll)
   977  		endpoint := ll.Addr().String()
   978  		endpoints = append(endpoints, endpoint)
   979  	}
   980  
   981  	defer func() {
   982  		for _, ll := range listeners {
   983  			ll.Close()
   984  		}
   985  	}()
   986  
   987  	authorizedPeers := map[string]struct{}{
   988  		endpoints[0]: {},
   989  		endpoints[1]: {},
   990  		endpoints[2]: {},
   991  		endpoints[3]: {},
   992  	}
   993  
   994  	blockPullPolicy := func(identity api.PeerIdentityType) error {
   995  		if _, isAuthorized := authorizedPeers[string(identity)]; isAuthorized {
   996  			return nil
   997  		}
   998  		return errors.New("Not authorized")
   999  	}
  1000  
  1001  	var bootPorts []int
  1002  
  1003  	for i := 0; i < bootstrapSetSize; i++ {
  1004  		commit := newCommitter()
  1005  		bootPeer, bootPort := newBootNode(i, commit, blockPullPolicy)
  1006  		bootstrapSet = append(bootstrapSet, bootPeer)
  1007  		bootPorts = append(bootPorts, bootPort)
  1008  	}
  1009  
  1010  	defer func() {
  1011  		for _, p := range bootstrapSet {
  1012  			p.shutdown()
  1013  		}
  1014  	}()
  1015  
  1016  	msgCount := 5
  1017  
  1018  	for i := 1; i <= msgCount; i++ {
  1019  		rawblock := protoutil.NewBlock(uint64(i), []byte{})
  1020  		if b, err := pb.Marshal(rawblock); err == nil {
  1021  			payload := &proto.Payload{
  1022  				SeqNum: uint64(i),
  1023  				Data:   b,
  1024  			}
  1025  			bootstrapSet[0].s.AddPayload(payload)
  1026  		} else {
  1027  			t.Fail()
  1028  		}
  1029  	}
  1030  
  1031  	standardPeerSetSize := 10
  1032  	peersSet := make([]*peerNode, 0)
  1033  
  1034  	for i := 0; i < standardPeerSetSize; i++ {
  1035  		commit := newCommitter()
  1036  		peersSet = append(peersSet, newPeerNode(bootstrapSetSize+i, commit, blockPullPolicy, bootPorts...))
  1037  	}
  1038  
  1039  	defer func() {
  1040  		for _, p := range peersSet {
  1041  			p.shutdown()
  1042  		}
  1043  	}()
  1044  
  1045  	waitUntilTrueOrTimeout(t, func() bool {
  1046  		for _, p := range peersSet {
  1047  			if len(p.g.PeersOfChannel(common.ChannelID("testchannelid"))) != bootstrapSetSize+standardPeerSetSize-1 {
  1048  				t.Log("Peer discovery has not finished yet")
  1049  				return false
  1050  			}
  1051  		}
  1052  		t.Log("All peers discovered each other!!!")
  1053  		return true
  1054  	}, 30*time.Second)
  1055  
  1056  	t.Log("Waiting for all blocks to arrive.")
  1057  	waitUntilTrueOrTimeout(t, func() bool {
  1058  		t.Log("Trying to see all authorized peers get all blocks, and all non-authorized didn't")
  1059  		for _, p := range peersSet {
  1060  			height, err := p.commit.LedgerHeight()
  1061  			id := fmt.Sprintf("127.0.0.1:%d", p.port)
  1062  			if _, isAuthorized := authorizedPeers[id]; isAuthorized {
  1063  				if height != uint64(msgCount+1) || err != nil {
  1064  					return false
  1065  				}
  1066  			} else {
  1067  				if err == nil && height > 1 {
  1068  					assert.Fail(t, "Peer", id, "got message but isn't authorized! Height:", height)
  1069  				}
  1070  			}
  1071  		}
  1072  		t.Log("All peers have the same ledger height!!!")
  1073  		return true
  1074  	}, 60*time.Second)
  1075  }
  1076  
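        // TestNewGossipStateProvider_SendingManyMessages verifies that blocks submitted
        // to one bootstrap peer eventually propagate to every standard peer in the network.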
  1077  func TestNewGossipStateProvider_SendingManyMessages(t *testing.T) {
  1078  	t.Parallel()
  1079  	bootstrapSetSize := 5
  1080  	bootstrapSet := make([]*peerNode, 0)
  1081  
  1082  	var bootPorts []int
  1083  
  1084  	for i := 0; i < bootstrapSetSize; i++ {
  1085  		commit := newCommitter()
  1086  		bootPeer, bootPort := newBootNode(i, commit, noopPeerIdentityAcceptor)
  1087  		bootstrapSet = append(bootstrapSet, bootPeer)
  1088  		bootPorts = append(bootPorts, bootPort)
  1089  	}
  1090  
  1091  	defer func() {
  1092  		for _, p := range bootstrapSet {
  1093  			p.shutdown()
  1094  		}
  1095  	}()
  1096  
  1097  	msgCount := 10
  1098  
  1099  	for i := 1; i <= msgCount; i++ {
  1100  		rawblock := protoutil.NewBlock(uint64(i), []byte{})
  1101  		if b, err := pb.Marshal(rawblock); err == nil {
  1102  			payload := &proto.Payload{
  1103  				SeqNum: uint64(i),
  1104  				Data:   b,
  1105  			}
  1106  			bootstrapSet[0].s.AddPayload(payload)
  1107  		} else {
  1108  			t.Fail()
  1109  		}
  1110  	}
  1111  
  1112  	standardPeersSize := 10
  1113  	peersSet := make([]*peerNode, 0)
  1114  
  1115  	for i := 0; i < standardPeersSize; i++ {
  1116  		commit := newCommitter()
  1117  		peersSet = append(peersSet, newPeerNode(bootstrapSetSize+i, commit, noopPeerIdentityAcceptor, bootPorts...))
  1118  	}
  1119  
  1120  	defer func() {
  1121  		for _, p := range peersSet {
  1122  			p.shutdown()
  1123  		}
  1124  	}()
  1125  
  1126  	waitUntilTrueOrTimeout(t, func() bool {
  1127  		for _, p := range peersSet {
  1128  			if len(p.g.PeersOfChannel(common.ChannelID("testchannelid"))) != bootstrapSetSize+standardPeersSize-1 {
  1129  				t.Log("Peer discovery has not finished yet")
  1130  				return false
  1131  			}
  1132  		}
  1133  		t.Log("All peers discovered each other!!!")
  1134  		return true
  1135  	}, 30*time.Second)
  1136  
  1137  	t.Log("Waiting for all blocks to arrive.")
  1138  	waitUntilTrueOrTimeout(t, func() bool {
  1139  		t.Log("Trying to see all peers get all blocks")
  1140  		for _, p := range peersSet {
  1141  			height, err := p.commit.LedgerHeight()
  1142  			if height != uint64(msgCount+1) || err != nil {
  1143  				return false
  1144  			}
  1145  		}
  1146  		t.Log("All peers have the same ledger height!!!")
  1147  		return true
  1148  	}, 60*time.Second)
  1149  }
  1150  
  1151  // Start one bootstrap peer and submit defAntiEntropyBatchSize + 5 messages into
  1152  // the local ledger, then spawn a new peer that waits for the anti-entropy procedure
  1153  // to fill in the missing blocks. Since state transfer messages are now batched, it is
  1154  // expected to see _exactly_ two messages with a state transfer response.
  1155  func TestNewGossipStateProvider_BatchingOfStateRequest(t *testing.T) {
  1156  	t.Parallel()
  1157  	bootPeer, bootPort := newBootNode(0, newCommitter(), noopPeerIdentityAcceptor)
  1158  	defer bootPeer.shutdown()
  1159  
  1160  	msgCount := defAntiEntropyBatchSize + 5
  1161  	expectedMessagesCnt := 2
  1162  
  1163  	for i := 1; i <= msgCount; i++ {
  1164  		rawblock := protoutil.NewBlock(uint64(i), []byte{})
  1165  		if b, err := pb.Marshal(rawblock); err == nil {
  1166  			payload := &proto.Payload{
  1167  				SeqNum: uint64(i),
  1168  				Data:   b,
  1169  			}
  1170  			bootPeer.s.AddPayload(payload)
  1171  		} else {
  1172  			t.Fail()
  1173  		}
  1174  	}
  1175  
  1176  	peer := newPeerNode(1, newCommitter(), noopPeerIdentityAcceptor, bootPort)
  1177  	defer peer.shutdown()
  1178  
  1179  	naiveStateMsgPredicate := func(message interface{}) bool {
  1180  		return protoext.IsRemoteStateMessage(message.(protoext.ReceivedMessage).GetGossipMessage().GossipMessage)
  1181  	}
  1182  	_, peerCh := peer.g.Accept(naiveStateMsgPredicate, true)
  1183  
  1184  	wg := sync.WaitGroup{}
  1185  	wg.Add(expectedMessagesCnt)
  1186  
  1187  	// The number of submitted messages is defAntiEntropyBatchSize + 5, therefore the
  1188  	// expected number of batches is expectedMessagesCnt = 2. The following goroutine
  1189  	// makes sure the expected number of messages is received and signals success
  1190  	// so the test can continue
  1191  	go func() {
  1192  		for count := 0; count < expectedMessagesCnt; count++ {
  1193  			<-peerCh
  1194  			wg.Done()
  1195  		}
  1196  	}()
  1197  
  1198  	// Once we get an indication that the two batches have been received,
  1199  	// make sure the messages were indeed committed.
  1200  	waitUntilTrueOrTimeout(t, func() bool {
  1201  		if len(peer.g.PeersOfChannel(common.ChannelID("testchannelid"))) != 1 {
  1202  			t.Log("Peer discovery has not finished yet")
  1203  			return false
  1204  		}
  1205  		t.Log("All peers discovered each other!!!")
  1206  		return true
  1207  	}, 30*time.Second)
  1208  
  1209  	// Wait for the signal that the expected number of message batches was received;
  1210  	// otherwise time out after 2 * defAntiEntropyInterval + 1 seconds
  1211  	wg.Wait()
  1212  
  1213  	t.Log("Waiting for all blocks to arrive.")
  1214  	waitUntilTrueOrTimeout(t, func() bool {
  1215  		t.Log("Trying to see all peers get all blocks")
  1216  		height, err := peer.commit.LedgerHeight()
  1217  		if height != uint64(msgCount+1) || err != nil {
  1218  			return false
  1219  		}
  1220  		t.Log("All peers have the same ledger height!!!")
  1221  		return true
  1222  	}, 60*time.Second)
  1223  }
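
        // expectedStateResponseBatches is an added sketch of the arithmetic behind
        // expectedMessagesCnt above: transferring msgCount missing blocks in batches
        // of defAntiEntropyBatchSize takes ceil(msgCount/defAntiEntropyBatchSize)
        // state response messages, which is 2 for defAntiEntropyBatchSize + 5.
        func expectedStateResponseBatches(msgCount int) int {
        	return (msgCount + defAntiEntropyBatchSize - 1) / defAntiEntropyBatchSize
        }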
  1224  
  1225  // coordinatorMock is a mock structure capturing the coordinator interface,
  1226  // used to simulate the coordinator flow during the test
  1227  type coordinatorMock struct {
  1228  	committer.Committer
  1229  	mock.Mock
  1230  }
  1231  
  1232  func (mock *coordinatorMock) GetPvtDataAndBlockByNum(seqNum uint64, _ protoutil.SignedData) (*pcomm.Block, gutil.PvtDataCollections, error) {
  1233  	args := mock.Called(seqNum)
  1234  	return args.Get(0).(*pcomm.Block), args.Get(1).(gutil.PvtDataCollections), args.Error(2)
  1235  }
  1236  
  1237  func (mock *coordinatorMock) GetBlockByNum(seqNum uint64) (*pcomm.Block, error) {
  1238  	args := mock.Called(seqNum)
  1239  	return args.Get(0).(*pcomm.Block), args.Error(1)
  1240  }
  1241  
  1242  func (mock *coordinatorMock) StoreBlock(block *pcomm.Block, data gutil.PvtDataCollections) error {
  1243  	args := mock.Called(block, data)
  1244  	return args.Error(1)
  1245  }
  1246  
  1247  func (mock *coordinatorMock) LedgerHeight() (uint64, error) {
  1248  	args := mock.Called()
  1249  	return args.Get(0).(uint64), args.Error(1)
  1250  }
  1251  
  1252  func (mock *coordinatorMock) Close() {
  1253  	mock.Called()
  1254  }
  1255  
  1256  // StorePvtData is used to persist private data into the transient store
  1257  func (mock *coordinatorMock) StorePvtData(txid string, privData *tspb.TxPvtReadWriteSetWithConfigInfo, blkHeight uint64) error {
  1258  	return mock.Called().Error(0)
  1259  }
  1260  
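        // receivedMessageMock mocks protoext.ReceivedMessage, allowing the test to
        // drive the state transfer request/response flow by hand.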
  1261  type receivedMessageMock struct {
  1262  	mock.Mock
  1263  }
  1264  
  1265  // Ack returns to the sender an acknowledgement for the message
  1266  func (mock *receivedMessageMock) Ack(err error) {
  1267  
  1268  }
  1269  
  1270  func (mock *receivedMessageMock) Respond(msg *proto.GossipMessage) {
  1271  	mock.Called(msg)
  1272  }
  1273  
  1274  func (mock *receivedMessageMock) GetGossipMessage() *protoext.SignedGossipMessage {
  1275  	args := mock.Called()
  1276  	return args.Get(0).(*protoext.SignedGossipMessage)
  1277  }
  1278  
  1279  func (mock *receivedMessageMock) GetSourceEnvelope() *proto.Envelope {
  1280  	args := mock.Called()
  1281  	return args.Get(0).(*proto.Envelope)
  1282  }
  1283  
  1284  func (mock *receivedMessageMock) GetConnectionInfo() *protoext.ConnectionInfo {
  1285  	args := mock.Called()
  1286  	return args.Get(0).(*protoext.ConnectionInfo)
  1287  }
  1288  
  1289  type testData struct {
  1290  	block   *pcomm.Block
  1291  	pvtData gutil.PvtDataCollections
  1292  }
  1293  
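        // TestTransferOfPrivateRWSet verifies that a state request is answered with a
        // response carrying both the requested blocks and their private read-write sets.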
  1294  func TestTransferOfPrivateRWSet(t *testing.T) {
  1295  	t.Parallel()
  1296  	chainID := "testChainID"
  1297  
  1298  	// First gossip instance
  1299  	g := &mocks.GossipMock{}
  1300  	coord1 := new(coordinatorMock)
  1301  
  1302  	gossipChannel := make(chan *proto.GossipMessage)
  1303  	commChannel := make(chan protoext.ReceivedMessage)
  1304  
  1305  	gossipChannelFactory := func(ch chan *proto.GossipMessage) <-chan *proto.GossipMessage {
  1306  		return ch
  1307  	}
  1308  
  1309  	g.On("Accept", mock.Anything, false).Return(gossipChannelFactory(gossipChannel), nil)
  1310  	g.On("Accept", mock.Anything, true).Return(nil, commChannel)
  1311  
  1312  	g.On("UpdateChannelMetadata", mock.Anything, mock.Anything)
  1313  	g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
  1314  	g.On("Close")
  1315  
  1316  	coord1.On("LedgerHeight", mock.Anything).Return(uint64(5), nil)
  1317  
  1318  	var data = map[uint64]*testData{
  1319  		uint64(2): {
  1320  			block: &pcomm.Block{
  1321  				Header: &pcomm.BlockHeader{
  1322  					Number:       2,
  1323  					DataHash:     []byte{0, 1, 1, 1},
  1324  					PreviousHash: []byte{0, 0, 0, 1},
  1325  				},
  1326  				Data: &pcomm.BlockData{
  1327  					Data: [][]byte{{1}, {2}, {3}},
  1328  				},
  1329  			},
  1330  			pvtData: gutil.PvtDataCollections{
  1331  				{
  1332  					SeqInBlock: uint64(0),
  1333  					WriteSet: &rwset.TxPvtReadWriteSet{
  1334  						DataModel: rwset.TxReadWriteSet_KV,
  1335  						NsPvtRwset: []*rwset.NsPvtReadWriteSet{
  1336  							{
  1337  								Namespace: "myCC:v1",
  1338  								CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
  1339  									{
  1340  										CollectionName: "mySecretCollection",
  1341  										Rwset:          []byte{1, 2, 3, 4, 5},
  1342  									},
  1343  								},
  1344  							},
  1345  						},
  1346  					},
  1347  				},
  1348  			},
  1349  		},
  1350  
  1351  		uint64(3): {
  1352  			block: &pcomm.Block{
  1353  				Header: &pcomm.BlockHeader{
  1354  					Number:       3,
  1355  					DataHash:     []byte{1, 1, 1, 1},
  1356  					PreviousHash: []byte{0, 1, 1, 1},
  1357  				},
  1358  				Data: &pcomm.BlockData{
  1359  					Data: [][]byte{{4}, {5}, {6}},
  1360  				},
  1361  			},
  1362  			pvtData: gutil.PvtDataCollections{
  1363  				{
  1364  					SeqInBlock: uint64(2),
  1365  					WriteSet: &rwset.TxPvtReadWriteSet{
  1366  						DataModel: rwset.TxReadWriteSet_KV,
  1367  						NsPvtRwset: []*rwset.NsPvtReadWriteSet{
  1368  							{
  1369  								Namespace: "otherCC:v1",
  1370  								CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
  1371  									{
  1372  										CollectionName: "topClassified",
  1373  										Rwset:          []byte{0, 0, 0, 4, 2},
  1374  									},
  1375  								},
  1376  							},
  1377  						},
  1378  					},
  1379  				},
  1380  			},
  1381  		},
  1382  	}
  1383  
  1384  	for seqNum, each := range data {
  1385  		coord1.On("GetPvtDataAndBlockByNum", seqNum).Return(each.block, each.pvtData, nil /* no error*/)
  1386  	}
  1387  
  1388  	coord1.On("Close")
  1389  
  1390  	servicesAdapter := &ServicesMediator{GossipAdapter: g, MCSAdapter: &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}}
  1391  	stateMetrics := metrics.NewGossipMetrics(&disabled.Provider{}).StateMetrics
  1392  	stateConfig := &StateConfig{
  1393  		StateCheckInterval:   DefStateCheckInterval,
  1394  		StateResponseTimeout: DefStateResponseTimeout,
  1395  		StateBatchSize:       DefStateBatchSize,
  1396  		StateMaxRetries:      DefStateMaxRetries,
  1397  		StateBlockBufferSize: DefStateBlockBufferSize,
  1398  		StateChannelSize:     DefStateChannelSize,
  1399  		StateEnabled:         DefStateEnabled,
  1400  	}
  1401  	st := NewGossipStateProvider(chainID, servicesAdapter, coord1, stateMetrics, blocking, stateConfig)
  1402  	defer st.Stop()
  1403  
  1404  	// Mocked state request message
  1405  	requestMsg := new(receivedMessageMock)
  1406  
  1407  	// Get state request message, blocks [2...3]
  1408  	requestGossipMsg := &proto.GossipMessage{
  1409  		// Set the nonce field, so it will be possible to match the response to the request
  1410  		Nonce:   1,
  1411  		Tag:     proto.GossipMessage_CHAN_OR_ORG,
  1412  		Channel: []byte(chainID),
  1413  		Content: &proto.GossipMessage_StateRequest{StateRequest: &proto.RemoteStateRequest{
  1414  			StartSeqNum: 2,
  1415  			EndSeqNum:   3,
  1416  		}},
  1417  	}
  1418  
  1419  	msg, _ := protoext.NoopSign(requestGossipMsg)
  1420  
  1421  	requestMsg.On("GetGossipMessage").Return(msg)
  1422  	requestMsg.On("GetConnectionInfo").Return(&protoext.ConnectionInfo{
  1423  		Auth: &protoext.AuthInfo{},
  1424  	})
  1425  
  1426  	// Channel to send responses back
  1427  	responseChannel := make(chan protoext.ReceivedMessage)
  1428  	defer close(responseChannel)
  1429  
  1430  	requestMsg.On("Respond", mock.Anything).Run(func(args mock.Arguments) {
  1431  		// Extract the gossip message responding to the state request
  1432  		response := args.Get(0).(*proto.GossipMessage)
  1433  		// Wrap it up into a received response
  1434  		receivedMsg := new(receivedMessageMock)
  1435  		// Sign the response (a no-op signature suffices for the test)
  1436  		msg, _ := protoext.NoopSign(response)
  1437  		// Mock the received message to return the signed response
  1438  		receivedMsg.On("GetGossipMessage").Return(msg)
  1439  		// Send the response back
  1440  		responseChannel <- receivedMsg
  1441  	})
  1442  
  1443  	// Send the request message into the state transfer layer via the communication channel
  1444  	commChannel <- requestMsg
  1445  
  1446  	// The state transfer request should result in a state response
  1447  	response := <-responseChannel
  1448  
  1449  	// Start the assertion section
  1450  	stateResponse := response.GetGossipMessage().GetStateResponse()
  1451  
  1452  	assertion := assert.New(t)
  1453  	// Nonce should be equal to the nonce of the request
  1454  	assertion.Equal(uint64(1), response.GetGossipMessage().Nonce)
  1455  	// Payload should not be nil
  1456  	assertion.NotNil(stateResponse)
  1457  	assertion.NotNil(stateResponse.Payloads)
  1458  	// Exactly two payloads expected, one per requested block
  1459  	assertion.Equal(2, len(stateResponse.Payloads))
  1460  
  1461  	// Assert that we received all the data and that it matches what we expected
  1462  	for _, each := range stateResponse.Payloads {
  1463  		block := &pcomm.Block{}
  1464  		err := pb.Unmarshal(each.Data, block)
  1465  		assertion.NoError(err)
  1466  
  1467  		assertion.NotNil(block.Header)
  1468  
  1469  		testBlock, ok := data[block.Header.Number]
  1470  		assertion.True(ok)
  1471  
  1472  		for i, d := range testBlock.block.Data.Data {
  1473  			assertion.True(bytes.Equal(d, block.Data.Data[i]))
  1474  		}
  1475  
  1476  		for i, p := range testBlock.pvtData {
  1477  			pvtDataPayload := &proto.PvtDataPayload{}
  1478  			err := pb.Unmarshal(each.PrivateData[i], pvtDataPayload)
  1479  			assertion.NoError(err)
  1480  			pvtRWSet := &rwset.TxPvtReadWriteSet{}
  1481  			err = pb.Unmarshal(pvtDataPayload.Payload, pvtRWSet)
  1482  			assertion.NoError(err)
  1483  			assertion.True(pb.Equal(p.WriteSet, pvtRWSet))
  1484  		}
  1485  	}
  1486  }
  1487  
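        // testPeer wraps a GossipMock together with dedicated gossip and comm
        // channels and a coordinator mock, simulating a single peer in the test network.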
  1488  type testPeer struct {
  1489  	*mocks.GossipMock
  1490  	id            string
  1491  	gossipChannel chan *proto.GossipMessage
  1492  	commChannel   chan protoext.ReceivedMessage
  1493  	coord         *coordinatorMock
  1494  }
  1495  
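        // Gossip returns the channel on which the peer receives gossiped messages.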
  1496  func (t testPeer) Gossip() <-chan *proto.GossipMessage {
  1497  	return t.gossipChannel
  1498  }
  1499  
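        // Comm returns the channel on which the peer receives direct (point-to-point) messages.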
  1500  func (t testPeer) Comm() chan protoext.ReceivedMessage {
  1501  	return t.commChannel
  1502  }
  1503  
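        // peers holds the two mocked peers exercised by TestTransferOfPvtDataBetweenPeers.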
  1504  var peers = map[string]testPeer{
  1505  	"peer1": {
  1506  		id:            "peer1",
  1507  		gossipChannel: make(chan *proto.GossipMessage),
  1508  		commChannel:   make(chan protoext.ReceivedMessage),
  1509  		GossipMock:    &mocks.GossipMock{},
  1510  		coord:         new(coordinatorMock),
  1511  	},
  1512  	"peer2": {
  1513  		id:            "peer2",
  1514  		gossipChannel: make(chan *proto.GossipMessage),
  1515  		commChannel:   make(chan protoext.ReceivedMessage),
  1516  		GossipMock:    &mocks.GossipMock{},
  1517  		coord:         new(coordinatorMock),
  1518  	},
  1519  }
  1520  
  1521  func TestTransferOfPvtDataBetweenPeers(t *testing.T) {
  1522  	/*
  1523  	   This test covers a pretty basic scenario: there are two peers, "peer1"
  1524  	   and "peer2", where peer2 is missing a few blocks in its ledger and
  1525  	   therefore asks to replicate those blocks from the first peer.
  1526  
  1527  	   The test checks that the block from one peer is replicated to the
  1528  	   second one and has identical content.
  1529  	*/
  1530  	t.Parallel()
  1531  	chainID := "testChainID"
  1532  
  1533  	// Initialize the peers' common mock expectations
  1534  	for _, peer := range peers {
  1535  		peer.On("Accept", mock.Anything, false).Return(peer.Gossip(), nil)
  1536  
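        		// Only the first Accept registration for direct messages is wired to the
        		// test's comm channel; any subsequent call gets a throwaway channel.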
  1537  		peer.On("Accept", mock.Anything, true).
  1538  			Return(nil, peer.Comm()).
  1539  			Once().
  1540  			On("Accept", mock.Anything, true).
  1541  			Return(nil, make(chan protoext.ReceivedMessage))
  1542  
  1543  		peer.On("UpdateChannelMetadata", mock.Anything, mock.Anything)
  1544  		peer.coord.On("Close")
  1545  		peer.On("Close")
  1546  	}
  1547  
  1548  	// The first peer has the more advanced ledger (height 3)
  1549  	peers["peer1"].coord.On("LedgerHeight", mock.Anything).Return(uint64(3), nil)
  1550  
  1551  	// The second peer is one block behind, hence it will have to replicate the missing block from the first peer
  1552  	peers["peer2"].coord.On("LedgerHeight", mock.Anything).Return(uint64(2), nil)
  1553  
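        	// Neither peer reports the private data as already present in its ledger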
  1554  	peers["peer1"].coord.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
  1555  	peers["peer2"].coord.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
  1556  
  1557  	peers["peer1"].coord.On("GetPvtDataAndBlockByNum", uint64(2)).Return(&pcomm.Block{
  1558  		Header: &pcomm.BlockHeader{
  1559  			Number:       2,
  1560  			DataHash:     []byte{0, 0, 0, 1},
  1561  			PreviousHash: []byte{0, 1, 1, 1},
  1562  		},
  1563  		Data: &pcomm.BlockData{
  1564  			Data: [][]byte{{4}, {5}, {6}},
  1565  		},
  1566  	}, gutil.PvtDataCollections{&ledger.TxPvtData{
  1567  		SeqInBlock: uint64(1),
  1568  		WriteSet: &rwset.TxPvtReadWriteSet{
  1569  			DataModel: rwset.TxReadWriteSet_KV,
  1570  			NsPvtRwset: []*rwset.NsPvtReadWriteSet{
  1571  				{
  1572  					Namespace: "myCC:v1",
  1573  					CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
  1574  						{
  1575  							CollectionName: "mysecretCollection",
  1576  							Rwset:          []byte{1, 2, 3, 4, 5},
  1577  						},
  1578  					},
  1579  				},
  1580  			},
  1581  		},
  1582  	}}, nil)
  1583  
  1584  	// Define the membership each peer observes: each one sees only the other peer
  1585  	member2 := discovery.NetworkMember{
  1586  		PKIid:            common.PKIidType([]byte{2}),
  1587  		Endpoint:         "peer2:7051",
  1588  		InternalEndpoint: "peer2:7051",
  1589  		Properties: &proto.Properties{
  1590  			LedgerHeight: 2,
  1591  		},
  1592  	}
  1593  
  1594  	member1 := discovery.NetworkMember{
  1595  		PKIid:            common.PKIidType([]byte{1}),
  1596  		Endpoint:         "peer1:7051",
  1597  		InternalEndpoint: "peer1:7051",
  1598  		Properties: &proto.Properties{
  1599  			LedgerHeight: 3,
  1600  		},
  1601  	}
  1602  
  1603  	peers["peer1"].On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{member2})
  1604  	peers["peer2"].On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{member1})
  1605  
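        	// Route peer2's outgoing state request into peer1's comm channel, and
        	// arrange for peer1's response to arrive back on peer2's comm channel.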
  1606  	peers["peer2"].On("Send", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
  1607  		request := args.Get(0).(*proto.GossipMessage)
  1608  		requestMsg := new(receivedMessageMock)
  1609  		msg, _ := protoext.NoopSign(request)
  1610  		requestMsg.On("GetGossipMessage").Return(msg)
  1611  		requestMsg.On("GetConnectionInfo").Return(&protoext.ConnectionInfo{
  1612  			Auth: &protoext.AuthInfo{},
  1613  		})
  1614  
  1615  		requestMsg.On("Respond", mock.Anything).Run(func(args mock.Arguments) {
  1616  			response := args.Get(0).(*proto.GossipMessage)
  1617  			receivedMsg := new(receivedMessageMock)
  1618  			msg, _ := protoext.NoopSign(response)
  1619  			receivedMsg.On("GetGossipMessage").Return(msg)
  1620  			// Send response back to the peer
  1621  			peers["peer2"].commChannel <- receivedMsg
  1622  		})
  1623  
  1624  		peers["peer1"].commChannel <- requestMsg
  1625  	})
  1626  
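        	// Use a WaitGroup to detect when peer2 commits the replicated block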
  1627  	wg := sync.WaitGroup{}
  1628  	wg.Add(1)
  1629  	peers["peer2"].coord.On("StoreBlock", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
  1630  		wg.Done() // Done once the second peer commits the block
  1631  	}).Return([]string{}, nil) // No pvt data to complete and no error
  1632  
  1633  	cryptoService := &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}
  1634  
  1635  	stateMetrics := metrics.NewGossipMetrics(&disabled.Provider{}).StateMetrics
  1636  
  1637  	mediator := &ServicesMediator{GossipAdapter: peers["peer1"], MCSAdapter: cryptoService}
  1638  	stateConfig := &StateConfig{
  1639  		StateCheckInterval:   DefStateCheckInterval,
  1640  		StateResponseTimeout: DefStateResponseTimeout,
  1641  		StateBatchSize:       DefStateBatchSize,
  1642  		StateMaxRetries:      DefStateMaxRetries,
  1643  		StateBlockBufferSize: DefStateBlockBufferSize,
  1644  		StateChannelSize:     DefStateChannelSize,
  1645  		StateEnabled:         DefStateEnabled,
  1646  	}
  1647  	peer1State := NewGossipStateProvider(chainID, mediator, peers["peer1"].coord, stateMetrics, blocking, stateConfig)
  1648  	defer peer1State.Stop()
  1649  
  1650  	mediator = &ServicesMediator{GossipAdapter: peers["peer2"], MCSAdapter: cryptoService}
  1651  	peer2State := NewGossipStateProvider(chainID, mediator, peers["peer2"].coord, stateMetrics, blocking, stateConfig)
  1652  	defer peer2State.Stop()
  1653  
  1654  	// Make sure state was replicated
  1655  	done := make(chan struct{})
  1656  	go func() {
  1657  		wg.Wait()
  1658  		done <- struct{}{}
  1659  	}()
  1660  
  1661  	select {
  1662  	case <-done:
  1663  	case <-time.After(30 * time.Second):
  1664  		t.Fatal("Timed out waiting for peer2 to commit the replicated block")
  1665  	}
  1667  }
  1668  
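        // TestStateRequestValidator verifies that a request with an inverted
        // range or a range larger than the configured batch size is rejected,
        // while a request within the allowed range passes validation.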
  1669  func TestStateRequestValidator(t *testing.T) {
  1670  	validator := &stateRequestValidator{}
  1671  	err := validator.validate(&proto.RemoteStateRequest{
  1672  		StartSeqNum: 10,
  1673  		EndSeqNum:   5,
  1674  	}, defAntiEntropyBatchSize)
  1675  	assert.Error(t, err)
  1676  	assert.Contains(t, err.Error(), "Invalid sequence interval [10...5).")
  1677  
  1678  	err = validator.validate(&proto.RemoteStateRequest{
  1679  		StartSeqNum: 10,
  1680  		EndSeqNum:   30,
  1681  	}, defAntiEntropyBatchSize)
  1682  	assert.Error(t, err)
  1683  	assert.Contains(t, err.Error(), "Requesting blocks range [10-30) greater than configured")
  1684  
  1685  	err = validator.validate(&proto.RemoteStateRequest{
  1686  		StartSeqNum: 10,
  1687  		EndSeqNum:   20,
  1688  	}, defAntiEntropyBatchSize)
  1689  	assert.NoError(t, err)
  1690  }
  1691  
  1692  func waitUntilTrueOrTimeout(t *testing.T, predicate func() bool, timeout time.Duration) {
  1693  	ch := make(chan struct{})
  1694  	t.Log("Waiting until the predicate is satisfied or the timeout expires.")
  1695  
  1696  	go func() {
  1697  		// Name the ticker explicitly so it does not shadow the *testing.T parameter
  1698  		ticker := time.NewTicker(time.Second)
  1699  		defer ticker.Stop()
  1700  		for !predicate() {
  1701  			<-ticker.C
  1702  		}
  1703  		close(ch)
  1704  	}()
  1705  
  1706  	select {
  1707  	case <-ch:
  1708  		t.Log("Done.")
  1709  	case <-time.After(timeout):
  1710  		t.Fatal("Timeout has expired")
  1711  	}
  1712  }
  1721  
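        // assertLogged waits up to 30 seconds for msg to appear in the given log buffer.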
  1722  func assertLogged(t *testing.T, buf *gbytes.Buffer, msg string) {
  1723  	observed := func() bool { return strings.Contains(string(buf.Contents()), msg) }
  1724  	waitUntilTrueOrTimeout(t, observed, 30*time.Second)
  1725  }