github.com/tenywen/fabric@v1.0.0-beta.0.20170620030522-a5b1ed380643/gossip/state/state_test.go

/*
Copyright IBM Corp. 2016 All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

		 http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package state

import (
	"bytes"
	"errors"
	"fmt"
	"strconv"
	"sync"
	"testing"
	"time"

	pb "github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric/common/configtx/test"
	"github.com/hyperledger/fabric/common/util"
	"github.com/hyperledger/fabric/core/committer"
	"github.com/hyperledger/fabric/core/ledger/ledgermgmt"
	"github.com/hyperledger/fabric/core/mocks/validator"
	"github.com/hyperledger/fabric/gossip/api"
	"github.com/hyperledger/fabric/gossip/comm"
	"github.com/hyperledger/fabric/gossip/common"
	"github.com/hyperledger/fabric/gossip/gossip"
	"github.com/hyperledger/fabric/gossip/identity"
	"github.com/hyperledger/fabric/gossip/state/mocks"
	gutil "github.com/hyperledger/fabric/gossip/util"
	pcomm "github.com/hyperledger/fabric/protos/common"
	proto "github.com/hyperledger/fabric/protos/gossip"
	"github.com/spf13/viper"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

var (
	portPrefix = 5610
)

var orgID = []byte("ORG1")
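
// peerIdentityAcceptor decides whether a given peer identity is allowed to
// pull blocks; tests use it to simulate access control policies.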
type peerIdentityAcceptor func(identity api.PeerIdentityType) error

var noopPeerIdentityAcceptor = func(identity api.PeerIdentityType) error {
	return nil
}

type joinChanMsg struct {
}

func init() {
	gutil.SetupTestLogging()
}

// SequenceNumber returns the sequence number of the block that the message
// is derived from
func (*joinChanMsg) SequenceNumber() uint64 {
	return uint64(time.Now().UnixNano())
}

// Members returns the organizations of the channel
func (jcm *joinChanMsg) Members() []api.OrgIdentityType {
	return []api.OrgIdentityType{orgID}
}

// AnchorPeersOf returns the anchor peers of the given organization
func (jcm *joinChanMsg) AnchorPeersOf(org api.OrgIdentityType) []api.AnchorPeer {
	return []api.AnchorPeer{}
}

type orgCryptoService struct {
}

// OrgByPeerIdentity returns the OrgIdentityType
// of a given peer identity
func (*orgCryptoService) OrgByPeerIdentity(identity api.PeerIdentityType) api.OrgIdentityType {
	return orgID
}

// Verify verifies a JoinChannelMessage, returns nil on success,
// and an error on failure
func (*orgCryptoService) Verify(joinChanMsg api.JoinChannelMessage) error {
	return nil
}

type cryptoServiceMock struct {
	acceptor peerIdentityAcceptor
}

// GetPKIidOfCert returns the PKI-ID of a peer's identity
func (*cryptoServiceMock) GetPKIidOfCert(peerIdentity api.PeerIdentityType) common.PKIidType {
	return common.PKIidType(peerIdentity)
}

// VerifyBlock returns nil if the block is properly signed,
// else returns an error
func (*cryptoServiceMock) VerifyBlock(chainID common.ChainID, seqNum uint64, signedBlock []byte) error {
	return nil
}

// Sign signs msg with this peer's signing key and outputs
// the signature if no error occurred.
func (*cryptoServiceMock) Sign(msg []byte) ([]byte, error) {
	clone := make([]byte, len(msg))
	copy(clone, msg)
	return clone, nil
}

// Verify checks that signature is a valid signature of message under a peer's verification key.
// If the verification succeeded, Verify returns nil, meaning no error occurred.
// If peerCert is nil, then the signature is verified against this peer's verification key.
func (*cryptoServiceMock) Verify(peerIdentity api.PeerIdentityType, signature, message []byte) error {
	if !bytes.Equal(signature, message) {
		return fmt.Errorf("Wrong signature: %v, %v", signature, message)
	}
	return nil
}

// VerifyByChannel checks that signature is a valid signature of message
// under a peer's verification key, but also in the context of a specific channel.
// If the verification succeeded, Verify returns nil, meaning no error occurred.
// If peerIdentity is nil, then the signature is verified against this peer's verification key.
func (cs *cryptoServiceMock) VerifyByChannel(chainID common.ChainID, peerIdentity api.PeerIdentityType, signature, message []byte) error {
	return cs.acceptor(peerIdentity)
}

func (*cryptoServiceMock) ValidateIdentity(peerIdentity api.PeerIdentityType) error {
	return nil
}

func bootPeers(ids ...int) []string {
	peers := []string{}
	for _, id := range ids {
		peers = append(peers, fmt.Sprintf("localhost:%d", id+portPrefix))
	}
	return peers
}

// A simple representation of a peer, including only the
// communication module, gossip, and state transfer
type peerNode struct {
	port   int
	g      gossip.Gossip
	s      GossipStateProvider
	cs     *cryptoServiceMock
	commit committer.Committer
}

// shutdown stops all modules used by the node
func (node *peerNode) shutdown() {
	node.s.Stop()
	node.g.Stop()
}
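
// mockCommitter is a testify-based stub of committer.Committer; each test
// programs its LedgerHeight/GetBlocks/Commit expectations as needed.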
type mockCommitter struct {
	mock.Mock
}

func (mc *mockCommitter) Commit(block *pcomm.Block) error {
	mc.Called(block)
	return nil
}

func (mc *mockCommitter) LedgerHeight() (uint64, error) {
	args := mc.Called()
	if args.Get(1) == nil {
		return args.Get(0).(uint64), nil
	}
	return args.Get(0).(uint64), args.Get(1).(error)
}

func (mc *mockCommitter) GetBlocks(blockSeqs []uint64) []*pcomm.Block {
	args := mc.Called(blockSeqs)
	if args.Get(0) == nil {
		return nil
	}
	return args.Get(0).([]*pcomm.Block)
}

func (*mockCommitter) Close() {
}

// Default configuration to be used for the gossip and communication modules
func newGossipConfig(id int, boot ...int) *gossip.Config {
	port := id + portPrefix
	return &gossip.Config{
		BindPort:                   port,
		BootstrapPeers:             bootPeers(boot...),
		ID:                         fmt.Sprintf("p%d", id),
		MaxBlockCountToStore:       0,
		MaxPropagationBurstLatency: 10 * time.Millisecond,
		MaxPropagationBurstSize:    10,
		PropagateIterations:        1,
		PropagatePeerNum:           3,
		PullInterval:               4 * time.Second,
		PullPeerNum:                5,
		InternalEndpoint:           fmt.Sprintf("localhost:%d", port),
		PublishCertPeriod:          10 * time.Second,
		RequestStateInfoInterval:   4 * time.Second,
		PublishStateInfoInterval:   4 * time.Second,
	}
}

// Create a gossip instance
func newGossipInstance(config *gossip.Config, mcs api.MessageCryptoService) gossip.Gossip {
	id := api.PeerIdentityType(config.InternalEndpoint)
	idMapper := identity.NewIdentityMapper(mcs, id)
	return gossip.NewGossipServiceWithServer(config, &orgCryptoService{}, mcs,
		idMapper, id, nil)
}

// Create a new instance of KVLedger to be used for testing
func newCommitter(id int) committer.Committer {
	cb, _ := test.MakeGenesisBlock(strconv.Itoa(id))
	ledger, _ := ledgermgmt.CreateLedger(cb)
	return committer.NewLedgerCommitter(ledger, &validator.MockValidator{})
}

// Constructs a pseudo peer node, simulating only the gossip and state transfer parts
func newPeerNodeWithGossip(config *gossip.Config, committer committer.Committer, acceptor peerIdentityAcceptor, g gossip.Gossip) *peerNode {
	cs := &cryptoServiceMock{acceptor: acceptor}
	// Gossip component based on the configuration provided, unless an
	// instance was passed in by the caller
	if g == nil {
		g = newGossipInstance(config, &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor})
	}

	logger.Debug("Joining channel", util.GetTestChainID())
	g.JoinChan(&joinChanMsg{}, common.ChainID(util.GetTestChainID()))

	// Initialize the pseudo peer simulator, which has only three
	// basic parts

	sp := NewGossipStateProvider(util.GetTestChainID(), g, committer, cs)
	if sp == nil {
		return nil
	}

	return &peerNode{
		port:   config.BindPort,
		g:      g,
		s:      sp,
		commit: committer,
		cs:     cs,
	}
}

// Constructs a pseudo peer node, simulating only the gossip and state transfer parts
func newPeerNode(config *gossip.Config, committer committer.Committer, acceptor peerIdentityAcceptor) *peerNode {
	return newPeerNodeWithGossip(config, committer, acceptor, nil)
}
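
// TestNilDirectMsg checks that the state provider tolerates nil and
// out-of-range direct messages without panicking.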
func TestNilDirectMsg(t *testing.T) {
	mc := &mockCommitter{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(<-chan proto.ReceivedMessage))
	p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)
	defer p.shutdown()
	p.s.(*GossipStateProviderImpl).handleStateRequest(nil)
	p.s.(*GossipStateProviderImpl).directMessage(nil)
	sMsg, _ := p.s.(*GossipStateProviderImpl).stateRequestMessage(uint64(10), uint64(8)).NoopSign()
	req := &comm.ReceivedMessageImpl{
		SignedGossipMessage: sMsg,
	}
	p.s.(*GossipStateProviderImpl).directMessage(req)
}
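
// TestFailures covers constructor failure modes: a zero ledger height panics,
// a ledger access error yields a nil provider, and GetBlock returns nil when
// the committer has no blocks.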
func TestFailures(t *testing.T) {
	mc := &mockCommitter{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(0), nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(<-chan proto.ReceivedMessage))
	assert.Panics(t, func() {
		newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)
	})
	// Reprogram mock
	mc.Mock = mock.Mock{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), errors.New("Failed accessing ledger"))
	assert.Nil(t, newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g))
	// Reprogram mock
	mc.Mock = mock.Mock{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	mc.On("GetBlocks", mock.Anything).Return(nil)
	p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)
	assert.Nil(t, p.s.GetBlock(uint64(1)))
}
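
// TestGossipReception verifies that a block gossiped on a foreign channel is
// ignored, while the same block on the test channel gets committed.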
func TestGossipReception(t *testing.T) {
	signalChan := make(chan struct{})
	rawblock := &pcomm.Block{
		Header: &pcomm.BlockHeader{
			Number: uint64(1),
		},
		Data: &pcomm.BlockData{
			Data: [][]byte{},
		},
	}
	b, _ := pb.Marshal(rawblock)

	createChan := func(signalChan chan struct{}) <-chan *proto.GossipMessage {
		c := make(chan *proto.GossipMessage)
		gMsg := &proto.GossipMessage{
			Channel: []byte("AAA"),
			Content: &proto.GossipMessage_DataMsg{
				DataMsg: &proto.DataMessage{
					Payload: &proto.Payload{
						SeqNum: 1,
						Data:   b,
					},
				},
			},
		}
		go func(c chan *proto.GossipMessage) {
			// Wait for Accept() to be called
			<-signalChan
			// Simulate a message reception from the gossip component with an invalid channel
			c <- gMsg
			gMsg.Channel = []byte(util.GetTestChainID())
			// Simulate a message reception from the gossip component
			c <- gMsg
		}(c)
		return c
	}

	g := &mocks.GossipMock{}
	rmc := createChan(signalChan)
	g.On("Accept", mock.Anything, false).Return(rmc, nil).Run(func(_ mock.Arguments) {
		signalChan <- struct{}{}
	})
	g.On("Accept", mock.Anything, true).Return(nil, make(<-chan proto.ReceivedMessage))
	mc := &mockCommitter{}
	receivedChan := make(chan struct{})
	mc.On("Commit", mock.Anything).Run(func(arguments mock.Arguments) {
		block := arguments.Get(0).(*pcomm.Block)
		assert.Equal(t, uint64(1), block.Header.Number)
		receivedChan <- struct{}{}
	})
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)
	defer p.shutdown()
	select {
	case <-receivedChan:
	case <-time.After(time.Second * 15):
		assert.Fail(t, "Didn't commit a block in a timely manner")
	}
}
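
// TestAccessControl bootstraps a network in which only the peers listed in
// authorizedPeers may pull blocks, and verifies that unauthorized peers never
// advance their ledger height.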
func TestAccessControl(t *testing.T) {
	viper.Set("peer.fileSystemPath", "/tmp/tests/ledger/node")
	ledgermgmt.InitializeTestEnv()
	defer ledgermgmt.CleanupTestEnv()

	bootstrapSetSize := 5
	bootstrapSet := make([]*peerNode, 0)

	authorizedPeers := map[string]struct{}{
		"localhost:5610": {},
		"localhost:5615": {},
		"localhost:5618": {},
		"localhost:5621": {},
	}

	blockPullPolicy := func(identity api.PeerIdentityType) error {
		if _, isAuthorized := authorizedPeers[string(identity)]; isAuthorized {
			return nil
		}
		return errors.New("Not authorized")
	}

	for i := 0; i < bootstrapSetSize; i++ {
		commit := newCommitter(i)
		bootstrapSet = append(bootstrapSet, newPeerNode(newGossipConfig(i), commit, blockPullPolicy))
	}

	defer func() {
		for _, p := range bootstrapSet {
			p.shutdown()
		}
	}()

	msgCount := 5

	for i := 1; i <= msgCount; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootstrapSet[0].s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	standardPeerSetSize := 10
	peersSet := make([]*peerNode, 0)

	for i := 0; i < standardPeerSetSize; i++ {
		commit := newCommitter(bootstrapSetSize + i)
		peersSet = append(peersSet, newPeerNode(newGossipConfig(bootstrapSetSize+i, 0, 1, 2, 3, 4), commit, blockPullPolicy))
	}

	defer func() {
		for _, p := range peersSet {
			p.shutdown()
		}
	}()

	waitUntilTrueOrTimeout(t, func() bool {
		for _, p := range peersSet {
			if len(p.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != bootstrapSetSize+standardPeerSetSize-1 {
				logger.Debug("Peer discovery has not finished yet")
				return false
			}
		}
		logger.Debug("All peers discovered each other")
		return true
	}, 30*time.Second)

	logger.Debug("Waiting for all blocks to arrive.")
	waitUntilTrueOrTimeout(t, func() bool {
		logger.Debug("Checking that all authorized peers got all blocks and non-authorized peers did not")
		for _, p := range peersSet {
			height, err := p.commit.LedgerHeight()
			id := fmt.Sprintf("localhost:%d", p.port)
			if _, isAuthorized := authorizedPeers[id]; isAuthorized {
				if height != uint64(msgCount+1) || err != nil {
					return false
				}
			} else {
				if err == nil && height > 1 {
					assert.Fail(t, "Peer", id, "got a message but isn't authorized! Height:", height)
				}
			}
		}
		logger.Debug("All peers have the expected ledger height")
		return true
	}, 60*time.Second)
}

/*// Simple scenario to start the first booting node, gossip a message,
// then start a second node and verify that the second node also receives it
func TestNewGossipStateProvider_GossipingOneMessage(t *testing.T) {
	bootId := 0
	ledgerPath := "/tmp/tests/ledger/"
	defer os.RemoveAll(ledgerPath)

	bootNodeCommitter := newCommitter(bootId, ledgerPath + "node/")
	defer bootNodeCommitter.Close()

	bootNode := newPeerNode(newGossipConfig(bootId, 100), bootNodeCommitter)
	defer bootNode.shutdown()

	rawblock := &peer.Block2{}
	if err := pb.Unmarshal([]byte{}, rawblock); err != nil {
		t.Fail()
	}

	if bytes, err := pb.Marshal(rawblock); err == nil {
		payload := &proto.Payload{1, "", bytes}
		bootNode.s.AddPayload(payload)
	} else {
		t.Fail()
	}

	waitUntilTrueOrTimeout(t, func() bool {
		if block := bootNode.s.GetBlock(uint64(1)); block != nil {
			return true
		}
		return false
	}, 5 * time.Second)

	bootNode.g.Gossip(createDataMsg(uint64(1), []byte{}, ""))

	peerCommitter := newCommitter(1, ledgerPath + "node/")
	defer peerCommitter.Close()

	peer := newPeerNode(newGossipConfig(1, 100, bootId), peerCommitter)
	defer peer.shutdown()

	ready := make(chan interface{})

	go func(p *peerNode) {
		for len(p.g.GetPeers()) != 1 {
			time.Sleep(100 * time.Millisecond)
		}
		ready <- struct{}{}
	}(peer)

	select {
	case <-ready:
		break
	case <-time.After(1 * time.Second):
		t.Fail()
	}

	// Make sure anti-entropy will have a chance to bring the missing block
	waitUntilTrueOrTimeout(t, func() bool {
		if block := peer.s.GetBlock(uint64(1)); block != nil {
			return true
		}
		return false
	}, 2 * defAntiEntropyInterval + 1 * time.Second)

	block := peer.s.GetBlock(uint64(1))

	assert.NotNil(t, block)
}

func TestNewGossipStateProvider_RepeatGossipingOneMessage(t *testing.T) {
	for i := 0; i < 10; i++ {
		TestNewGossipStateProvider_GossipingOneMessage(t)
	}
}*/
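
// TestNewGossipStateProvider_SendingManyMessages adds blocks to a single
// bootstrap peer and verifies that every peer in the network eventually
// reaches the same ledger height via state transfer.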
func TestNewGossipStateProvider_SendingManyMessages(t *testing.T) {
	viper.Set("peer.fileSystemPath", "/tmp/tests/ledger/node")
	ledgermgmt.InitializeTestEnv()
	defer ledgermgmt.CleanupTestEnv()

	bootstrapSetSize := 5
	bootstrapSet := make([]*peerNode, 0)

	for i := 0; i < bootstrapSetSize; i++ {
		commit := newCommitter(i)
		bootstrapSet = append(bootstrapSet, newPeerNode(newGossipConfig(i), commit, noopPeerIdentityAcceptor))
	}

	defer func() {
		for _, p := range bootstrapSet {
			p.shutdown()
		}
	}()

	msgCount := 10

	for i := 1; i <= msgCount; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootstrapSet[0].s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	standardPeersSize := 10
	peersSet := make([]*peerNode, 0)

	for i := 0; i < standardPeersSize; i++ {
		commit := newCommitter(bootstrapSetSize + i)
		peersSet = append(peersSet, newPeerNode(newGossipConfig(bootstrapSetSize+i, 0, 1, 2, 3, 4), commit, noopPeerIdentityAcceptor))
	}

	defer func() {
		for _, p := range peersSet {
			p.shutdown()
		}
	}()

	waitUntilTrueOrTimeout(t, func() bool {
		for _, p := range peersSet {
			if len(p.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != bootstrapSetSize+standardPeersSize-1 {
				logger.Debug("Peer discovery has not finished yet")
				return false
			}
		}
		logger.Debug("All peers discovered each other")
		return true
	}, 30*time.Second)

	logger.Debug("Waiting for all blocks to arrive.")
	waitUntilTrueOrTimeout(t, func() bool {
		logger.Debug("Checking that all peers got all blocks")
		for _, p := range peersSet {
			height, err := p.commit.LedgerHeight()
			if height != uint64(msgCount+1) || err != nil {
				return false
			}
		}
		logger.Debug("All peers have the same ledger height")
		return true
	}, 60*time.Second)
}
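
// TestGossipStateProvider_TestStateMessages verifies the direct state
// request/response exchange: one peer sends a remote state request and
// expects a state response from the bootstrap peer.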
func TestGossipStateProvider_TestStateMessages(t *testing.T) {
	viper.Set("peer.fileSystemPath", "/tmp/tests/ledger/node")
	ledgermgmt.InitializeTestEnv()
	defer ledgermgmt.CleanupTestEnv()

	bootPeer := newPeerNode(newGossipConfig(0), newCommitter(0), noopPeerIdentityAcceptor)
	defer bootPeer.shutdown()

	peer := newPeerNode(newGossipConfig(1, 0), newCommitter(1), noopPeerIdentityAcceptor)
	defer peer.shutdown()

	naiveStateMsgPredicate := func(message interface{}) bool {
		return message.(proto.ReceivedMessage).GetGossipMessage().IsRemoteStateMessage()
	}

	_, bootCh := bootPeer.g.Accept(naiveStateMsgPredicate, true)
	_, peerCh := peer.g.Accept(naiveStateMsgPredicate, true)

	wg := sync.WaitGroup{}
	wg.Add(2)

	go func() {
		msg := <-bootCh
		logger.Info("Bootstrap node got message, ", msg)
		assert.True(t, msg.GetGossipMessage().GetStateRequest() != nil)
		msg.Respond(&proto.GossipMessage{
			Content: &proto.GossipMessage_StateResponse{StateResponse: &proto.RemoteStateResponse{Payloads: nil}},
		})
		wg.Done()
	}()

	go func() {
		msg := <-peerCh
		logger.Info("Peer node got an answer, ", msg)
		assert.True(t, msg.GetGossipMessage().GetStateResponse() != nil)
		wg.Done()
	}()

	readyCh := make(chan struct{})
	go func() {
		wg.Wait()
		readyCh <- struct{}{}
	}()

	time.Sleep(5 * time.Second)
	logger.Info("Sending gossip message with remote state request")

	chainID := common.ChainID(util.GetTestChainID())
	remote := peer.g.PeersOfChannel(chainID)[0]

	peer.g.Send(&proto.GossipMessage{
		Content: &proto.GossipMessage_StateRequest{StateRequest: &proto.RemoteStateRequest{StartSeqNum: 0, EndSeqNum: 1}},
	}, &comm.RemotePeer{Endpoint: remote.Endpoint, PKIID: remote.PKIid})
	logger.Info("Waiting until peers exchange messages")

	select {
	case <-readyCh:
		logger.Info("Done")
	case <-time.After(10 * time.Second):
		t.Fail()
	}
}

// Start one bootstrap peer and submit defAntiEntropyBatchSize + 5 messages into
// the local ledger, then spawn a new peer waiting for the anti-entropy procedure to
// complete the missing blocks. Since state transfer messages are now batched, it is
// expected to see exactly two messages with a state transfer response.
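// As a rough sketch of the arithmetic (assuming defAntiEntropyBatchSize == 10,
// its default in this package at the time of writing):
//   msgCount = defAntiEntropyBatchSize + 5 = 15 missing blocks
//   batches  = ceil(15 / 10)               = 2 state response messages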
func TestNewGossipStateProvider_BatchingOfStateRequest(t *testing.T) {
	viper.Set("peer.fileSystemPath", "/tmp/tests/ledger/node")
	ledgermgmt.InitializeTestEnv()
	defer ledgermgmt.CleanupTestEnv()

	bootPeer := newPeerNode(newGossipConfig(0), newCommitter(0), noopPeerIdentityAcceptor)
	defer bootPeer.shutdown()

	msgCount := defAntiEntropyBatchSize + 5
	expectedMessagesCnt := 2

	for i := 1; i <= msgCount; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootPeer.s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	peer := newPeerNode(newGossipConfig(1, 0), newCommitter(1), noopPeerIdentityAcceptor)
	defer peer.shutdown()

	naiveStateMsgPredicate := func(message interface{}) bool {
		return message.(proto.ReceivedMessage).GetGossipMessage().IsRemoteStateMessage()
	}
	_, peerCh := peer.g.Accept(naiveStateMsgPredicate, true)

	messageCh := make(chan struct{})
	stopWaiting := make(chan struct{})

	// The number of submitted messages is defAntiEntropyBatchSize + 5, therefore
	// the expected number of batches is expectedMessagesCnt = 2. The following
	// goroutine makes sure it receives the expected number of messages and
	// signals success to continue the test
	go func(expected int) {
		cnt := 0
		for cnt < expected {
			select {
			case <-peerCh:
				cnt++
			case <-stopWaiting:
				return
			}
		}

		messageCh <- struct{}{}
	}(expectedMessagesCnt)

	// Wait for the signal that the expected number of message batches was received,
	// otherwise time out after 2*defAntiEntropyInterval + 1 second
	select {
	case <-messageCh:
		// Once we know two batches were received,
		// make sure the messages were indeed committed.
		waitUntilTrueOrTimeout(t, func() bool {
			if len(peer.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != 1 {
				logger.Debug("Peer discovery has not finished yet")
				return false
			}
			logger.Debug("All peers discovered each other")
			return true
		}, 30*time.Second)

		logger.Debug("Waiting for all blocks to arrive.")
		waitUntilTrueOrTimeout(t, func() bool {
			logger.Debug("Checking that the peer got all blocks")
			height, err := peer.commit.LedgerHeight()
			if height != uint64(msgCount+1) || err != nil {
				return false
			}
			logger.Debug("The peer has the expected ledger height")
			return true
		}, 60*time.Second)
	case <-time.After(defAntiEntropyInterval*2 + time.Second*1):
		close(stopWaiting)
		t.Fatal("Expected to receive two batches with missing payloads")
	}
}
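
// waitUntilTrueOrTimeout polls predicate once per second and fails the test
// if it does not become true within timeout.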
func waitUntilTrueOrTimeout(t *testing.T, predicate func() bool, timeout time.Duration) {
	ch := make(chan struct{})
	go func() {
		logger.Debug("Started polling until the predicate is satisfied.")
		for !predicate() {
			time.Sleep(1 * time.Second)
		}
		ch <- struct{}{}
		logger.Debug("Done.")
	}()

	select {
	case <-ch:
	case <-time.After(timeout):
		t.Fatal("Timeout has expired")
	}
	logger.Debug("Stopped waiting: predicate satisfied or timeout expired")
}