github.com/myafeier/fabric@v1.0.1-0.20170722181825-3a4b1f2bce86/gossip/state/state_test.go

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package state

import (
	"bytes"
	"errors"
	"fmt"
	"strconv"
	"sync"
	"testing"
	"time"

	pb "github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric/common/configtx/test"
	"github.com/hyperledger/fabric/common/util"
	"github.com/hyperledger/fabric/core/committer"
	"github.com/hyperledger/fabric/core/ledger/ledgermgmt"
	"github.com/hyperledger/fabric/core/mocks/validator"
	"github.com/hyperledger/fabric/gossip/api"
	"github.com/hyperledger/fabric/gossip/comm"
	"github.com/hyperledger/fabric/gossip/common"
	"github.com/hyperledger/fabric/gossip/gossip"
	"github.com/hyperledger/fabric/gossip/identity"
	"github.com/hyperledger/fabric/gossip/state/mocks"
	gutil "github.com/hyperledger/fabric/gossip/util"
	pcomm "github.com/hyperledger/fabric/protos/common"
	proto "github.com/hyperledger/fabric/protos/gossip"
	"github.com/spf13/viper"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

var (
	portPrefix = 5610
)

var orgID = []byte("ORG1")

type peerIdentityAcceptor func(identity api.PeerIdentityType) error

var noopPeerIdentityAcceptor = func(identity api.PeerIdentityType) error {
	return nil
}

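// joinChanMsg is a minimal stub of api.JoinChannelMessage used by these tests.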
type joinChanMsg struct {
}

func init() {
	gutil.SetupTestLogging()
}

// SequenceNumber returns the sequence number of the block that the message
// is derived from
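// (these tests use the current time, so each message appears newer than the previous one).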
func (*joinChanMsg) SequenceNumber() uint64 {
	return uint64(time.Now().UnixNano())
}

// Members returns the organizations of the channel
func (jcm *joinChanMsg) Members() []api.OrgIdentityType {
	return []api.OrgIdentityType{orgID}
}

// AnchorPeersOf returns the anchor peers of the given organization
func (jcm *joinChanMsg) AnchorPeersOf(org api.OrgIdentityType) []api.AnchorPeer {
	return []api.AnchorPeer{}
}

type orgCryptoService struct {
}

// OrgByPeerIdentity returns the OrgIdentityType
// of a given peer identity
func (*orgCryptoService) OrgByPeerIdentity(identity api.PeerIdentityType) api.OrgIdentityType {
	return orgID
}

// Verify verifies a JoinChannelMessage, returns nil on success,
// and an error on failure
func (*orgCryptoService) Verify(joinChanMsg api.JoinChannelMessage) error {
	return nil
}

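// cryptoServiceMock is a mock api.MessageCryptoService; its acceptor callback
// decides which peer identities VerifyByChannel accepts.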
type cryptoServiceMock struct {
	acceptor peerIdentityAcceptor
}

// GetPKIidOfCert returns the PKI-ID of a peer's identity
func (*cryptoServiceMock) GetPKIidOfCert(peerIdentity api.PeerIdentityType) common.PKIidType {
	return common.PKIidType(peerIdentity)
}

// VerifyBlock returns nil if the block is properly signed,
// else returns error
func (*cryptoServiceMock) VerifyBlock(chainID common.ChainID, seqNum uint64, signedBlock []byte) error {
	return nil
}

// Sign signs msg with this peer's signing key and outputs
// the signature if no error occurred.
func (*cryptoServiceMock) Sign(msg []byte) ([]byte, error) {
	clone := make([]byte, len(msg))
	copy(clone, msg)
	return clone, nil
}

// Verify checks that signature is a valid signature of message under a peer's verification key.
// If the verification succeeded, Verify returns nil meaning no error occurred.
// If peerCert is nil, then the signature is verified against this peer's verification key.
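// Since Sign above simply echoes the message, a signature is valid here iff it is
// byte-equal to the message.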
func (*cryptoServiceMock) Verify(peerIdentity api.PeerIdentityType, signature, message []byte) error {
	equal := bytes.Equal(signature, message)
	if !equal {
		return fmt.Errorf("Wrong signature:%v, %v", signature, message)
	}
	return nil
}

// VerifyByChannel checks that signature is a valid signature of message
// under a peer's verification key, but also in the context of a specific channel.
// If the verification succeeded, Verify returns nil meaning no error occurred.
// If peerIdentity is nil, then the signature is verified against this peer's verification key.
func (cs *cryptoServiceMock) VerifyByChannel(chainID common.ChainID, peerIdentity api.PeerIdentityType, signature, message []byte) error {
	return cs.acceptor(peerIdentity)
}

func (*cryptoServiceMock) ValidateIdentity(peerIdentity api.PeerIdentityType) error {
	return nil
}

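// bootPeers returns the gossip endpoints for the given peer ids
// (peer id i listens on port portPrefix+i).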
func bootPeers(ids ...int) []string {
	peers := []string{}
	for _, id := range ids {
		peers = append(peers, fmt.Sprintf("localhost:%d", id+portPrefix))
	}
	return peers
}

// Simple representation of a peer which includes only the
// communication module, gossip and state transfer
type peerNode struct {
	port   int
	g      gossip.Gossip
	s      GossipStateProvider
	cs     *cryptoServiceMock
	commit committer.Committer
}

// shutdown stops all modules used by the peer node
func (node *peerNode) shutdown() {
	node.s.Stop()
	node.g.Stop()
}

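// mockCommitter is a thread-safe testify mock of the committer.Committer interface.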
type mockCommitter struct {
	mock.Mock
	sync.Mutex
}

func (mc *mockCommitter) Commit(block *pcomm.Block) error {
	mc.Called(block)
	return nil
}

func (mc *mockCommitter) LedgerHeight() (uint64, error) {
	mc.Lock()
	defer mc.Unlock()
	args := mc.Called()
	if args.Get(1) == nil {
		return args.Get(0).(uint64), nil
	}
	return args.Get(0).(uint64), args.Get(1).(error)
}

func (mc *mockCommitter) GetBlocks(blockSeqs []uint64) []*pcomm.Block {
	args := mc.Called(blockSeqs)
	if args.Get(0) == nil {
		return nil
	}
	return args.Get(0).([]*pcomm.Block)
}

func (*mockCommitter) Close() {
}

// Default configuration to be used for gossip and communication modules
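// Peer with id i binds to port portPrefix+i, so peer 0 listens on localhost:5610.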
func newGossipConfig(id int, boot ...int) *gossip.Config {
	port := id + portPrefix
	return &gossip.Config{
		BindPort:                   port,
		BootstrapPeers:             bootPeers(boot...),
		ID:                         fmt.Sprintf("p%d", id),
		MaxBlockCountToStore:       0,
		MaxPropagationBurstLatency: time.Duration(10) * time.Millisecond,
		MaxPropagationBurstSize:    10,
		PropagateIterations:        1,
		PropagatePeerNum:           3,
		PullInterval:               time.Duration(4) * time.Second,
		PullPeerNum:                5,
		InternalEndpoint:           fmt.Sprintf("localhost:%d", port),
		PublishCertPeriod:          10 * time.Second,
		RequestStateInfoInterval:   4 * time.Second,
		PublishStateInfoInterval:   4 * time.Second,
	}
}

// Create gossip instance
func newGossipInstance(config *gossip.Config, mcs api.MessageCryptoService) gossip.Gossip {
	id := api.PeerIdentityType(config.InternalEndpoint)
	idMapper := identity.NewIdentityMapper(mcs, id)
	return gossip.NewGossipServiceWithServer(config, &orgCryptoService{}, mcs,
		idMapper, id, nil)
}

// Create a new instance of KVLedger to be used for testing
func newCommitter(id int) committer.Committer {
	cb, _ := test.MakeGenesisBlock(strconv.Itoa(id))
	ledger, _ := ledgermgmt.CreateLedger(cb)
	return committer.NewLedgerCommitter(ledger, &validator.MockValidator{})
}

// Constructs a pseudo peer node, simulating only the gossip and state transfer parts
func newPeerNodeWithGossip(config *gossip.Config, committer committer.Committer, acceptor peerIdentityAcceptor, g gossip.Gossip) *peerNode {
	cs := &cryptoServiceMock{acceptor: acceptor}
	// Gossip component based on configuration provided and communication module
	if g == nil {
		g = newGossipInstance(config, &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor})
	}

	logger.Debug("Joining channel", util.GetTestChainID())
	g.JoinChan(&joinChanMsg{}, common.ChainID(util.GetTestChainID()))

	// Initialize pseudo peer simulator, which has only three
	// basic parts

	sp := NewGossipStateProvider(util.GetTestChainID(), g, committer, cs)
	if sp == nil {
		return nil
	}

	return &peerNode{
		port:   config.BindPort,
		g:      g,
		s:      sp,
		commit: committer,
		cs:     cs,
	}
}

// Constructs a pseudo peer node, simulating only the gossip and state transfer parts
func newPeerNode(config *gossip.Config, committer committer.Committer, acceptor peerIdentityAcceptor) *peerNode {
	return newPeerNodeWithGossip(config, committer, acceptor, nil)
}
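
// A typical construction in the tests below looks like:
//
//	p := newPeerNode(newGossipConfig(0), newCommitter(0), noopPeerIdentityAcceptor)
//	defer p.shutdown()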

func TestNilDirectMsg(t *testing.T) {
	mc := &mockCommitter{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(<-chan proto.ReceivedMessage))
	p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)
	defer p.shutdown()
	p.s.(*GossipStateProviderImpl).handleStateRequest(nil)
	p.s.(*GossipStateProviderImpl).directMessage(nil)
	sMsg, _ := p.s.(*GossipStateProviderImpl).stateRequestMessage(uint64(10), uint64(8)).NoopSign()
	req := &comm.ReceivedMessageImpl{
		SignedGossipMessage: sMsg,
	}
	p.s.(*GossipStateProviderImpl).directMessage(req)
}

func TestNilAddPayload(t *testing.T) {
	mc := &mockCommitter{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(<-chan proto.ReceivedMessage))
	p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)
	defer p.shutdown()
	err := p.s.AddPayload(nil)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "nil")
}

func TestAddPayloadLedgerUnavailable(t *testing.T) {
	mc := &mockCommitter{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(<-chan proto.ReceivedMessage))
	p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)
	defer p.shutdown()
	// Simulate a problem in the ledger
	failedLedger := mock.Mock{}
	failedLedger.On("LedgerHeight", mock.Anything).Return(uint64(0), errors.New("cannot query ledger"))
	mc.Lock()
	mc.Mock = failedLedger
	mc.Unlock()

	rawblock := pcomm.NewBlock(uint64(1), []byte{})
	b, _ := pb.Marshal(rawblock)
	err := p.s.AddPayload(&proto.Payload{
		SeqNum: uint64(1),
		Data:   b,
	})
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "Failed obtaining ledger height")
	assert.Contains(t, err.Error(), "cannot query ledger")
}

func TestOverPopulation(t *testing.T) {
	// Scenario: Add blocks to the state provider
	// with a gap in between, and ensure that the payload buffer
	// rejects blocks once the distance between the ledger height and the latest
	// block it contains exceeds defMaxBlockDistance.

	mc := &mockCommitter{}
	blocksPassedToLedger := make(chan uint64, 10)
	mc.On("Commit", mock.Anything).Run(func(arg mock.Arguments) {
		blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
	})
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(<-chan proto.ReceivedMessage))
	p := newPeerNode(newGossipConfig(0), mc, noopPeerIdentityAcceptor)
	defer p.shutdown()

	// Add some blocks in a sequential manner and make sure it works
	for i := 1; i <= 4; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		b, _ := pb.Marshal(rawblock)
		assert.NoError(t, p.s.AddPayload(&proto.Payload{
			SeqNum: uint64(i),
			Data:   b,
		}))
	}

	// Add payloads from 10 to defMaxBlockDistance, while we're missing blocks [5,9]
	// Should succeed
	for i := 10; i <= defMaxBlockDistance; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		b, _ := pb.Marshal(rawblock)
		assert.NoError(t, p.s.AddPayload(&proto.Payload{
			SeqNum: uint64(i),
			Data:   b,
		}))
	}

	// Add payloads from defMaxBlockDistance + 1 to defMaxBlockDistance * 10
	// Should fail.
	for i := defMaxBlockDistance + 1; i <= defMaxBlockDistance*10; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		b, _ := pb.Marshal(rawblock)
		assert.Error(t, p.s.AddPayload(&proto.Payload{
			SeqNum: uint64(i),
			Data:   b,
		}))
	}

	// Ensure only blocks 1-4 were passed to the ledger
	close(blocksPassedToLedger)
	i := 1
	for seq := range blocksPassedToLedger {
		assert.Equal(t, uint64(i), seq)
		i++
	}
	assert.Equal(t, 5, i)

	// Ensure we don't store too many blocks in memory
	sp := p.s.(*GossipStateProviderImpl)
	assert.True(t, sp.payloads.Size() < defMaxBlockDistance)
}

func TestFailures(t *testing.T) {
	mc := &mockCommitter{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(0), nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(<-chan proto.ReceivedMessage))
	assert.Panics(t, func() {
		newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)
	})
	// Reprogram mock
	mc.Mock = mock.Mock{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), errors.New("Failed accessing ledger"))
	assert.Nil(t, newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g))
	// Reprogram mock
	mc.Mock = mock.Mock{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	mc.On("GetBlocks", mock.Anything).Return(nil)
	p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)
	assert.Nil(t, p.s.GetBlock(uint64(1)))
}

func TestGossipReception(t *testing.T) {
	signalChan := make(chan struct{})
	rawblock := &pcomm.Block{
		Header: &pcomm.BlockHeader{
			Number: uint64(1),
		},
		Data: &pcomm.BlockData{
			Data: [][]byte{},
		},
	}
	b, _ := pb.Marshal(rawblock)

	createChan := func(signalChan chan struct{}) <-chan *proto.GossipMessage {
		c := make(chan *proto.GossipMessage)
		gMsg := &proto.GossipMessage{
			Channel: []byte("AAA"),
			Content: &proto.GossipMessage_DataMsg{
				DataMsg: &proto.DataMessage{
					Payload: &proto.Payload{
						SeqNum: 1,
						Data:   b,
					},
				},
			},
		}
		go func(c chan *proto.GossipMessage) {
			// Wait for Accept() to be called
			<-signalChan
			// Simulate reception of a message from the gossip component with an invalid channel
			c <- gMsg
			gMsg.Channel = []byte(util.GetTestChainID())
			// Simulate reception of a message from the gossip component
			c <- gMsg
		}(c)
		return c
	}

	g := &mocks.GossipMock{}
	rmc := createChan(signalChan)
	g.On("Accept", mock.Anything, false).Return(rmc, nil).Run(func(_ mock.Arguments) {
		signalChan <- struct{}{}
	})
	g.On("Accept", mock.Anything, true).Return(nil, make(<-chan proto.ReceivedMessage))
	mc := &mockCommitter{}
	receivedChan := make(chan struct{})
	mc.On("Commit", mock.Anything).Run(func(arguments mock.Arguments) {
		block := arguments.Get(0).(*pcomm.Block)
		assert.Equal(t, uint64(1), block.Header.Number)
		receivedChan <- struct{}{}
	})
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)
	defer p.shutdown()
	select {
	case <-receivedChan:
	case <-time.After(time.Second * 15):
		assert.Fail(t, "Didn't commit a block in a timely manner")
	}
}

func TestAccessControl(t *testing.T) {
	viper.Set("peer.fileSystemPath", "/tmp/tests/ledger/node")
	ledgermgmt.InitializeTestEnv()
	defer ledgermgmt.CleanupTestEnv()

	bootstrapSetSize := 5
	bootstrapSet := make([]*peerNode, 0)

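	// Only these endpoints may pull blocks; with portPrefix = 5610 they
	// belong to peers 0, 5, 8 and 11.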
	authorizedPeers := map[string]struct{}{
		"localhost:5610": {},
		"localhost:5615": {},
		"localhost:5618": {},
		"localhost:5621": {},
	}

	blockPullPolicy := func(identity api.PeerIdentityType) error {
		if _, isAuthorized := authorizedPeers[string(identity)]; isAuthorized {
			return nil
		}
		return errors.New("Not authorized")
	}

	for i := 0; i < bootstrapSetSize; i++ {
		commit := newCommitter(i)
		bootstrapSet = append(bootstrapSet, newPeerNode(newGossipConfig(i), commit, blockPullPolicy))
	}

	defer func() {
		for _, p := range bootstrapSet {
			p.shutdown()
		}
	}()

	msgCount := 5

	for i := 1; i <= msgCount; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootstrapSet[0].s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	standardPeerSetSize := 10
	peersSet := make([]*peerNode, 0)

	for i := 0; i < standardPeerSetSize; i++ {
		commit := newCommitter(bootstrapSetSize + i)
		peersSet = append(peersSet, newPeerNode(newGossipConfig(bootstrapSetSize+i, 0, 1, 2, 3, 4), commit, blockPullPolicy))
	}

	defer func() {
		for _, p := range peersSet {
			p.shutdown()
		}
	}()

	waitUntilTrueOrTimeout(t, func() bool {
		for _, p := range peersSet {
			if len(p.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != bootstrapSetSize+standardPeerSetSize-1 {
				logger.Debug("Peer discovery has not finished yet")
				return false
			}
		}
		logger.Debug("All peers discovered each other")
		return true
	}, 30*time.Second)

	logger.Debug("Waiting for all blocks to arrive.")
	waitUntilTrueOrTimeout(t, func() bool {
		logger.Debug("Trying to see that all authorized peers got all the blocks, and all unauthorized peers didn't")
		for _, p := range peersSet {
			height, err := p.commit.LedgerHeight()
			id := fmt.Sprintf("localhost:%d", p.port)
			if _, isAuthorized := authorizedPeers[id]; isAuthorized {
				if height != uint64(msgCount+1) || err != nil {
					return false
				}
			} else {
				if err == nil && height > 1 {
					assert.Fail(t, "Peer", id, "got message but isn't authorized! Height:", height)
				}
			}
		}
		logger.Debug("All peers have the expected ledger height")
		return true
	}, 60*time.Second)
}

/*// Simple scenario: start the first bootstrap node, gossip a message,
// then start a second node and verify that the second node also receives it
func TestNewGossipStateProvider_GossipingOneMessage(t *testing.T) {
	bootId := 0
	ledgerPath := "/tmp/tests/ledger/"
	defer os.RemoveAll(ledgerPath)

	bootNodeCommitter := newCommitter(bootId, ledgerPath + "node/")
	defer bootNodeCommitter.Close()

	bootNode := newPeerNode(newGossipConfig(bootId, 100), bootNodeCommitter)
	defer bootNode.shutdown()

	rawblock := &peer.Block2{}
	if err := pb.Unmarshal([]byte{}, rawblock); err != nil {
		t.Fail()
	}

	if bytes, err := pb.Marshal(rawblock); err == nil {
		payload := &proto.Payload{1, "", bytes}
		bootNode.s.AddPayload(payload)
	} else {
		t.Fail()
	}

	waitUntilTrueOrTimeout(t, func() bool {
		if block := bootNode.s.GetBlock(uint64(1)); block != nil {
			return true
		}
		return false
	}, 5 * time.Second)

	bootNode.g.Gossip(createDataMsg(uint64(1), []byte{}, ""))

	peerCommitter := newCommitter(1, ledgerPath + "node/")
	defer peerCommitter.Close()

	peer := newPeerNode(newGossipConfig(1, 100, bootId), peerCommitter)
	defer peer.shutdown()

	ready := make(chan interface{})

	go func(p *peerNode) {
		for len(p.g.GetPeers()) != 1 {
			time.Sleep(100 * time.Millisecond)
		}
		ready <- struct{}{}
	}(peer)

	select {
	case <-ready:
		{
			break
		}
	case <-time.After(1 * time.Second):
		{
			t.Fail()
		}
	}

	// Make sure anti-entropy has a chance to bring the missing block
	waitUntilTrueOrTimeout(t, func() bool {
		if block := peer.s.GetBlock(uint64(1)); block != nil {
			return true
		}
		return false
	}, 2 * defAntiEntropyInterval + 1 * time.Second)

	block := peer.s.GetBlock(uint64(1))

	assert.NotNil(t, block)
}

func TestNewGossipStateProvider_RepeatGossipingOneMessage(t *testing.T) {
	for i := 0; i < 10; i++ {
		TestNewGossipStateProvider_GossipingOneMessage(t)
	}
}*/

func TestNewGossipStateProvider_SendingManyMessages(t *testing.T) {
	viper.Set("peer.fileSystemPath", "/tmp/tests/ledger/node")
	ledgermgmt.InitializeTestEnv()
	defer ledgermgmt.CleanupTestEnv()

	bootstrapSetSize := 5
	bootstrapSet := make([]*peerNode, 0)

	for i := 0; i < bootstrapSetSize; i++ {
		commit := newCommitter(i)
		bootstrapSet = append(bootstrapSet, newPeerNode(newGossipConfig(i), commit, noopPeerIdentityAcceptor))
	}

	defer func() {
		for _, p := range bootstrapSet {
			p.shutdown()
		}
	}()

	msgCount := 10

	for i := 1; i <= msgCount; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootstrapSet[0].s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	standardPeersSize := 10
	peersSet := make([]*peerNode, 0)

	for i := 0; i < standardPeersSize; i++ {
		commit := newCommitter(bootstrapSetSize + i)
		peersSet = append(peersSet, newPeerNode(newGossipConfig(bootstrapSetSize+i, 0, 1, 2, 3, 4), commit, noopPeerIdentityAcceptor))
	}

	defer func() {
		for _, p := range peersSet {
			p.shutdown()
		}
	}()

	waitUntilTrueOrTimeout(t, func() bool {
		for _, p := range peersSet {
			if len(p.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != bootstrapSetSize+standardPeersSize-1 {
				logger.Debug("Peer discovery has not finished yet")
				return false
			}
		}
		logger.Debug("All peers discovered each other")
		return true
	}, 30*time.Second)

	logger.Debug("Waiting for all blocks to arrive.")
	waitUntilTrueOrTimeout(t, func() bool {
		logger.Debug("Trying to see that all peers got all the blocks")
		for _, p := range peersSet {
			height, err := p.commit.LedgerHeight()
			if height != uint64(msgCount+1) || err != nil {
				return false
			}
		}
		logger.Debug("All peers have the same ledger height")
		return true
	}, 60*time.Second)
}

func TestGossipStateProvider_TestStateMessages(t *testing.T) {
	viper.Set("peer.fileSystemPath", "/tmp/tests/ledger/node")
	ledgermgmt.InitializeTestEnv()
	defer ledgermgmt.CleanupTestEnv()

	bootPeer := newPeerNode(newGossipConfig(0), newCommitter(0), noopPeerIdentityAcceptor)
	defer bootPeer.shutdown()

	peer := newPeerNode(newGossipConfig(1, 0), newCommitter(1), noopPeerIdentityAcceptor)
	defer peer.shutdown()

	naiveStateMsgPredicate := func(message interface{}) bool {
		return message.(proto.ReceivedMessage).GetGossipMessage().IsRemoteStateMessage()
	}

	_, bootCh := bootPeer.g.Accept(naiveStateMsgPredicate, true)
	_, peerCh := peer.g.Accept(naiveStateMsgPredicate, true)

	wg := sync.WaitGroup{}
	wg.Add(2)

	go func() {
		msg := <-bootCh
		logger.Info("Bootstrap node got message, ", msg)
		assert.True(t, msg.GetGossipMessage().GetStateRequest() != nil)
		msg.Respond(&proto.GossipMessage{
			Content: &proto.GossipMessage_StateResponse{&proto.RemoteStateResponse{nil}},
		})
		wg.Done()
	}()

	go func() {
		msg := <-peerCh
		logger.Info("Peer node got an answer, ", msg)
		assert.True(t, msg.GetGossipMessage().GetStateResponse() != nil)
		wg.Done()
	}()

	readyCh := make(chan struct{})
	go func() {
		wg.Wait()
		readyCh <- struct{}{}
	}()

	time.Sleep(time.Duration(5) * time.Second)
	logger.Info("Sending gossip message with remote state request")

	chainID := common.ChainID(util.GetTestChainID())

	peer.g.Send(&proto.GossipMessage{
		Content: &proto.GossipMessage_StateRequest{&proto.RemoteStateRequest{0, 1}},
	}, &comm.RemotePeer{peer.g.PeersOfChannel(chainID)[0].Endpoint, peer.g.PeersOfChannel(chainID)[0].PKIid})
	logger.Info("Waiting until peers exchange messages")

	select {
	case <-readyCh:
		logger.Info("Done")
	case <-time.After(time.Duration(10) * time.Second):
		t.Fail()
	}
}

// Start one bootstrap peer and add defAntiEntropyBatchSize + 5 messages to its
// local ledger, then spawn a new peer and wait for the anti-entropy procedure
// to complete the missing blocks. Since state transfer messages are now batched,
// _exactly_ two messages with state transfer responses are expected.
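// For example, assuming defAntiEntropyBatchSize is 10, the 15 missing blocks
// would be served as ceil(15/10) = 2 response batches.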
func TestNewGossipStateProvider_BatchingOfStateRequest(t *testing.T) {
	viper.Set("peer.fileSystemPath", "/tmp/tests/ledger/node")
	ledgermgmt.InitializeTestEnv()
	defer ledgermgmt.CleanupTestEnv()

	bootPeer := newPeerNode(newGossipConfig(0), newCommitter(0), noopPeerIdentityAcceptor)
	defer bootPeer.shutdown()

	msgCount := defAntiEntropyBatchSize + 5
	expectedMessagesCnt := 2

	for i := 1; i <= msgCount; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootPeer.s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	peer := newPeerNode(newGossipConfig(1, 0), newCommitter(1), noopPeerIdentityAcceptor)
	defer peer.shutdown()

	naiveStateMsgPredicate := func(message interface{}) bool {
		return message.(proto.ReceivedMessage).GetGossipMessage().IsRemoteStateMessage()
	}
	_, peerCh := peer.g.Accept(naiveStateMsgPredicate, true)

	messageCh := make(chan struct{})
	stopWaiting := make(chan struct{})

	// The number of submitted messages is defAntiEntropyBatchSize + 5, therefore
	// the expected number of batches is expectedMessagesCnt = 2. The following
	// goroutine makes sure it receives the expected number of messages and then
	// signals success so the test can continue.
	go func(expected int) {
		cnt := 0
		for cnt < expected {
			select {
			case <-peerCh:
				cnt++
			case <-stopWaiting:
				return
			}
		}

		messageCh <- struct{}{}
	}(expectedMessagesCnt)

	// Wait for a signal that the expected number of message batches was received,
	// otherwise time out after 2 * defAntiEntropyInterval + 1 second
	select {
	case <-messageCh:
		// Once we know that two batches were received,
		// make sure the messages were indeed committed.
		waitUntilTrueOrTimeout(t, func() bool {
			if len(peer.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != 1 {
				logger.Debug("Peer discovery has not finished yet")
				return false
			}
			logger.Debug("All peers discovered each other")
			return true
		}, 30*time.Second)

		logger.Debug("Waiting for all blocks to arrive.")
		waitUntilTrueOrTimeout(t, func() bool {
			logger.Debug("Trying to see that the peer got all the blocks")
			height, err := peer.commit.LedgerHeight()
			if height != uint64(msgCount+1) || err != nil {
				return false
			}
			logger.Debug("The peer has the expected ledger height")
			return true
		}, 60*time.Second)
	case <-time.After(defAntiEntropyInterval*2 + time.Second*1):
		close(stopWaiting)
		t.Fatal("Expected to receive two batches with missing payloads")
	}
}

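// waitUntilTrueOrTimeout polls the predicate once a second until it yields true,
// failing the test if the timeout expires first.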
func waitUntilTrueOrTimeout(t *testing.T, predicate func() bool, timeout time.Duration) {
	ch := make(chan struct{}, 1) // buffered, so the polling goroutine can exit even after a timeout
	go func() {
		logger.Debug("Started to poll until the predicate is satisfied.")
		for !predicate() {
			time.Sleep(1 * time.Second)
		}
		ch <- struct{}{}
		logger.Debug("Done.")
	}()

	select {
	case <-ch:
	case <-time.After(timeout):
		t.Fatal("Timeout has expired")
	}
	logger.Debug("Stopped waiting: predicate satisfied or timeout expired")
}