github.com/inklabsfoundation/inkchain@v0.17.1-0.20181025012015-c3cef8062f19/gossip/state/state_test.go

/*
Copyright IBM Corp. 2016 All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package state

import (
	"bytes"
	"errors"
	"fmt"
	"strconv"
	"sync"
	"testing"
	"time"

	pb "github.com/golang/protobuf/proto"
	"github.com/inklabsfoundation/inkchain/common/configtx/test"
	"github.com/inklabsfoundation/inkchain/common/util"
	"github.com/inklabsfoundation/inkchain/core/committer"
	"github.com/inklabsfoundation/inkchain/core/ledger/ledgermgmt"
	"github.com/inklabsfoundation/inkchain/core/mocks/validator"
	"github.com/inklabsfoundation/inkchain/gossip/api"
	"github.com/inklabsfoundation/inkchain/gossip/comm"
	"github.com/inklabsfoundation/inkchain/gossip/common"
	"github.com/inklabsfoundation/inkchain/gossip/gossip"
	"github.com/inklabsfoundation/inkchain/gossip/identity"
	"github.com/inklabsfoundation/inkchain/gossip/state/mocks"
	gutil "github.com/inklabsfoundation/inkchain/gossip/util"
	pcomm "github.com/inklabsfoundation/inkchain/protos/common"
	proto "github.com/inklabsfoundation/inkchain/protos/gossip"
	"github.com/spf13/viper"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

var (
	portPrefix = 5610
)

var orgID = []byte("ORG1")

type peerIdentityAcceptor func(identity api.PeerIdentityType) error

var noopPeerIdentityAcceptor = func(identity api.PeerIdentityType) error {
	return nil
}

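// joinChanMsg is a stub api.JoinChannelMessage used to join the test channel.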
type joinChanMsg struct {
}

func init() {
	gutil.SetupTestLogging()
}

// SequenceNumber returns the sequence number of the block that the message
// is derived from
func (*joinChanMsg) SequenceNumber() uint64 {
	return uint64(time.Now().UnixNano())
}

// Members returns the organizations of the channel
func (jcm *joinChanMsg) Members() []api.OrgIdentityType {
	return []api.OrgIdentityType{orgID}
}

// AnchorPeersOf returns the anchor peers of the given organization
func (jcm *joinChanMsg) AnchorPeersOf(org api.OrgIdentityType) []api.AnchorPeer {
	return []api.AnchorPeer{}
}

type orgCryptoService struct {
}

// OrgByPeerIdentity returns the OrgIdentityType
// of a given peer identity
func (*orgCryptoService) OrgByPeerIdentity(identity api.PeerIdentityType) api.OrgIdentityType {
	return orgID
}

// Verify verifies a JoinChannelMessage, returns nil on success,
// and an error on failure
func (*orgCryptoService) Verify(joinChanMsg api.JoinChannelMessage) error {
	return nil
}

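// cryptoServiceMock is a no-op api.MessageCryptoService whose per-channel
// verification is delegated to the configured peerIdentityAcceptor.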
type cryptoServiceMock struct {
	acceptor peerIdentityAcceptor
}

// GetPKIidOfCert returns the PKI-ID of a peer's identity
func (*cryptoServiceMock) GetPKIidOfCert(peerIdentity api.PeerIdentityType) common.PKIidType {
	return common.PKIidType(peerIdentity)
}

// VerifyBlock returns nil if the block is properly signed,
// else returns an error
func (*cryptoServiceMock) VerifyBlock(chainID common.ChainID, seqNum uint64, signedBlock []byte) error {
	return nil
}

// Sign signs msg with this peer's signing key and outputs
// the signature if no error occurred.
func (*cryptoServiceMock) Sign(msg []byte) ([]byte, error) {
	clone := make([]byte, len(msg))
	copy(clone, msg)
	return clone, nil
}

// Verify checks that signature is a valid signature of message under a peer's verification key.
// If the verification succeeded, Verify returns nil meaning no error occurred.
// If peerIdentity is nil, then the signature is verified against this peer's verification key.
func (*cryptoServiceMock) Verify(peerIdentity api.PeerIdentityType, signature, message []byte) error {
	equal := bytes.Equal(signature, message)
	if !equal {
		return fmt.Errorf("Wrong signature: %v, %v", signature, message)
	}
	return nil
}

// VerifyByChannel checks that signature is a valid signature of message
// under a peer's verification key, but also in the context of a specific channel.
// If the verification succeeded, Verify returns nil meaning no error occurred.
// If peerIdentity is nil, then the signature is verified against this peer's verification key.
func (cs *cryptoServiceMock) VerifyByChannel(chainID common.ChainID, peerIdentity api.PeerIdentityType, signature, message []byte) error {
	return cs.acceptor(peerIdentity)
}

func (*cryptoServiceMock) ValidateIdentity(peerIdentity api.PeerIdentityType) error {
	return nil
}

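// bootPeers maps peer ids to their gossip endpoints on localhost.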
func bootPeers(ids ...int) []string {
	peers := []string{}
	for _, id := range ids {
		peers = append(peers, fmt.Sprintf("localhost:%d", id+portPrefix))
	}
	return peers
}

// peerNode is a simple representation of a peer, comprising only the
// communication module, gossip, and the state transfer provider
type peerNode struct {
	port   int
	g      gossip.Gossip
	s      GossipStateProvider
	cs     *cryptoServiceMock
	commit committer.Committer
}

// shutdown stops all modules the node uses
func (node *peerNode) shutdown() {
	node.s.Stop()
	node.g.Stop()
}

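// mockCommitter is a programmable committer.Committer mock, used to simulate
// ledger behavior such as commit tracking and failing height queries.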
type mockCommitter struct {
	mock.Mock
	sync.Mutex
}

func (mc *mockCommitter) Commit(block *pcomm.Block) error {
	mc.Called(block)
	return nil
}

func (mc *mockCommitter) LedgerHeight() (uint64, error) {
	mc.Lock()
	defer mc.Unlock()
	args := mc.Called()
	if args.Get(1) == nil {
		return args.Get(0).(uint64), nil
	}
	return args.Get(0).(uint64), args.Get(1).(error)
}

func (mc *mockCommitter) GetBlocks(blockSeqs []uint64) []*pcomm.Block {
	args := mc.Called(blockSeqs)
	if args.Get(0) == nil {
		return nil
	}
	return args.Get(0).([]*pcomm.Block)
}

func (*mockCommitter) Close() {
}

// Default configuration to be used for gossip and communication modules
func newGossipConfig(id int, boot ...int) *gossip.Config {
	port := id + portPrefix
	return &gossip.Config{
		BindPort:                   port,
		BootstrapPeers:             bootPeers(boot...),
		ID:                         fmt.Sprintf("p%d", id),
		MaxBlockCountToStore:       0,
		MaxPropagationBurstLatency: 10 * time.Millisecond,
		MaxPropagationBurstSize:    10,
		PropagateIterations:        1,
		PropagatePeerNum:           3,
		PullInterval:               4 * time.Second,
		PullPeerNum:                5,
		InternalEndpoint:           fmt.Sprintf("localhost:%d", port),
		PublishCertPeriod:          10 * time.Second,
		RequestStateInfoInterval:   4 * time.Second,
		PublishStateInfoInterval:   4 * time.Second,
	}
}

// Create gossip instance
func newGossipInstance(config *gossip.Config, mcs api.MessageCryptoService) gossip.Gossip {
	id := api.PeerIdentityType(config.InternalEndpoint)
	idMapper := identity.NewIdentityMapper(mcs, id)
	return gossip.NewGossipServiceWithServer(config, &orgCryptoService{}, mcs,
		idMapper, id, nil)
}

// Create new instance of KVLedger to be used for testing
func newCommitter(id int) committer.Committer {
	cb, _ := test.MakeGenesisBlock(strconv.Itoa(id))
	ledger, _ := ledgermgmt.CreateLedger(cb)
	return committer.NewLedgerCommitter(ledger, &validator.MockValidator{})
}

// Constructing a pseudo peer node, simulating only the gossip and state transfer parts
func newPeerNodeWithGossip(config *gossip.Config, committer committer.Committer, acceptor peerIdentityAcceptor, g gossip.Gossip) *peerNode {
	cs := &cryptoServiceMock{acceptor: acceptor}
	// Gossip component based on configuration provided and communication module
	if g == nil {
		g = newGossipInstance(config, &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor})
	}

	logger.Debug("Joining channel", util.GetTestChainID())
	g.JoinChan(&joinChanMsg{}, common.ChainID(util.GetTestChainID()))

	// Initialize the pseudo peer simulator, which has only the three
	// basic parts
	sp := NewGossipStateProvider(util.GetTestChainID(), g, committer, cs)
	if sp == nil {
		return nil
	}

	return &peerNode{
		port:   config.BindPort,
		g:      g,
		s:      sp,
		commit: committer,
		cs:     cs,
	}
}

// Constructing a pseudo peer node, simulating only the gossip and state transfer parts
func newPeerNode(config *gossip.Config, committer committer.Committer, acceptor peerIdentityAcceptor) *peerNode {
	return newPeerNodeWithGossip(config, committer, acceptor, nil)
}

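// TestNilDirectMsg verifies that nil state requests, nil direct messages, and
// a malformed state request range are all ignored without panicking.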
func TestNilDirectMsg(t *testing.T) {
	mc := &mockCommitter{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(<-chan proto.ReceivedMessage))
	p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)
	defer p.shutdown()
	p.s.(*GossipStateProviderImpl).handleStateRequest(nil)
	p.s.(*GossipStateProviderImpl).directMessage(nil)
	sMsg, _ := p.s.(*GossipStateProviderImpl).stateRequestMessage(uint64(10), uint64(8)).NoopSign()
	req := &comm.ReceivedMessageImpl{
		SignedGossipMessage: sMsg,
	}
	p.s.(*GossipStateProviderImpl).directMessage(req)
}

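// TestNilAddPayload verifies that adding a nil payload returns a descriptive error.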
func TestNilAddPayload(t *testing.T) {
	mc := &mockCommitter{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(<-chan proto.ReceivedMessage))
	p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)
	defer p.shutdown()
	err := p.s.AddPayload(nil)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "nil")
}

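// TestAddPayloadLedgerUnavailable verifies that AddPayload fails when the
// ledger height cannot be queried.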
func TestAddPayloadLedgerUnavailable(t *testing.T) {
	mc := &mockCommitter{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(<-chan proto.ReceivedMessage))
	p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)
	defer p.shutdown()
	// Simulate a problem in the ledger
	failedLedger := mock.Mock{}
	failedLedger.On("LedgerHeight", mock.Anything).Return(uint64(0), errors.New("cannot query ledger"))
	mc.Lock()
	mc.Mock = failedLedger
	mc.Unlock()

	rawblock := pcomm.NewBlock(uint64(1), []byte{})
	b, _ := pb.Marshal(rawblock)
	err := p.s.AddPayload(&proto.Payload{
		SeqNum: uint64(1),
		Data:   b,
	})
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "Failed obtaining ledger height")
	assert.Contains(t, err.Error(), "cannot query ledger")
}

func TestOverPopulation(t *testing.T) {
	// Scenario: Add blocks to the state provider with a gap in between,
	// and ensure that the payload buffer starts rejecting blocks once the
	// distance between the ledger height and the latest block it contains
	// exceeds defMaxBlockDistance.

	mc := &mockCommitter{}
	blocksPassedToLedger := make(chan uint64, 10)
	mc.On("Commit", mock.Anything).Run(func(arg mock.Arguments) {
		blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
	})
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(<-chan proto.ReceivedMessage))
	p := newPeerNode(newGossipConfig(0), mc, noopPeerIdentityAcceptor)
	defer p.shutdown()

	// Add some blocks in a sequential manner and make sure it works
	for i := 1; i <= 4; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		b, _ := pb.Marshal(rawblock)
		assert.NoError(t, p.s.AddPayload(&proto.Payload{
			SeqNum: uint64(i),
			Data:   b,
		}))
	}

	// Add payloads from 10 to defMaxBlockDistance, while blocks [5,9] are missing.
	// Should succeed.
	for i := 10; i <= defMaxBlockDistance; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		b, _ := pb.Marshal(rawblock)
		assert.NoError(t, p.s.AddPayload(&proto.Payload{
			SeqNum: uint64(i),
			Data:   b,
		}))
	}

	// Add payloads from defMaxBlockDistance + 1 to defMaxBlockDistance * 10.
	// Should fail.
	for i := defMaxBlockDistance + 1; i <= defMaxBlockDistance*10; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		b, _ := pb.Marshal(rawblock)
		assert.Error(t, p.s.AddPayload(&proto.Payload{
			SeqNum: uint64(i),
			Data:   b,
		}))
	}

	// Ensure only blocks 1-4 were passed to the ledger
	close(blocksPassedToLedger)
	i := 1
	for seq := range blocksPassedToLedger {
		assert.Equal(t, uint64(i), seq)
		i++
	}
	assert.Equal(t, 5, i)

	// Ensure we don't store too many blocks in memory
	sp := p.s.(*GossipStateProviderImpl)
	assert.True(t, sp.payloads.Size() < defMaxBlockDistance)
}

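// TestFailures asserts the constructor's behavior on bad ledger state: a zero
// ledger height panics, a failing height query yields a nil provider, and a
// GetBlocks miss makes GetBlock return nil.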
func TestFailures(t *testing.T) {
	mc := &mockCommitter{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(0), nil)
	g := &mocks.GossipMock{}
	g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
	g.On("Accept", mock.Anything, true).Return(nil, make(<-chan proto.ReceivedMessage))
	assert.Panics(t, func() {
		newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)
	})
	// Reprogram mock
	mc.Mock = mock.Mock{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), errors.New("Failed accessing ledger"))
	assert.Nil(t, newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g))
	// Reprogram mock
	mc.Mock = mock.Mock{}
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	mc.On("GetBlocks", mock.Anything).Return(nil)
	p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)
	assert.Nil(t, p.s.GetBlock(uint64(1)))
}

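// TestGossipReception verifies that a block gossiped on the wrong channel is
// ignored, while the same block on the correct channel gets committed.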
func TestGossipReception(t *testing.T) {
	signalChan := make(chan struct{})
	rawblock := &pcomm.Block{
		Header: &pcomm.BlockHeader{
			Number: uint64(1),
		},
		Data: &pcomm.BlockData{
			Data: [][]byte{},
		},
	}
	b, _ := pb.Marshal(rawblock)

	createChan := func(signalChan chan struct{}) <-chan *proto.GossipMessage {
		c := make(chan *proto.GossipMessage)
		gMsg := &proto.GossipMessage{
			Channel: []byte("AAA"),
			Content: &proto.GossipMessage_DataMsg{
				DataMsg: &proto.DataMessage{
					Payload: &proto.Payload{
						SeqNum: 1,
						Data:   b,
					},
				},
			},
		}
		go func(c chan *proto.GossipMessage) {
			// Wait for Accept() to be called
			<-signalChan
			// Simulate a message reception from the gossip component with an invalid channel
			c <- gMsg
			gMsg.Channel = []byte(util.GetTestChainID())
			// Simulate a message reception from the gossip component
			c <- gMsg
		}(c)
		return c
	}

	g := &mocks.GossipMock{}
	rmc := createChan(signalChan)
	g.On("Accept", mock.Anything, false).Return(rmc, nil).Run(func(_ mock.Arguments) {
		signalChan <- struct{}{}
	})
	g.On("Accept", mock.Anything, true).Return(nil, make(<-chan proto.ReceivedMessage))
	mc := &mockCommitter{}
	receivedChan := make(chan struct{})
	mc.On("Commit", mock.Anything).Run(func(arguments mock.Arguments) {
		block := arguments.Get(0).(*pcomm.Block)
		assert.Equal(t, uint64(1), block.Header.Number)
		receivedChan <- struct{}{}
	})
	mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
	p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)
	defer p.shutdown()
	select {
	case <-receivedChan:
	case <-time.After(time.Second * 15):
		assert.Fail(t, "Didn't commit a block in a timely manner")
	}
}

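// TestAccessControl verifies that only peers passing the block pull policy
// receive blocks via state transfer.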
func TestAccessControl(t *testing.T) {
	viper.Set("peer.fileSystemPath", "/tmp/tests/ledger/node")
	ledgermgmt.InitializeTestEnv()
	defer ledgermgmt.CleanupTestEnv()

	bootstrapSetSize := 5
	bootstrapSet := make([]*peerNode, 0)

	authorizedPeers := map[string]struct{}{
		"localhost:5610": {},
		"localhost:5615": {},
		"localhost:5618": {},
		"localhost:5621": {},
	}

	blockPullPolicy := func(identity api.PeerIdentityType) error {
		if _, isAuthorized := authorizedPeers[string(identity)]; isAuthorized {
			return nil
		}
		return errors.New("Not authorized")
	}

	for i := 0; i < bootstrapSetSize; i++ {
		commit := newCommitter(i)
		bootstrapSet = append(bootstrapSet, newPeerNode(newGossipConfig(i), commit, blockPullPolicy))
	}

	defer func() {
		for _, p := range bootstrapSet {
			p.shutdown()
		}
	}()

	msgCount := 5

	for i := 1; i <= msgCount; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootstrapSet[0].s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	standardPeerSetSize := 10
	peersSet := make([]*peerNode, 0)

	for i := 0; i < standardPeerSetSize; i++ {
		commit := newCommitter(bootstrapSetSize + i)
		peersSet = append(peersSet, newPeerNode(newGossipConfig(bootstrapSetSize+i, 0, 1, 2, 3, 4), commit, blockPullPolicy))
	}

	defer func() {
		for _, p := range peersSet {
			p.shutdown()
		}
	}()

	waitUntilTrueOrTimeout(t, func() bool {
		for _, p := range peersSet {
			if len(p.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != bootstrapSetSize+standardPeerSetSize-1 {
				logger.Debug("Peer discovery has not finished yet")
				return false
			}
		}
		logger.Debug("All peers discovered each other")
		return true
	}, 30*time.Second)

	logger.Debug("Waiting for all blocks to arrive.")
	waitUntilTrueOrTimeout(t, func() bool {
		logger.Debug("Checking that all authorized peers got all the blocks, and that unauthorized peers didn't")
		for _, p := range peersSet {
			height, err := p.commit.LedgerHeight()
			id := fmt.Sprintf("localhost:%d", p.port)
			if _, isAuthorized := authorizedPeers[id]; isAuthorized {
				if height != uint64(msgCount+1) || err != nil {
					return false
				}
			} else {
				if err == nil && height > 1 {
					assert.Fail(t, "Peer", id, "got message but isn't authorized! Height:", height)
				}
			}
		}
		logger.Debug("All authorized peers have the expected ledger height")
		return true
	}, 60*time.Second)
}

/*// Simple scenario to start first booting node, gossip a message
// then start second node and verify second node also receives it
func TestNewGossipStateProvider_GossipingOneMessage(t *testing.T) {
	bootId := 0
	ledgerPath := "/tmp/tests/ledger/"
	defer os.RemoveAll(ledgerPath)

	bootNodeCommitter := newCommitter(bootId, ledgerPath + "node/")
	defer bootNodeCommitter.Close()

	bootNode := newPeerNode(newGossipConfig(bootId, 100), bootNodeCommitter)
	defer bootNode.shutdown()

	rawblock := &peer.Block2{}
	if err := pb.Unmarshal([]byte{}, rawblock); err != nil {
		t.Fail()
	}

	if bytes, err := pb.Marshal(rawblock); err == nil {
		payload := &proto.Payload{1, "", bytes}
		bootNode.s.AddPayload(payload)
	} else {
		t.Fail()
	}

	waitUntilTrueOrTimeout(t, func() bool {
		if block := bootNode.s.GetBlock(uint64(1)); block != nil {
			return true
		}
		return false
	}, 5 * time.Second)

	bootNode.g.Gossip(createDataMsg(uint64(1), []byte{}, ""))

	peerCommitter := newCommitter(1, ledgerPath + "node/")
	defer peerCommitter.Close()

	peer := newPeerNode(newGossipConfig(1, 100, bootId), peerCommitter)
	defer peer.shutdown()

	ready := make(chan interface{})

	go func(p *peerNode) {
		for len(p.g.GetPeers()) != 1 {
			time.Sleep(100 * time.Millisecond)
		}
		ready <- struct{}{}
	}(peer)

	select {
	case <-ready:
		{
			break
		}
	case <-time.After(1 * time.Second):
		{
			t.Fail()
		}
	}

	// Make sure anti-entropy has a chance to bring the missing block
	waitUntilTrueOrTimeout(t, func() bool {
		if block := peer.s.GetBlock(uint64(1)); block != nil {
			return true
		}
		return false
	}, 2 * defAntiEntropyInterval + 1 * time.Second)

	block := peer.s.GetBlock(uint64(1))

	assert.NotNil(t, block)
}

func TestNewGossipStateProvider_RepeatGossipingOneMessage(t *testing.T) {
	for i := 0; i < 10; i++ {
		TestNewGossipStateProvider_GossipingOneMessage(t)
	}
}*/

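// TestNewGossipStateProvider_SendingManyMessages verifies that blocks added on
// a bootstrap peer eventually propagate to all peers in the channel.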
func TestNewGossipStateProvider_SendingManyMessages(t *testing.T) {
	viper.Set("peer.fileSystemPath", "/tmp/tests/ledger/node")
	ledgermgmt.InitializeTestEnv()
	defer ledgermgmt.CleanupTestEnv()

	bootstrapSetSize := 5
	bootstrapSet := make([]*peerNode, 0)

	for i := 0; i < bootstrapSetSize; i++ {
		commit := newCommitter(i)
		bootstrapSet = append(bootstrapSet, newPeerNode(newGossipConfig(i), commit, noopPeerIdentityAcceptor))
	}

	defer func() {
		for _, p := range bootstrapSet {
			p.shutdown()
		}
	}()

	msgCount := 10

	for i := 1; i <= msgCount; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootstrapSet[0].s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	standardPeersSize := 10
	peersSet := make([]*peerNode, 0)

	for i := 0; i < standardPeersSize; i++ {
		commit := newCommitter(bootstrapSetSize + i)
		peersSet = append(peersSet, newPeerNode(newGossipConfig(bootstrapSetSize+i, 0, 1, 2, 3, 4), commit, noopPeerIdentityAcceptor))
	}

	defer func() {
		for _, p := range peersSet {
			p.shutdown()
		}
	}()

	waitUntilTrueOrTimeout(t, func() bool {
		for _, p := range peersSet {
			if len(p.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != bootstrapSetSize+standardPeersSize-1 {
				logger.Debug("Peer discovery has not finished yet")
				return false
			}
		}
		logger.Debug("All peers discovered each other")
		return true
	}, 30*time.Second)

	logger.Debug("Waiting for all blocks to arrive.")
	waitUntilTrueOrTimeout(t, func() bool {
		logger.Debug("Checking that all peers got all the blocks")
		for _, p := range peersSet {
			height, err := p.commit.LedgerHeight()
			if height != uint64(msgCount+1) || err != nil {
				return false
			}
		}
		logger.Debug("All peers have the same ledger height")
		return true
	}, 60*time.Second)
}

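// TestGossipStateProvider_TestStateMessages verifies the remote state
// request/response round-trip between two peers.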
func TestGossipStateProvider_TestStateMessages(t *testing.T) {
	viper.Set("peer.fileSystemPath", "/tmp/tests/ledger/node")
	ledgermgmt.InitializeTestEnv()
	defer ledgermgmt.CleanupTestEnv()

	bootPeer := newPeerNode(newGossipConfig(0), newCommitter(0), noopPeerIdentityAcceptor)
	defer bootPeer.shutdown()

	peer := newPeerNode(newGossipConfig(1, 0), newCommitter(1), noopPeerIdentityAcceptor)
	defer peer.shutdown()

	naiveStateMsgPredicate := func(message interface{}) bool {
		return message.(proto.ReceivedMessage).GetGossipMessage().IsRemoteStateMessage()
	}

	_, bootCh := bootPeer.g.Accept(naiveStateMsgPredicate, true)
	_, peerCh := peer.g.Accept(naiveStateMsgPredicate, true)

	wg := sync.WaitGroup{}
	wg.Add(2)

	go func() {
		msg := <-bootCh
		logger.Info("Bootstrap node got message, ", msg)
		assert.True(t, msg.GetGossipMessage().GetStateRequest() != nil)
		msg.Respond(&proto.GossipMessage{
			Content: &proto.GossipMessage_StateResponse{&proto.RemoteStateResponse{nil}},
		})
		wg.Done()
	}()

	go func() {
		msg := <-peerCh
		logger.Info("Peer node got an answer, ", msg)
		assert.True(t, msg.GetGossipMessage().GetStateResponse() != nil)
		wg.Done()
	}()

	readyCh := make(chan struct{})
	go func() {
		wg.Wait()
		readyCh <- struct{}{}
	}()

	time.Sleep(5 * time.Second)
	logger.Info("Sending gossip message with remote state request")

	chainID := common.ChainID(util.GetTestChainID())

	peer.g.Send(&proto.GossipMessage{
		Content: &proto.GossipMessage_StateRequest{&proto.RemoteStateRequest{0, 1}},
	}, &comm.RemotePeer{peer.g.PeersOfChannel(chainID)[0].Endpoint, peer.g.PeersOfChannel(chainID)[0].PKIid})
	logger.Info("Waiting until peers exchange messages")

	select {
	case <-readyCh:
		logger.Info("Done!")
	case <-time.After(10 * time.Second):
		t.Fail()
	}
}

// Start one bootstrap peer and submit defAntiEntropyBatchSize + 5 messages into
// the local ledger, then spawn a new peer and wait for the anti-entropy procedure
// to complete the missing blocks. Since state transfer messages are now batched,
// it is expected to see _exactly_ two messages with state transfer responses.
func TestNewGossipStateProvider_BatchingOfStateRequest(t *testing.T) {
	viper.Set("peer.fileSystemPath", "/tmp/tests/ledger/node")
	ledgermgmt.InitializeTestEnv()
	defer ledgermgmt.CleanupTestEnv()

	bootPeer := newPeerNode(newGossipConfig(0), newCommitter(0), noopPeerIdentityAcceptor)
	defer bootPeer.shutdown()

	msgCount := defAntiEntropyBatchSize + 5
	expectedMessagesCnt := 2

	for i := 1; i <= msgCount; i++ {
		rawblock := pcomm.NewBlock(uint64(i), []byte{})
		if b, err := pb.Marshal(rawblock); err == nil {
			payload := &proto.Payload{
				SeqNum: uint64(i),
				Data:   b,
			}
			bootPeer.s.AddPayload(payload)
		} else {
			t.Fail()
		}
	}

	peer := newPeerNode(newGossipConfig(1, 0), newCommitter(1), noopPeerIdentityAcceptor)
	defer peer.shutdown()

	naiveStateMsgPredicate := func(message interface{}) bool {
		return message.(proto.ReceivedMessage).GetGossipMessage().IsRemoteStateMessage()
	}
	_, peerCh := peer.g.Accept(naiveStateMsgPredicate, true)

	messageCh := make(chan struct{})
	stopWaiting := make(chan struct{})

	// The number of submitted messages is defAntiEntropyBatchSize + 5, therefore
	// the expected number of batches is expectedMessagesCnt = 2. The following
	// goroutine makes sure it receives the expected number of messages and then
	// signals success so the test can continue.
	go func(expected int) {
		cnt := 0
		for cnt < expected {
			select {
			case <-peerCh:
				cnt++
			case <-stopWaiting:
				return
			}
		}

		messageCh <- struct{}{}
	}(expectedMessagesCnt)

	// Wait for the signal that the expected number of message batches was received,
	// otherwise time out after 2*defAntiEntropyInterval + 1 second
	select {
	case <-messageCh:
		// Once we got the message indicating that both batches were received,
		// make sure the messages were indeed committed.
		waitUntilTrueOrTimeout(t, func() bool {
			if len(peer.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != 1 {
				logger.Debug("Peer discovery has not finished yet")
				return false
			}
			logger.Debug("All peers discovered each other")
			return true
		}, 30*time.Second)

		logger.Debug("Waiting for all blocks to arrive.")
		waitUntilTrueOrTimeout(t, func() bool {
			logger.Debug("Checking that the peer got all the blocks")
			height, err := peer.commit.LedgerHeight()
			if height != uint64(msgCount+1) || err != nil {
				return false
			}
			logger.Debug("The peer has the expected ledger height")
			return true
		}, 60*time.Second)
	case <-time.After(defAntiEntropyInterval*2 + time.Second*1):
		close(stopWaiting)
		t.Fatal("Expected to receive two batches with missing payloads")
	}
}

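// waitUntilTrueOrTimeout polls the predicate once a second and fails the test
// if it doesn't hold within the timeout.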
func waitUntilTrueOrTimeout(t *testing.T, predicate func() bool, timeout time.Duration) {
	ch := make(chan struct{})
	go func() {
		logger.Debug("Started polling until the predicate is satisfied.")
		for !predicate() {
			time.Sleep(1 * time.Second)
		}
		ch <- struct{}{}
		logger.Debug("Done.")
	}()

	select {
	case <-ch:
	case <-time.After(timeout):
		t.Fatal("Timeout has expired")
	}
	logger.Debug("Stopped waiting: predicate satisfied or timeout expired")
}