github.com/leonlxy/hyperledger@v1.0.0-alpha.0.20170427033203-34922035d248/gossip/service/gossip_service_test.go

     1  /*
     2  Copyright IBM Corp. 2016 All Rights Reserved.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8                   http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package service
    18  
    19  import (
    20  	"bytes"
    21  	"fmt"
    22  	"net"
    23  	"sync"
    24  	"testing"
    25  	"time"
    26  
    27  	"github.com/hyperledger/fabric/common/config"
    28  	"github.com/hyperledger/fabric/common/localmsp"
    29  	"github.com/hyperledger/fabric/core/deliverservice"
    30  	"github.com/hyperledger/fabric/core/deliverservice/blocksprovider"
    31  	"github.com/hyperledger/fabric/gossip/api"
    32  	gossipCommon "github.com/hyperledger/fabric/gossip/common"
    33  	"github.com/hyperledger/fabric/gossip/election"
    34  	"github.com/hyperledger/fabric/gossip/gossip"
    35  	"github.com/hyperledger/fabric/gossip/identity"
    36  	"github.com/hyperledger/fabric/gossip/state"
    37  	"github.com/hyperledger/fabric/gossip/util"
    38  	"github.com/hyperledger/fabric/msp/mgmt"
    39  	"github.com/hyperledger/fabric/msp/mgmt/testtools"
    40  	"github.com/hyperledger/fabric/peer/gossip/mcs"
    41  	"github.com/hyperledger/fabric/protos/common"
    42  	"github.com/hyperledger/fabric/protos/peer"
    43  	"github.com/op/go-logging"
    44  	"github.com/spf13/viper"
    45  	"github.com/stretchr/testify/assert"
    46  	"google.golang.org/grpc"
    47  )
    48  
    49  func init() {
    50  	util.SetupTestLogging()
    51  }
    52  
    53  func TestInitGossipService(t *testing.T) {
    54  	// Test whether the gossip service is indeed a singleton
    55  	grpcServer := grpc.NewServer()
    56  	socket, err := net.Listen("tcp", fmt.Sprintf("%s:%d", "", 5611))
    57  	assert.NoError(t, err)
    58  
    59  	go grpcServer.Serve(socket)
    60  	defer grpcServer.Stop()
    61  
    62  	msptesttools.LoadMSPSetupForTesting()
    63  	identity, _ := mgmt.GetLocalSigningIdentityOrPanic().Serialize()
    64  
    65  	wg := sync.WaitGroup{}
    66  	wg.Add(10)
    67  	for i := 0; i < 10; i++ {
    68  		go func() {
    69  			messageCryptoService := mcs.New(&mcs.MockChannelPolicyManagerGetter{}, localmsp.NewSigner(), mgmt.NewDeserializersManager())
    70  			InitGossipService(identity, "localhost:5611", grpcServer, messageCryptoService)
    71  
    72  			wg.Done()
    73  		}()
    74  	}
    75  	wg.Wait()
    76  
    77  	defer GetGossipService().Stop()
    78  	gossip := GetGossipService()
    79  
    80  	for i := 0; i < 10; i++ {
    81  		go func(gossipInstance GossipService) {
    82  			assert.Equal(t, gossip, GetGossipService())
    83  		}(gossip)
    84  	}
    85  
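        	// Give the assertion goroutines above a chance to run before the test returns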
    86  	time.Sleep(time.Second * 2)
    87  }
    88  
    89  // Make sure *joinChannelMessage implements the api.JoinChannelMessage interface
    90  func TestJCMInterface(t *testing.T) {
    91  	_ = api.JoinChannelMessage(&joinChannelMessage{})
    92  }
    93  
    94  func TestLeaderElectionWithDeliverClient(t *testing.T) {
    95  
    96  	// Test that leader election works with a mock deliver service instance.
    97  	// The configuration is set to use dynamic leader election.
    98  	// 10 peers are started and added to the channel; at the end we check that
    99  	// mockDeliverService.StartDeliverForChannel was invoked for exactly one peer.
   100  
   101  	viper.Set("peer.gossip.useLeaderElection", true)
   102  	viper.Set("peer.gossip.orgLeader", false)
   103  
   104  	n := 10
   105  	gossips := startPeers(t, n, 20000)
   106  
   107  	channelName := "chanA"
   108  	peerIndexes := make([]int, n)
   109  	for i := 0; i < n; i++ {
   110  		peerIndexes[i] = i
   111  	}
   112  	addPeersToChannel(t, n, 20000, channelName, gossips, peerIndexes)
   113  
   114  	waitForFullMembership(t, gossips, n, time.Second*20, time.Second*2)
   115  
   116  	services := make([]*electionService, n)
   117  
   118  	for i := 0; i < n; i++ {
   119  		deliverServiceFactory := &mockDeliverServiceFactory{
   120  			service: &mockDeliverService{
   121  				running: make(map[string]bool),
   122  			},
   123  		}
   124  		gossips[i].(*gossipServiceImpl).deliveryFactory = deliverServiceFactory
   125  		deliverServiceFactory.service.running[channelName] = false
   126  
   127  		gossips[i].InitializeChannel(channelName, &mockLedgerInfo{1}, []string{"localhost:5005"})
   128  		service, exist := gossips[i].(*gossipServiceImpl).leaderElection[channelName]
   129  		assert.True(t, exist, "Leader election service should be created for peer %d and channel %s", i, channelName)
   130  		services[i] = &electionService{nil, false, 0}
   131  		services[i].LeaderElectionService = service
   132  	}
   133  
   134  	// Check that a single leader was elected.
   135  	assert.True(t, waitForLeaderElection(t, services, time.Second*30, time.Second*2), "One leader should be selected")
   136  
   137  	startsNum := 0
   138  	for i := 0; i < n; i++ {
   139  		// Check whether mockDeliverService.StartDeliverForChannel was invoked on this peer for the channel
   140  		if gossips[i].(*gossipServiceImpl).deliveryService.(*mockDeliverService).running[channelName] {
   141  			startsNum++
   142  		}
   143  	}
   144  
   145  	assert.Equal(t, 1, startsNum, "Only for one peer delivery client should start")
   146  
   147  	stopPeers(gossips)
   148  }
   149  
   150  func TestWithStaticDeliverClientLeader(t *testing.T) {
   151  
   152  	// Tests that the static leader flag works correctly.
   153  	// The leader election flag is set to false and the static leader flag is set to true.
   154  	// Two gossip service instances (peers) are created.
   155  	// Each peer is added to the channel and should run the mock delivery client.
   156  	// After that, each peer is added to another channel and should run a deliver client for that channel as well.
   157  
   158  	viper.Set("peer.gossip.useLeaderElection", false)
   159  	viper.Set("peer.gossip.orgLeader", true)
   160  
   161  	n := 2
   162  	gossips := startPeers(t, n, 20000)
   163  
   164  	channelName := "chanA"
   165  	peerIndexes := make([]int, n)
   166  	for i := 0; i < n; i++ {
   167  		peerIndexes[i] = i
   168  	}
   169  
   170  	addPeersToChannel(t, n, 20000, channelName, gossips, peerIndexes)
   171  
   172  	waitForFullMembership(t, gossips, n, time.Second*30, time.Second*2)
   173  
   174  	deliverServiceFactory := &mockDeliverServiceFactory{
   175  		service: &mockDeliverService{
   176  			running: make(map[string]bool),
   177  		},
   178  	}
   179  
   180  	for i := 0; i < n; i++ {
   181  		gossips[i].(*gossipServiceImpl).deliveryFactory = deliverServiceFactory
   182  		deliverServiceFactory.service.running[channelName] = false
   183  		gossips[i].InitializeChannel(channelName, &mockLedgerInfo{1}, []string{"localhost:5005"})
   184  	}
   185  
   186  	for i := 0; i < n; i++ {
   187  		assert.NotNil(t, gossips[i].(*gossipServiceImpl).deliveryService, "Delivery service not initiated in peer %d", i)
   188  		assert.True(t, gossips[i].(*gossipServiceImpl).deliveryService.(*mockDeliverService).running[channelName], "Block deliverer not started for peer %d", i)
   189  	}
   190  
   191  	channelName = "chanB"
   192  	for i := 0; i < n; i++ {
   193  		deliverServiceFactory.service.running[channelName] = false
   194  		gossips[i].InitializeChannel(channelName, &mockLedgerInfo{1}, []string{"localhost:5005"})
   195  	}
   196  
   197  	for i := 0; i < n; i++ {
   198  		assert.NotNil(t, gossips[i].(*gossipServiceImpl).deliveryService, "Delivery service not initiated in peer %d", i)
   199  		assert.True(t, gossips[i].(*gossipServiceImpl).deliveryService.(*mockDeliverService).running[channelName], "Block deliverer not started for peer %d", i)
   200  	}
   201  
   202  	stopPeers(gossips)
   203  }
   204  
   205  func TestWithStaticDeliverClientNotLeader(t *testing.T) {
   206  	viper.Set("peer.gossip.useLeaderElection", false)
   207  	viper.Set("peer.gossip.orgLeader", false)
   208  
   209  	n := 2
   210  	gossips := startPeers(t, n, 20000)
   211  
   212  	channelName := "chanA"
   213  	peerIndexes := make([]int, n)
   214  	for i := 0; i < n; i++ {
   215  		peerIndexes[i] = i
   216  	}
   217  
   218  	addPeersToChannel(t, n, 20000, channelName, gossips, peerIndexes)
   219  
   220  	waitForFullMembership(t, gossips, n, time.Second*30, time.Second*2)
   221  
   222  	deliverServiceFactory := &mockDeliverServiceFactory{
   223  		service: &mockDeliverService{
   224  			running: make(map[string]bool),
   225  		},
   226  	}
   227  
   228  	for i := 0; i < n; i++ {
   229  		gossips[i].(*gossipServiceImpl).deliveryFactory = deliverServiceFactory
   230  		deliverServiceFactory.service.running[channelName] = false
   231  		gossips[i].InitializeChannel(channelName, &mockLedgerInfo{1}, []string{"localhost:5005"})
   232  	}
   233  
   234  	for i := 0; i < n; i++ {
   235  		assert.NotNil(t, gossips[i].(*gossipServiceImpl).deliveryService, "Delivery service not initiated in peer %d", i)
   236  		assert.False(t, gossips[i].(*gossipServiceImpl).deliveryService.(*mockDeliverService).running[channelName], "Block deliverer should not be started for peer %d", i)
   237  	}
   238  
   239  	stopPeers(gossips)
   240  }
   241  
   242  func TestWithStaticDeliverClientBothStaticAndLeaderElection(t *testing.T) {
   243  	viper.Set("peer.gossip.useLeaderElection", true)
   244  	viper.Set("peer.gossip.orgLeader", true)
   245  
   246  	n := 2
   247  	gossips := startPeers(t, n, 20000)
   248  
   249  	channelName := "chanA"
   250  	peerIndexes := make([]int, n)
   251  	for i := 0; i < n; i++ {
   252  		peerIndexes[i] = i
   253  	}
   254  
   255  	addPeersToChannel(t, n, 20000, channelName, gossips, peerIndexes)
   256  
   257  	waitForFullMembership(t, gossips, n, time.Second*30, time.Second*2)
   258  
   259  	deliverServiceFactory := &mockDeliverServiceFactory{
   260  		service: &mockDeliverService{
   261  			running: make(map[string]bool),
   262  		},
   263  	}
   264  
   265  	for i := 0; i < n; i++ {
   266  		gossips[i].(*gossipServiceImpl).deliveryFactory = deliverServiceFactory
   267  		assert.Panics(t, func() {
   268  			gossips[i].InitializeChannel(channelName, &mockLedgerInfo{1}, []string{"localhost:5005"})
   269  		}, "Dynamic leader election and static connection to ordering service can't exist simultaneously")
   270  	}
   271  
   272  	stopPeers(gossips)
   273  }
   274  
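        // mockDeliverServiceFactory is a delivery service factory stub that always hands out the same mockDeliverService instance.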
   275  type mockDeliverServiceFactory struct {
   276  	service *mockDeliverService
   277  }
   278  
   279  func (mf *mockDeliverServiceFactory) Service(g GossipService, endpoints []string, mcs api.MessageCryptoService) (deliverclient.DeliverService, error) {
   280  	return mf.service, nil
   281  }
   282  
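        // mockDeliverService records, per channel, whether block delivery has been started.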
   283  type mockDeliverService struct {
   284  	running map[string]bool
   285  }
   286  
   287  func (ds *mockDeliverService) StartDeliverForChannel(chainID string, ledgerInfo blocksprovider.LedgerInfo) error {
   288  	ds.running[chainID] = true
   289  	return nil
   290  }
   291  
   292  func (ds *mockDeliverService) StopDeliverForChannel(chainID string) error {
   293  	ds.running[chainID] = false
   294  	return nil
   295  }
   296  
   297  func (ds *mockDeliverService) Stop() {
   298  }
   299  
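        // mockLedgerInfo is a ledger stub that reports a fixed height and ignores commits.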
   300  type mockLedgerInfo struct {
   301  	Height uint64
   302  }
   303  
   304  // LedgerHeight returns a mocked value for the ledger height
   305  func (li *mockLedgerInfo) LedgerHeight() (uint64, error) {
   306  	return li.Height, nil
   307  }
   308  
   309  // Commit block to the ledger
   310  func (li *mockLedgerInfo) Commit(block *common.Block) error {
   311  	return nil
   312  }
   313  
   314  // Gets blocks with sequence numbers provided in the slice
   315  func (li *mockLedgerInfo) GetBlocks(blockSeqs []uint64) []*common.Block {
   316  	return make([]*common.Block, 0)
   317  }
   318  
   319  // Closes committing service
   320  func (li *mockLedgerInfo) Close() {
   321  }
   322  
   323  func TestLeaderElectionWithRealGossip(t *testing.T) {
   324  
   325  	// Spawn 10 gossip instances with a single channel, all inside the same organization.
   326  	// Run leader election on top of each gossip instance and check that only one leader is chosen.
   327  	// Create another channel that includes a subset of the peers over the same gossip instances {1,3,5,7}.
   328  	// Run additional leader election services for the new channel.
   329  	// Check that the correct leader still exists for the first channel and that a new correct leader is chosen in the second channel.
   330  	// Stop the gossip instances of the leader peers for both channels and verify that a new leader is chosen for each.
   331  
   332  	logging.SetLevel(logging.DEBUG, util.LoggingElectionModule)
   333  	logging.SetLevel(logging.DEBUG, util.LoggingServiceModule)
   334  
   335  	// Creating gossip service instances for peers
   336  	n := 10
   337  	gossips := startPeers(t, n, 20000)
   338  
   339  	// Joining all peers to first channel
   340  	channelName := "chanA"
   341  	peerIndexes := make([]int, n)
   342  	for i := 0; i < n; i++ {
   343  		peerIndexes[i] = i
   344  	}
   345  	addPeersToChannel(t, n, 20000, channelName, gossips, peerIndexes)
   346  
   347  	waitForFullMembership(t, gossips, n, time.Second*30, time.Second*2)
   348  
   349  	logger.Warning("Starting leader election services")
   350  
   351  	// Starting leader election services
   352  	services := make([]*electionService, n)
   353  
   354  	for i := 0; i < n; i++ {
   355  		services[i] = &electionService{nil, false, 0}
   356  		services[i].LeaderElectionService = gossips[i].(*gossipServiceImpl).newLeaderElectionComponent(channelName, services[i].callback)
   357  	}
   358  
   359  	logger.Warning("Waiting for leader election")
   360  
   361  	assert.True(t, waitForLeaderElection(t, services, time.Second*30, time.Second*2), "One leader should be selected")
   362  
   363  	startsNum := 0
   364  	for i := 0; i < n; i++ {
   365  		// Check whether the callback function was invoked by this leader election service instance
   366  		if services[i].callbackInvokeRes {
   367  			startsNum++
   368  		}
   369  	}
   370  	// Only the leader should invoke the callback function, so this double-checks that only one leader exists
   371  	assert.Equal(t, 1, startsNum, "Only for one peer callback function should be called - chanA")
   372  
   373  	// Adding some peers to a new channel and creating leader election services for the peers in the new channel
   374  	// Expecting peer 1 (first in the list of election services) to become the leader of the second channel
   375  	secondChannelPeerIndexes := []int{1, 3, 5, 7}
   376  	secondChannelName := "chanB"
   377  	secondChannelServices := make([]*electionService, len(secondChannelPeerIndexes))
   378  	addPeersToChannel(t, n, 20000, secondChannelName, gossips, secondChannelPeerIndexes)
   379  
   380  	for idx, i := range secondChannelPeerIndexes {
   381  		secondChannelServices[idx] = &electionService{nil, false, 0}
   382  		secondChannelServices[idx].LeaderElectionService = gossips[i].(*gossipServiceImpl).newLeaderElectionComponent(secondChannelName, secondChannelServices[idx].callback)
   383  	}
   384  
   385  	assert.True(t, waitForLeaderElection(t, secondChannelServices, time.Second*30, time.Second*2), "One leader should be selected for chanB")
   386  	assert.True(t, waitForLeaderElection(t, services, time.Second*30, time.Second*2), "One leader should be selected for chanA")
   387  
   388  	startsNum = 0
   389  	for i := 0; i < n; i++ {
   390  		if services[i].callbackInvokeRes {
   391  			startsNum++
   392  		}
   393  	}
   394  	assert.Equal(t, 1, startsNum, "Only for one peer callback function should be called - chanA")
   395  
   396  	startsNum = 0
   397  	for i := 0; i < len(secondChannelServices); i++ {
   398  		if secondChannelServices[i].callbackInvokeRes {
   399  			startsNum++
   400  		}
   401  	}
   402  	assert.Equal(t, 1, startsNum, "Only for one peer callback function should be called - chanB")
   403  
   404  	// Stopping 2 gossip instances (peer 0 and peer 1) should trigger re-election.
   405  	// Now peer 2 becomes the leader for the first channel and peer 3 for the second channel.
   406  
   407  	logger.Warning("Killing 2 peers, initiating new leader election")
   408  
   409  	stopPeers(gossips[:2])
   410  
   411  	waitForFullMembership(t, gossips[2:], n-2, time.Second*30, time.Second*2)
   412  
   413  	assert.True(t, waitForLeaderElection(t, services[2:], time.Second*30, time.Second*2), "One leader should be selected after re-election - chanA")
   414  	assert.True(t, waitForLeaderElection(t, secondChannelServices[1:], time.Second*30, time.Second*2), "One leader should be selected after re-election - chanB")
   415  
   416  	startsNum = 0
   417  	for i := 2; i < n; i++ {
   418  		if services[i].callbackInvokeRes {
   419  			startsNum++
   420  		}
   421  	}
   422  	assert.Equal(t, 1, startsNum, "Only for one peer callback function should be called after re-election - chanA")
   423  
   424  	startsNum = 0
   425  	for i := 1; i < len(secondChannelServices); i++ {
   426  		if secondChannelServices[i].callbackInvokeRes {
   427  			startsNum++
   428  		}
   429  	}
   430  	assert.Equal(t, 1, startsNum, "Only for one peer callback function should be called after re-election - chanB")
   431  
   432  	stopServices(secondChannelServices)
   433  	stopServices(services)
   434  	stopPeers(gossips[2:])
   435  }
   436  
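        // electionService wraps a LeaderElectionService and records whether and how many times the leadership callback was invoked.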
   437  type electionService struct {
   438  	election.LeaderElectionService
   439  	callbackInvokeRes   bool
   440  	callbackInvokeCount int
   441  }
   442  
   443  func (es *electionService) callback(isLeader bool) {
   444  	es.callbackInvokeRes = isLeader
   445  	es.callbackInvokeCount = es.callbackInvokeCount + 1
   446  }
   447  
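        // joinChanMsg is a test implementation of api.JoinChannelMessage with a single organization (orgInChannelA) and no anchor peers.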
   448  type joinChanMsg struct {
   449  }
   450  
   451  // SequenceNumber returns the sequence number of the block this joinChanMsg
   452  // is derived from
   453  func (jmc *joinChanMsg) SequenceNumber() uint64 {
   454  	return uint64(time.Now().UnixNano())
   455  }
   456  
   457  // Members returns the organizations of the channel
   458  func (jmc *joinChanMsg) Members() []api.OrgIdentityType {
   459  	return []api.OrgIdentityType{orgInChannelA}
   460  }
   461  
   462  // AnchorPeersOf returns the anchor peers of the given organization
   463  func (jmc *joinChanMsg) AnchorPeersOf(org api.OrgIdentityType) []api.AnchorPeer {
   464  	return []api.AnchorPeer{}
   465  }
   466  
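        // waitForFullMembership polls until every gossip instance sees the other peersNum-1 peers, returning false if the timeout expires first.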
   467  func waitForFullMembership(t *testing.T, gossips []GossipService, peersNum int, timeout time.Duration, testPollInterval time.Duration) bool {
   468  	end := time.Now().Add(timeout)
   469  	var correctPeers int
   470  	for time.Now().Before(end) {
   471  		correctPeers = 0
   472  		for _, g := range gossips {
   473  			if len(g.Peers()) == (peersNum - 1) {
   474  				correctPeers++
   475  			}
   476  		}
   477  		if correctPeers == peersNum {
   478  			return true
   479  		}
   480  		time.Sleep(testPollInterval)
   481  	}
   482  	logger.Warningf("Only %d peers have full membership", correctPeers)
   483  	return false
   484  }
   485  
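        // waitForMultipleLeadersElection polls until exactly leadersNum leaders are observed in two consecutive polls, returning false if the timeout expires first.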
   486  func waitForMultipleLeadersElection(t *testing.T, services []*electionService, leadersNum int, timeout time.Duration, testPollInterval time.Duration) bool {
   487  	logger.Warning("Waiting for", leadersNum, "leaders")
   488  	end := time.Now().Add(timeout)
   489  	correctNumberOfLeadersFound := false
   490  	leaders := 0
   491  	for time.Now().Before(end) {
   492  		leaders = 0
   493  		for _, s := range services {
   494  			if s.IsLeader() {
   495  				leaders++
   496  			}
   497  		}
   498  		if leaders == leadersNum {
   499  			if correctNumberOfLeadersFound {
   500  				return true
   501  			}
   502  			correctNumberOfLeadersFound = true
   503  		} else {
   504  			correctNumberOfLeadersFound = false
   505  		}
   506  		time.Sleep(testPollInterval)
   507  	}
   508  	logger.Warning("Incorrect number of leaders", leaders)
   509  	for i, s := range services {
   510  		logger.Warning("Peer at index", i, "is leader", s.IsLeader())
   511  	}
   512  	return false
   513  }
   514  
   515  func waitForLeaderElection(t *testing.T, services []*electionService, timeout time.Duration, testPollInterval time.Duration) bool {
   516  	return waitForMultipleLeadersElection(t, services, 1, timeout, testPollInterval)
   517  }
   518  
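        // waitUntilOrFailBlocking runs f and fails the test (printing a stack trace) if it does not complete within the timeout.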
   519  func waitUntilOrFailBlocking(t *testing.T, f func(), timeout time.Duration) {
   520  	successChan := make(chan struct{}, 1)
   521  	go func() {
   522  		f()
   523  		successChan <- struct{}{}
   524  	}()
   525  	select {
   526  	case <-time.NewTimer(timeout).C:
   527  		break
   528  	case <-successChan:
   529  		return
   530  	}
   531  	util.PrintStackTrace()
   532  	assert.Fail(t, "Timeout expired!")
   533  }
   534  
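        // stopServices stops all election services concurrently and waits for them to finish.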
   535  func stopServices(services []*electionService) {
   536  	stoppingWg := sync.WaitGroup{}
   537  	stoppingWg.Add(len(services))
   538  	for i, sI := range services {
   539  		go func(i int, s_i election.LeaderElectionService) {
   540  			defer stoppingWg.Done()
   541  			s_i.Stop()
   542  		}(i, sI)
   543  	}
   544  	stoppingWg.Wait()
   545  	time.Sleep(time.Second * time.Duration(2))
   546  }
   547  
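        // stopPeers stops all gossip instances concurrently and waits for them to finish.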
   548  func stopPeers(peers []GossipService) {
   549  	stoppingWg := sync.WaitGroup{}
   550  	stoppingWg.Add(len(peers))
   551  	for i, pI := range peers {
   552  		go func(i int, p_i GossipService) {
   553  			defer stoppingWg.Done()
   554  			p_i.Stop()
   555  		}(i, pI)
   556  	}
   557  	stoppingWg.Wait()
   558  	time.Sleep(time.Second * time.Duration(2))
   559  }
   560  
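        // addPeersToChannel concurrently joins the peers at the given indexes to the channel and publishes placeholder channel metadata.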
   561  func addPeersToChannel(t *testing.T, n int, portPrefix int, channel string, peers []GossipService, peerIndexes []int) {
   562  	jcm := &joinChanMsg{}
   563  
   564  	wg := sync.WaitGroup{}
   565  	for _, i := range peerIndexes {
   566  		wg.Add(1)
   567  		go func(i int) {
   568  			peers[i].JoinChan(jcm, gossipCommon.ChainID(channel))
   569  			peers[i].UpdateChannelMetadata([]byte("bla bla"), gossipCommon.ChainID(channel))
   570  			wg.Done()
   571  		}(i)
   572  	}
   573  	waitUntilOrFailBlocking(t, wg.Wait, time.Second*10)
   574  }
   575  
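        // startPeers concurrently creates n gossip service instances listening on ports portPrefix..portPrefix+n-1, with the first six peers used as bootstrap peers.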
   576  func startPeers(t *testing.T, n int, portPrefix int) []GossipService {
   577  
   578  	peers := make([]GossipService, n)
   579  	wg := sync.WaitGroup{}
   580  	for i := 0; i < n; i++ {
   581  		wg.Add(1)
   582  		go func(i int) {
   583  
   584  			peers[i] = newGossipInstance(portPrefix, i, 100, 0, 1, 2, 3, 4, 5)
   585  			wg.Done()
   586  		}(i)
   587  	}
   588  	waitUntilOrFailBlocking(t, wg.Wait, time.Second*10)
   589  
   590  	return peers
   591  }
   592  
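        // newGossipInstance builds a gossipServiceImpl backed by a real gossip instance that uses the naive crypto service; the boot indexes are translated into bootstrap peer endpoints.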
   593  func newGossipInstance(portPrefix int, id int, maxMsgCount int, boot ...int) GossipService {
   594  	port := id + portPrefix
   595  	conf := &gossip.Config{
   596  		BindPort:                   port,
   597  		BootstrapPeers:             bootPeers(portPrefix, boot...),
   598  		ID:                         fmt.Sprintf("p%d", id),
   599  		MaxBlockCountToStore:       maxMsgCount,
   600  		MaxPropagationBurstLatency: time.Duration(500) * time.Millisecond,
   601  		MaxPropagationBurstSize:    20,
   602  		PropagateIterations:        1,
   603  		PropagatePeerNum:           3,
   604  		PullInterval:               time.Duration(2) * time.Second,
   605  		PullPeerNum:                5,
   606  		InternalEndpoint:           fmt.Sprintf("localhost:%d", port),
   607  		ExternalEndpoint:           fmt.Sprintf("1.2.3.4:%d", port),
   608  		PublishCertPeriod:          time.Duration(4) * time.Second,
   609  		PublishStateInfoInterval:   time.Duration(1) * time.Second,
   610  		RequestStateInfoInterval:   time.Duration(1) * time.Second,
   611  	}
   612  	cryptoService := &naiveCryptoService{}
   613  	idMapper := identity.NewIdentityMapper(cryptoService)
   614  
   615  	gossip := gossip.NewGossipServiceWithServer(conf, &orgCryptoService{}, cryptoService, idMapper, api.PeerIdentityType(conf.InternalEndpoint))
   616  
   617  	gossipService := &gossipServiceImpl{
   618  		gossipSvc:       gossip,
   619  		chains:          make(map[string]state.GossipStateProvider),
   620  		leaderElection:  make(map[string]election.LeaderElectionService),
   621  		deliveryFactory: &deliveryFactoryImpl{},
   622  		idMapper:        idMapper,
   623  		peerIdentity:    api.PeerIdentityType(conf.InternalEndpoint),
   624  	}
   625  
   626  	return gossipService
   627  }
   628  
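        // bootPeers translates peer indexes into localhost endpoints using the given port prefix.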
   629  func bootPeers(portPrefix int, ids ...int) []string {
   630  	peers := []string{}
   631  	for _, id := range ids {
   632  		peers = append(peers, fmt.Sprintf("localhost:%d", id+portPrefix))
   633  	}
   634  	return peers
   635  }
   636  
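        // naiveCryptoService is a crypto service stub: Sign returns the message itself and Verify merely compares the signature to the message bytes.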
   637  type naiveCryptoService struct {
   638  }
   639  
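        // orgCryptoService is a stub that maps every peer identity to orgInChannelA and accepts any join channel message.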
   640  type orgCryptoService struct {
   641  }
   642  
   643  // OrgByPeerIdentity returns the OrgIdentityType
   644  // of a given peer identity
   645  func (*orgCryptoService) OrgByPeerIdentity(identity api.PeerIdentityType) api.OrgIdentityType {
   646  	return orgInChannelA
   647  }
   648  
   649  // Verify verifies a JoinChanMessage, returns nil on success,
   650  // and an error on failure
   651  func (*orgCryptoService) Verify(joinChanMsg api.JoinChannelMessage) error {
   652  	return nil
   653  }
   654  
   655  // VerifyByChannel verifies a peer's signature on a message in the context
   656  // of a specific channel
   657  func (*naiveCryptoService) VerifyByChannel(_ gossipCommon.ChainID, _ api.PeerIdentityType, _, _ []byte) error {
   658  	return nil
   659  }
   660  
   661  func (*naiveCryptoService) ValidateIdentity(peerIdentity api.PeerIdentityType) error {
   662  	return nil
   663  }
   664  
   665  // GetPKIidOfCert returns the PKI-ID of a peer's identity
   666  func (*naiveCryptoService) GetPKIidOfCert(peerIdentity api.PeerIdentityType) gossipCommon.PKIidType {
   667  	return gossipCommon.PKIidType(peerIdentity)
   668  }
   669  
   670  // VerifyBlock returns nil if the block is properly signed,
   671  // else returns error
   672  func (*naiveCryptoService) VerifyBlock(chainID gossipCommon.ChainID, signedBlock []byte) error {
   673  	return nil
   674  }
   675  
   676  // Sign signs msg with this peer's signing key and outputs
   677  // the signature if no error occurred.
   678  func (*naiveCryptoService) Sign(msg []byte) ([]byte, error) {
   679  	return msg, nil
   680  }
   681  
   682  // Verify checks that signature is a valid signature of message under a peer's verification key.
   683  // If the verification succeeded, Verify returns nil meaning no error occurred.
   684  // If peerIdentity is nil, then the signature is verified against this peer's verification key.
   685  func (*naiveCryptoService) Verify(peerIdentity api.PeerIdentityType, signature, message []byte) error {
   686  	equal := bytes.Equal(signature, message)
   687  	if !equal {
   688  		return fmt.Errorf("Wrong signature:%v, %v", signature, message)
   689  	}
   690  	return nil
   691  }
   692  
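        // orgInChannelA is the single organization to which all test peers belong.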
   693  var orgInChannelA = api.OrgIdentityType("ORG1")
   694  
   695  func TestInvalidInitialization(t *testing.T) {
   696  	// Test that the delivery factory rejects an empty endpoint list and accepts a non-empty one
   697  	grpcServer := grpc.NewServer()
   698  	socket, err := net.Listen("tcp", fmt.Sprintf("%s:%d", "", 7611))
   699  	assert.NoError(t, err)
   700  
   701  	go grpcServer.Serve(socket)
   702  	defer grpcServer.Stop()
   703  
   704  	InitGossipService(api.PeerIdentityType("IDENTITY"), "localhost:7611", grpcServer, &naiveCryptoService{})
   705  	gService := GetGossipService().(*gossipServiceImpl)
   706  	defer gService.Stop()
   707  
   708  	dc, err := gService.deliveryFactory.Service(gService, []string{}, &naiveCryptoService{})
   709  	assert.Nil(t, dc)
   710  	assert.Error(t, err)
   711  
   712  	dc, err = gService.deliveryFactory.Service(gService, []string{"localhost:1984"}, &naiveCryptoService{})
   713  	assert.NotNil(t, dc)
   714  	assert.NoError(t, err)
   715  }
   716  
   717  func TestChannelConfig(t *testing.T) {
   718  	// Test that a channel configuration update is applied to the gossip service
   719  	grpcServer := grpc.NewServer()
   720  	socket, err := net.Listen("tcp", fmt.Sprintf("%s:%d", "", 6611))
   721  	assert.NoError(t, err)
   722  
   723  	go grpcServer.Serve(socket)
   724  	defer grpcServer.Stop()
   725  
   726  	InitGossipService(api.PeerIdentityType("IDENTITY"), "localhost:6611", grpcServer, &naiveCryptoService{})
   727  	gService := GetGossipService().(*gossipServiceImpl)
   728  	defer gService.Stop()
   729  
   730  	jcm := &joinChannelMessage{seqNum: 1, members2AnchorPeers: map[string][]api.AnchorPeer{
   731  		"A": {{Host: "host", Port: 5000}},
   732  	}}
   733  
   734  	assert.Equal(t, uint64(1), jcm.SequenceNumber())
   735  
   736  	mc := &mockConfig{
   737  		sequence: 1,
   738  		orgs: map[string]config.ApplicationOrg{
   739  			testOrgID: applicationOrgs([]*peer.AnchorPeer{}),
   740  		},
   741  	}
   742  	gService.JoinChan(jcm, gossipCommon.ChainID("A"))
   743  	gService.configUpdated(mc)
   744  	assert.True(t, gService.amIinChannel(string(orgInChannelA), mc))
   745  }