github.com/yacovm/fabric@v2.0.0-alpha.0.20191128145320-c5d4087dc723+incompatible/gossip/service/gossip_service_test.go

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package service

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/hyperledger/fabric-protos-go/common"
	"github.com/hyperledger/fabric-protos-go/peer"
	transientstore2 "github.com/hyperledger/fabric-protos-go/transientstore"
	"github.com/hyperledger/fabric/bccsp/sw"
	"github.com/hyperledger/fabric/common/channelconfig"
	"github.com/hyperledger/fabric/common/flogging"
	"github.com/hyperledger/fabric/common/metrics/disabled"
	"github.com/hyperledger/fabric/core/comm"
	"github.com/hyperledger/fabric/core/deliverservice"
	"github.com/hyperledger/fabric/core/ledger"
	"github.com/hyperledger/fabric/core/transientstore"
	"github.com/hyperledger/fabric/gossip/api"
	gcomm "github.com/hyperledger/fabric/gossip/comm"
	gossipcommon "github.com/hyperledger/fabric/gossip/common"
	"github.com/hyperledger/fabric/gossip/discovery"
	"github.com/hyperledger/fabric/gossip/election"
	"github.com/hyperledger/fabric/gossip/gossip"
	"github.com/hyperledger/fabric/gossip/gossip/algo"
	"github.com/hyperledger/fabric/gossip/gossip/channel"
	gossipmetrics "github.com/hyperledger/fabric/gossip/metrics"
	"github.com/hyperledger/fabric/gossip/privdata"
	"github.com/hyperledger/fabric/gossip/state"
	"github.com/hyperledger/fabric/gossip/util"
	peergossip "github.com/hyperledger/fabric/internal/peer/gossip"
	"github.com/hyperledger/fabric/internal/peer/gossip/mocks"
	"github.com/hyperledger/fabric/internal/pkg/identity"
	"github.com/hyperledger/fabric/internal/pkg/peer/blocksprovider"
	"github.com/hyperledger/fabric/internal/pkg/peer/orderers"
	"github.com/hyperledger/fabric/msp/mgmt"
	msptesttools "github.com/hyperledger/fabric/msp/mgmt/testtools"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc"
)

func init() {
	util.SetupTestLogging()
}

//go:generate counterfeiter -o mocks/signer_serializer.go --fake-name SignerSerializer . signerSerializer

type signerSerializer interface {
	identity.SignerSerializer
}

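// testTransientStore wraps a transient store backed by a temporary directory,
// so tests can persist and scan private read-write sets without a full peer.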
type testTransientStore struct {
	storeProvider transientstore.StoreProvider
	Store         *transientstore.Store
	tempdir       string
}

func newTransientStore(t *testing.T) *testTransientStore {
	s := &testTransientStore{}
	var err error
	s.tempdir, err = ioutil.TempDir("", "ts")
	if err != nil {
		t.Fatalf("Failed to create test directory, got err %s", err)
	}
	s.storeProvider, err = transientstore.NewStoreProvider(s.tempdir)
	if err != nil {
		t.Fatalf("Failed to create store provider, got err %s", err)
	}
	s.Store, err = s.storeProvider.OpenStore("test")
	if err != nil {
		t.Fatalf("Failed to open store, got err %s", err)
	}
	return s
}

func (s *testTransientStore) tearDown() {
	s.storeProvider.Close()
	os.RemoveAll(s.tempdir)
}

func (s *testTransientStore) Persist(txid string, blockHeight uint64,
	privateSimulationResultsWithConfig *transientstore2.TxPvtReadWriteSetWithConfigInfo) error {
	return s.Store.Persist(txid, blockHeight, privateSimulationResultsWithConfig)
}

func (s *testTransientStore) GetTxPvtRWSetByTxid(txid string, filter ledger.PvtNsCollFilter) (privdata.RWSetScanner, error) {
	return s.Store.GetTxPvtRWSetByTxid(txid, filter)
}

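// TestInitGossipService wires a real MSP-backed signer and message crypto
// service into New and checks that the resulting service starts and stops
// cleanly.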
func TestInitGossipService(t *testing.T) {
	grpcServer := grpc.NewServer()
	endpoint, socket := getAvailablePort(t)

	cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
	assert.NoError(t, err)

	msptesttools.LoadMSPSetupForTesting()
	signer := mgmt.GetLocalSigningIdentityOrPanic(cryptoProvider)

	messageCryptoService := peergossip.NewMCS(&mocks.ChannelPolicyManagerGetter{}, signer, mgmt.NewDeserializersManager(cryptoProvider), cryptoProvider)
	secAdv := peergossip.NewSecurityAdvisor(mgmt.NewDeserializersManager(cryptoProvider))
	gossipConfig, err := gossip.GlobalConfig(endpoint, nil)
	assert.NoError(t, err)

	grpcClient, err := comm.NewGRPCClient(comm.ClientConfig{})
	require.NoError(t, err)

	gossipService, err := New(
		signer,
		gossipmetrics.NewGossipMetrics(&disabled.Provider{}),
		endpoint,
		grpcServer,
		messageCryptoService,
		secAdv,
		nil,
		comm.NewCredentialSupport(),
		grpcClient,
		gossipConfig,
		&ServiceConfig{},
		&deliverservice.DeliverServiceConfig{
			ReConnectBackoffThreshold:   deliverservice.DefaultReConnectBackoffThreshold,
			ReconnectTotalTimeThreshold: deliverservice.DefaultReConnectTotalTimeThreshold,
		},
	)
	assert.NoError(t, err)

	go grpcServer.Serve(socket)
	defer grpcServer.Stop()

	defer gossipService.Stop()
}

// Make sure *joinChannelMessage implements the api.JoinChannelMessage interface
func TestJCMInterface(t *testing.T) {
	_ = api.JoinChannelMessage(&joinChannelMessage{})
	t.Parallel()
}

func TestLeaderElectionWithDeliverClient(t *testing.T) {
	t.Parallel()
	// This test checks that leader election works with a mock deliver service instance.
	// The configuration is set to use dynamic leader election.
	// 10 peers are started and added to the channel; at the end we check that
	// mockDeliverService.StartDeliverForChannel was invoked for exactly one peer.

	n := 10
	serviceConfig := &ServiceConfig{
		UseLeaderElection:                true,
		OrgLeader:                        false,
		ElectionStartupGracePeriod:       election.DefStartupGracePeriod,
		ElectionMembershipSampleInterval: election.DefMembershipSampleInterval,
		ElectionLeaderAliveThreshold:     election.DefLeaderAliveThreshold,
		ElectionLeaderElectionDuration:   election.DefLeaderElectionDuration,
	}
	gossips := startPeers(t, serviceConfig, n, 0, 1, 2, 3, 4)

	channelName := "chanA"
	peerIndexes := make([]int, n)
	for i := 0; i < n; i++ {
		peerIndexes[i] = i
	}
	addPeersToChannel(t, n, channelName, gossips, peerIndexes)

	waitForFullMembershipOrFailNow(t, channelName, gossips, n, time.Second*20, time.Second*2)

	services := make([]*electionService, n)

	store := newTransientStore(t)
	defer store.tearDown()

	for i := 0; i < n; i++ {
		deliverServiceFactory := &mockDeliverServiceFactory{
			service: &mockDeliverService{
				running: make(map[string]bool),
			},
		}
		gossips[i].deliveryFactory = deliverServiceFactory
		deliverServiceFactory.service.running[channelName] = false

		gossips[i].InitializeChannel(channelName, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), store.Store, Support{
			Committer: &mockLedgerInfo{1},
		})
		service, exist := gossips[i].leaderElection[channelName]
		assert.True(t, exist, "Leader election service should be created for peer %d and channel %s", i, channelName)
		services[i] = &electionService{nil, false, 0}
		services[i].LeaderElectionService = service
	}

	// Check that a single leader was elected.
	assert.True(t, waitForLeaderElection(t, services, time.Second*30, time.Second*2), "One leader should be selected")

	startsNum := 0
	for i := 0; i < n; i++ {
		// Check whether mockDeliverService.StartDeliverForChannel was invoked for this peer and channel
		if gossips[i].deliveryService[channelName].(*mockDeliverService).running[channelName] {
			startsNum++
		}
	}

	assert.Equal(t, 1, startsNum, "Delivery client should be started for exactly one peer")

	stopPeers(gossips)
}

func TestWithStaticDeliverClientLeader(t *testing.T) {
	// This test checks that the static leader flag works correctly.
	// The leader election flag is set to false, and the static leader flag to true.
	// Two gossip service instances (peers) are created.
	// Each peer is added to the channel and should run the mock delivery client.
	// After that, each peer is added to another channel and should run a deliver
	// client for that channel as well.

	serviceConfig := &ServiceConfig{
		UseLeaderElection:                false,
		OrgLeader:                        true,
		ElectionStartupGracePeriod:       election.DefStartupGracePeriod,
		ElectionMembershipSampleInterval: election.DefMembershipSampleInterval,
		ElectionLeaderAliveThreshold:     election.DefLeaderAliveThreshold,
		ElectionLeaderElectionDuration:   election.DefLeaderElectionDuration,
	}
	n := 2
	gossips := startPeers(t, serviceConfig, n, 0, 1)
	channelName := "chanA"
	peerIndexes := make([]int, n)
	for i := 0; i < n; i++ {
		peerIndexes[i] = i
	}

	addPeersToChannel(t, n, channelName, gossips, peerIndexes)

	waitForFullMembershipOrFailNow(t, channelName, gossips, n, time.Second*30, time.Second*2)

	store := newTransientStore(t)
	defer store.tearDown()

	deliverServiceFactory := &mockDeliverServiceFactory{
		service: &mockDeliverService{
			running: make(map[string]bool),
		},
	}

	for i := 0; i < n; i++ {
		gossips[i].deliveryFactory = deliverServiceFactory
		deliverServiceFactory.service.running[channelName] = false
		gossips[i].InitializeChannel(channelName, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), store.Store, Support{
			Committer: &mockLedgerInfo{1},
		})
	}

	for i := 0; i < n; i++ {
		assert.NotNil(t, gossips[i].deliveryService[channelName], "Delivery service for channel %s not initiated in peer %d", channelName, i)
		assert.True(t, gossips[i].deliveryService[channelName].(*mockDeliverService).running[channelName], "Block deliverer not started for peer %d", i)
	}

	channelName = "chanB"
	for i := 0; i < n; i++ {
		deliverServiceFactory.service.running[channelName] = false
		gossips[i].InitializeChannel(channelName, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), store.Store, Support{
			Committer: &mockLedgerInfo{1},
		})
	}

	for i := 0; i < n; i++ {
		assert.NotNil(t, gossips[i].deliveryService[channelName], "Delivery service for channel %s not initiated in peer %d", channelName, i)
		assert.True(t, gossips[i].deliveryService[channelName].(*mockDeliverService).running[channelName], "Block deliverer not started for peer %d", i)
	}

	stopPeers(gossips)
}

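// TestWithStaticDeliverClientNotLeader checks that with leader election
// disabled and the static OrgLeader flag off, no peer starts a block
// deliverer for the channel.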
func TestWithStaticDeliverClientNotLeader(t *testing.T) {
	serviceConfig := &ServiceConfig{
		UseLeaderElection:                false,
		OrgLeader:                        false,
		ElectionStartupGracePeriod:       election.DefStartupGracePeriod,
		ElectionMembershipSampleInterval: election.DefMembershipSampleInterval,
		ElectionLeaderAliveThreshold:     election.DefLeaderAliveThreshold,
		ElectionLeaderElectionDuration:   election.DefLeaderElectionDuration,
	}
	n := 2
	gossips := startPeers(t, serviceConfig, n, 0, 1)

	channelName := "chanA"
	peerIndexes := make([]int, n)
	for i := 0; i < n; i++ {
		peerIndexes[i] = i
	}

	addPeersToChannel(t, n, channelName, gossips, peerIndexes)

	waitForFullMembershipOrFailNow(t, channelName, gossips, n, time.Second*30, time.Second*2)

	store := newTransientStore(t)
	defer store.tearDown()

	deliverServiceFactory := &mockDeliverServiceFactory{
		service: &mockDeliverService{
			running: make(map[string]bool),
		},
	}

	for i := 0; i < n; i++ {
		gossips[i].deliveryFactory = deliverServiceFactory
		deliverServiceFactory.service.running[channelName] = false
		gossips[i].InitializeChannel(channelName, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), store.Store, Support{
			Committer: &mockLedgerInfo{1},
		})
	}

	for i := 0; i < n; i++ {
		assert.NotNil(t, gossips[i].deliveryService[channelName], "Delivery service for channel %s not initiated in peer %d", channelName, i)
		assert.False(t, gossips[i].deliveryService[channelName].(*mockDeliverService).running[channelName], "Block deliverer should not be started for peer %d", i)
	}

	stopPeers(gossips)
}

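// TestWithStaticDeliverClientBothStaticAndLeaderElection verifies that
// enabling dynamic leader election together with static leadership is
// rejected: InitializeChannel is expected to panic.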
func TestWithStaticDeliverClientBothStaticAndLeaderElection(t *testing.T) {
	serviceConfig := &ServiceConfig{
		UseLeaderElection:                true,
		OrgLeader:                        true,
		ElectionStartupGracePeriod:       election.DefStartupGracePeriod,
		ElectionMembershipSampleInterval: election.DefMembershipSampleInterval,
		ElectionLeaderAliveThreshold:     election.DefLeaderAliveThreshold,
		ElectionLeaderElectionDuration:   election.DefLeaderElectionDuration,
	}
	n := 2
	gossips := startPeers(t, serviceConfig, n, 0, 1)

	channelName := "chanA"
	peerIndexes := make([]int, n)
	for i := 0; i < n; i++ {
		peerIndexes[i] = i
	}

	addPeersToChannel(t, n, channelName, gossips, peerIndexes)

	waitForFullMembershipOrFailNow(t, channelName, gossips, n, time.Second*30, time.Second*2)

	store := newTransientStore(t)
	defer store.tearDown()

	deliverServiceFactory := &mockDeliverServiceFactory{
		service: &mockDeliverService{
			running: make(map[string]bool),
		},
	}

	for i := 0; i < n; i++ {
		gossips[i].deliveryFactory = deliverServiceFactory
		assert.Panics(t, func() {
			gossips[i].InitializeChannel(channelName, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), store.Store, Support{
				Committer: &mockLedgerInfo{1},
			})
		}, "Dynamic leader election and a static connection to the ordering service cannot coexist")
	}

	stopPeers(gossips)
}

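// mockDeliverServiceFactory hands out a single canned mockDeliverService,
// letting tests inspect which channels had block delivery started.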
type mockDeliverServiceFactory struct {
	service *mockDeliverService
}

func (mf *mockDeliverServiceFactory) Service(GossipServiceAdapter, *orderers.ConnectionSource, api.MessageCryptoService) deliverservice.DeliverService {
	return mf.service
}

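// mockDeliverService records, per channel, whether block delivery is
// currently running.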
type mockDeliverService struct {
	running map[string]bool
}

func (ds *mockDeliverService) StartDeliverForChannel(chainID string, ledgerInfo blocksprovider.LedgerInfo, finalizer func()) error {
	ds.running[chainID] = true
	return nil
}

func (ds *mockDeliverService) StopDeliverForChannel(chainID string) error {
	ds.running[chainID] = false
	return nil
}

func (ds *mockDeliverService) Stop() {
}

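// mockLedgerInfo is a minimal committer mock: only the methods these tests
// actually reach are implemented; the private-data retrieval methods panic.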
type mockLedgerInfo struct {
	Height uint64
}

func (li *mockLedgerInfo) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
	panic("implement me")
}

func (li *mockLedgerInfo) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) {
	panic("implement me")
}

func (li *mockLedgerInfo) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
	panic("implement me")
}

func (li *mockLedgerInfo) CommitLegacy(blockAndPvtData *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
	panic("implement me")
}

func (li *mockLedgerInfo) CommitPvtDataOfOldBlocks(reconciledPvtdata []*ledger.ReconciledPvtdata) ([]*ledger.PvtdataHashMismatch, error) {
	panic("implement me")
}

func (li *mockLedgerInfo) GetPvtDataAndBlockByNum(seqNum uint64) (*ledger.BlockAndPvtData, error) {
	panic("implement me")
}

// LedgerHeight returns the mocked ledger height
func (li *mockLedgerInfo) LedgerHeight() (uint64, error) {
	return li.Height, nil
}

func (li *mockLedgerInfo) DoesPvtDataInfoExistInLedger(blkNum uint64) (bool, error) {
	return false, nil
}

// Commit commits a block to the ledger
func (li *mockLedgerInfo) Commit(block *common.Block) error {
	return nil
}

// GetBlocks returns the blocks whose sequence numbers are provided in the slice
func (li *mockLedgerInfo) GetBlocks(blockSeqs []uint64) []*common.Block {
	return make([]*common.Block, 0)
}

// Close closes the committing service
func (li *mockLedgerInfo) Close() {
}

func TestLeaderElectionWithRealGossip(t *testing.T) {
	t.Parallel()
	// Spawn 10 gossip instances with a single channel, all inside the same organization.
	// Run leader election on top of each gossip instance and check that only one leader is chosen.
	// Create another channel that includes a subset of the peers over the same gossip instances {1,3,5,7}.
	// Run additional leader election services for the new channel.
	// Check that the correct leader still exists for the first channel and that a new
	// correct leader is chosen in the second channel.
	// Stop the gossip instances of the leader peers for both channels and see that
	// new leaders are chosen for both.

	// Creating gossip service instances for peers
	serviceConfig := &ServiceConfig{
		UseLeaderElection:                false,
		OrgLeader:                        false,
		ElectionStartupGracePeriod:       election.DefStartupGracePeriod,
		ElectionMembershipSampleInterval: election.DefMembershipSampleInterval,
		ElectionLeaderAliveThreshold:     election.DefLeaderAliveThreshold,
		ElectionLeaderElectionDuration:   election.DefLeaderElectionDuration,
	}

	n := 10
	gossips := startPeers(t, serviceConfig, n, 0, 1, 2, 3, 4)
	// Joining all peers to the first channel
	channelName := "chanA"
	peerIndexes := make([]int, n)
	for i := 0; i < n; i++ {
		peerIndexes[i] = i
	}
	addPeersToChannel(t, n, channelName, gossips, peerIndexes)

	waitForFullMembershipOrFailNow(t, channelName, gossips, n, time.Second*30, time.Second*2)

	logger.Warning("Starting leader election services")

	// Starting leader election services
	services := make([]*electionService, n)

	electionMetrics := gossipmetrics.NewGossipMetrics(&disabled.Provider{}).ElectionMetrics

	for i := 0; i < n; i++ {
		services[i] = &electionService{nil, false, 0}
		services[i].LeaderElectionService = gossips[i].newLeaderElectionComponent(channelName, services[i].callback, electionMetrics)
	}

	logger.Warning("Waiting for leader election")

	assert.True(t, waitForLeaderElection(t, services, time.Second*30, time.Second*2), "One leader should be selected")

	startsNum := 0
	for i := 0; i < n; i++ {
		// Check whether the callback was invoked by this leader election service instance
		if services[i].callbackInvokeRes {
			startsNum++
		}
	}
	// Only the leader should invoke the callback, so this double-checks that exactly one leader exists
	assert.Equal(t, 1, startsNum, "The callback should be invoked for exactly one peer - chanA")

	// Adding some peers to a new channel and creating leader election services for the peers in it.
	// Expecting peer 1 (first in the list of election services) to become leader of the second channel.
	secondChannelPeerIndexes := []int{1, 3, 5, 7}
	secondChannelName := "chanB"
	secondChannelServices := make([]*electionService, len(secondChannelPeerIndexes))
	addPeersToChannel(t, n, secondChannelName, gossips, secondChannelPeerIndexes)

	secondChannelGossips := make([]*gossipGRPC, 0)
	for _, i := range secondChannelPeerIndexes {
		secondChannelGossips = append(secondChannelGossips, gossips[i])
	}
	waitForFullMembershipOrFailNow(t, secondChannelName, secondChannelGossips, len(secondChannelGossips), time.Second*30, time.Millisecond*100)

	for idx, i := range secondChannelPeerIndexes {
		secondChannelServices[idx] = &electionService{nil, false, 0}
		secondChannelServices[idx].LeaderElectionService =
			gossips[i].newLeaderElectionComponent(secondChannelName, secondChannelServices[idx].callback, electionMetrics)
	}

	assert.True(t, waitForLeaderElection(t, secondChannelServices, time.Second*30, time.Second*2), "One leader should be selected for chanB")
	assert.True(t, waitForLeaderElection(t, services, time.Second*30, time.Second*2), "One leader should be selected for chanA")

	startsNum = 0
	for i := 0; i < n; i++ {
		if services[i].callbackInvokeRes {
			startsNum++
		}
	}
	assert.Equal(t, 1, startsNum, "The callback should be invoked for exactly one peer - chanA")

	startsNum = 0
	for i := 0; i < len(secondChannelServices); i++ {
		if secondChannelServices[i].callbackInvokeRes {
			startsNum++
		}
	}
	assert.Equal(t, 1, startsNum, "The callback should be invoked for exactly one peer - chanB")

	// Stopping 2 gossip instances (peer 0 and peer 1) should trigger re-election.
	// Peer 2 should now become leader of the first channel and peer 3 of the second.

	logger.Warning("Killing 2 peers, initiating new leader election")

	stopPeers(gossips[:2])

	waitForFullMembershipOrFailNow(t, channelName, gossips[2:], n-2, time.Second*30, time.Millisecond*100)
	waitForFullMembershipOrFailNow(t, secondChannelName, secondChannelGossips[1:], len(secondChannelGossips)-1, time.Second*30, time.Millisecond*100)

	assert.True(t, waitForLeaderElection(t, services[2:], time.Second*30, time.Second*2), "One leader should be selected after re-election - chanA")
	assert.True(t, waitForLeaderElection(t, secondChannelServices[1:], time.Second*30, time.Second*2), "One leader should be selected after re-election - chanB")

	startsNum = 0
	for i := 2; i < n; i++ {
		if services[i].callbackInvokeRes {
			startsNum++
		}
	}
	assert.Equal(t, 1, startsNum, "The callback should be invoked for exactly one peer after re-election - chanA")

	startsNum = 0
	for i := 1; i < len(secondChannelServices); i++ {
		if secondChannelServices[i].callbackInvokeRes {
			startsNum++
		}
	}
	assert.Equal(t, 1, startsNum, "The callback should be invoked for exactly one peer after re-election - chanB")

	stopServices(secondChannelServices)
	stopServices(services)
	stopPeers(gossips[2:])
}

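// electionService wraps a LeaderElectionService and records whether, and how
// many times, its leadership callback was invoked.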
type electionService struct {
	election.LeaderElectionService
	callbackInvokeRes   bool
	callbackInvokeCount int
}

func (es *electionService) callback(isLeader bool) {
	es.callbackInvokeRes = isLeader
	es.callbackInvokeCount++
}

type joinChanMsg struct{}

// SequenceNumber returns the sequence number of the block this joinChanMsg
// is derived from
func (jmc *joinChanMsg) SequenceNumber() uint64 {
	return uint64(time.Now().UnixNano())
}

// Members returns the organizations of the channel
func (jmc *joinChanMsg) Members() []api.OrgIdentityType {
	return []api.OrgIdentityType{orgInChannelA}
}

// AnchorPeersOf returns the anchor peers of the given organization
func (jmc *joinChanMsg) AnchorPeersOf(org api.OrgIdentityType) []api.AnchorPeer {
	return []api.AnchorPeer{}
}

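// waitForFullMembershipOrFailNow polls until every gossip instance sees the
// other peersNum-1 peers on the channel, failing the test once the timeout
// expires.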
func waitForFullMembershipOrFailNow(t *testing.T, channel string, gossips []*gossipGRPC, peersNum int, timeout time.Duration, testPollInterval time.Duration) {
	end := time.Now().Add(timeout)
	var correctPeers int
	for time.Now().Before(end) {
		correctPeers = 0
		for _, g := range gossips {
			if len(g.PeersOfChannel(gossipcommon.ChannelID(channel))) == (peersNum - 1) {
				correctPeers++
			}
		}
		if correctPeers == peersNum {
			return
		}
		time.Sleep(testPollInterval)
	}
	t.Fatalf("Failed to establish full channel membership. Only %d out of %d peers have full membership", correctPeers, peersNum)
}

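// waitForMultipleLeadersElection polls until exactly leadersNum services
// report leadership on two consecutive polls (so a transient leadership view
// is not counted), and returns false on timeout.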
func waitForMultipleLeadersElection(t *testing.T, services []*electionService, leadersNum int, timeout time.Duration, testPollInterval time.Duration) bool {
	logger.Warning("Waiting for", leadersNum, "leaders")
	end := time.Now().Add(timeout)
	correctNumberOfLeadersFound := false
	leaders := 0
	for time.Now().Before(end) {
		leaders = 0
		for _, s := range services {
			if s.IsLeader() {
				leaders++
			}
		}
		if leaders == leadersNum {
			if correctNumberOfLeadersFound {
				return true
			}
			correctNumberOfLeadersFound = true
		} else {
			correctNumberOfLeadersFound = false
		}
		time.Sleep(testPollInterval)
	}
	logger.Warning("Incorrect number of leaders", leaders)
	for i, s := range services {
		logger.Warning("Peer at index", i, "is leader:", s.IsLeader())
	}
	return false
}

func waitForLeaderElection(t *testing.T, services []*electionService, timeout time.Duration, testPollInterval time.Duration) bool {
	return waitForMultipleLeadersElection(t, services, 1, timeout, testPollInterval)
}

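// waitUntilOrFailBlocking runs f in a goroutine and fails the test if it has
// not returned within the timeout. Typical use in this file:
// waitUntilOrFailBlocking(t, wg.Wait, time.Second*10).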
func waitUntilOrFailBlocking(t *testing.T, f func(), timeout time.Duration) {
	successChan := make(chan struct{}, 1)
	go func() {
		f()
		successChan <- struct{}{}
	}()
	select {
	case <-successChan:
		return
	case <-time.After(timeout):
	}
	util.PrintStackTrace()
	assert.Fail(t, "Timeout expired!")
}

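// stopServices stops all election services concurrently, then sleeps briefly
// to let gossip settle.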
func stopServices(services []*electionService) {
	stoppingWg := sync.WaitGroup{}
	stoppingWg.Add(len(services))
	for _, s := range services {
		go func(s election.LeaderElectionService) {
			defer stoppingWg.Done()
			s.Stop()
		}(s)
	}
	stoppingWg.Wait()
	time.Sleep(2 * time.Second)
}

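// stopPeers stops the underlying GossipService of every peer concurrently.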
func stopPeers(peers []*gossipGRPC) {
	stoppingWg := sync.WaitGroup{}
	stoppingWg.Add(len(peers))
	for _, p := range peers {
		go func(p *GossipService) {
			defer stoppingWg.Done()
			p.Stop()
		}(p.GossipService)
	}
	stoppingWg.Wait()
	time.Sleep(2 * time.Second)
}

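// addPeersToChannel joins the peers at peerIndexes to the channel and
// publishes an initial ledger height of 0 for each of them.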
func addPeersToChannel(t *testing.T, n int, channel string, peers []*gossipGRPC, peerIndexes []int) {
	jcm := &joinChanMsg{}

	wg := sync.WaitGroup{}
	for _, i := range peerIndexes {
		wg.Add(1)
		go func(i int) {
			peers[i].JoinChan(jcm, gossipcommon.ChannelID(channel))
			peers[i].UpdateLedgerHeight(0, gossipcommon.ChannelID(channel))
			wg.Done()
		}(i)
	}
	waitUntilOrFailBlocking(t, wg.Wait, time.Second*10)
}

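// startPeers creates n gossip instances on freshly allocated gRPC ports; the
// indexes in boot select which of those ports act as bootstrap endpoints for
// all instances.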
func startPeers(t *testing.T, serviceConfig *ServiceConfig, n int, boot ...int) []*gossipGRPC {
	var ports []int
	var grpcs []*comm.GRPCServer
	var certs []*gossipcommon.TLSCertificates
	var secDialOpts []api.PeerSecureDialOpts

	for i := 0; i < n; i++ {
		port, grpcServer, cert, secDialOpt, _ := util.CreateGRPCLayer()
		ports = append(ports, port)
		grpcs = append(grpcs, grpcServer)
		certs = append(certs, cert)
		secDialOpts = append(secDialOpts, secDialOpt)
	}

	var bootPorts []int
	for _, index := range boot {
		bootPorts = append(bootPorts, ports[index])
	}

	peers := make([]*gossipGRPC, n)
	wg := sync.WaitGroup{}
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func(i int) {
			peers[i] = newGossipInstance(serviceConfig, ports[i], i, grpcs[i], certs[i], secDialOpts[i], 100, bootPorts...)
			wg.Done()
		}(i)
	}
	waitUntilOrFailBlocking(t, wg.Wait, time.Second*10)

	return peers
}

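// newGossipInstance assembles a GossipService around a real gossip component
// configured with aggressive test timings, a permissive crypto service, and
// disabled metrics.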
func newGossipInstance(serviceConfig *ServiceConfig, port int, id int, gRPCServer *comm.GRPCServer, certs *gossipcommon.TLSCertificates,
	secureDialOpts api.PeerSecureDialOpts, maxMsgCount int, bootPorts ...int) *gossipGRPC {
	conf := &gossip.Config{
		BindPort:                     port,
		BootstrapPeers:               bootPeers(bootPorts...),
		ID:                           fmt.Sprintf("p%d", id),
		MaxBlockCountToStore:         maxMsgCount,
		MaxPropagationBurstLatency:   500 * time.Millisecond,
		MaxPropagationBurstSize:      20,
		PropagateIterations:          1,
		PropagatePeerNum:             3,
		PullInterval:                 2 * time.Second,
		PullPeerNum:                  5,
		InternalEndpoint:             fmt.Sprintf("127.0.0.1:%d", port),
		ExternalEndpoint:             fmt.Sprintf("1.2.3.4:%d", port),
		PublishCertPeriod:            4 * time.Second,
		PublishStateInfoInterval:     time.Second,
		RequestStateInfoInterval:     time.Second,
		TimeForMembershipTracker:     5 * time.Second,
		TLSCerts:                     certs,
		DigestWaitTime:               algo.DefDigestWaitTime,
		RequestWaitTime:              algo.DefRequestWaitTime,
		ResponseWaitTime:             algo.DefResponseWaitTime,
		DialTimeout:                  gcomm.DefDialTimeout,
		ConnTimeout:                  gcomm.DefConnTimeout,
		RecvBuffSize:                 gcomm.DefRecvBuffSize,
		SendBuffSize:                 gcomm.DefSendBuffSize,
		MsgExpirationTimeout:         channel.DefMsgExpirationTimeout,
		AliveTimeInterval:            discovery.DefAliveTimeInterval,
		AliveExpirationTimeout:       discovery.DefAliveExpirationTimeout,
		AliveExpirationCheckInterval: discovery.DefAliveExpirationCheckInterval,
		ReconnectInterval:            time.Second,
	}
	selfID := api.PeerIdentityType(conf.InternalEndpoint)
	cryptoService := &naiveCryptoService{}
	metrics := gossipmetrics.NewGossipMetrics(&disabled.Provider{})
	gossipSvc := gossip.New(
		conf,
		gRPCServer.Server(),
		&orgCryptoService{},
		cryptoService,
		selfID,
		secureDialOpts,
		metrics,
	)
	go gRPCServer.Start()

	gossipService := &GossipService{
		mcs:             cryptoService,
		gossipSvc:       gossipSvc,
		chains:          make(map[string]state.GossipStateProvider),
		leaderElection:  make(map[string]election.LeaderElectionService),
		privateHandlers: make(map[string]privateHandler),
		deliveryService: make(map[string]deliverservice.DeliverService),
		deliveryFactory: &deliveryFactoryImpl{
			credentialSupport: comm.NewCredentialSupport(),
		},
		peerIdentity:  api.PeerIdentityType(conf.InternalEndpoint),
		metrics:       metrics,
		serviceConfig: serviceConfig,
	}

	return &gossipGRPC{GossipService: gossipService, grpc: gRPCServer}
}

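// gossipGRPC couples a GossipService with the gRPC server it listens on, so
// that both can be stopped together.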
type gossipGRPC struct {
	*GossipService
	grpc *comm.GRPCServer
}

func (g *gossipGRPC) Stop() {
	g.GossipService.Stop()
	g.grpc.Stop()
}

func bootPeers(ports ...int) []string {
	var peers []string
	for _, port := range ports {
		peers = append(peers, fmt.Sprintf("127.0.0.1:%d", port))
	}
	return peers
}

func getAvailablePort(t *testing.T) (endpoint string, ll net.Listener) {
	ll, err := net.Listen("tcp", "127.0.0.1:0")
	assert.NoError(t, err)
	endpoint = ll.Addr().String()
	return endpoint, ll
}

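// naiveCryptoService is a trivially permissive MessageCryptoService: Sign
// returns the message itself, Verify merely checks that the signature equals
// the message, and all other checks succeed unconditionally.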
type naiveCryptoService struct{}

type orgCryptoService struct{}

// OrgByPeerIdentity returns the OrgIdentityType
// of a given peer identity
func (*orgCryptoService) OrgByPeerIdentity(identity api.PeerIdentityType) api.OrgIdentityType {
	return orgInChannelA
}

// Verify verifies a JoinChanMessage; it returns nil on success
// and an error on failure
func (*orgCryptoService) Verify(joinChanMsg api.JoinChannelMessage) error {
	return nil
}

func (naiveCryptoService) Expiration(peerIdentity api.PeerIdentityType) (time.Time, error) {
	return time.Now().Add(time.Hour), nil
}

// VerifyByChannel verifies a peer's signature on a message in the context
// of a specific channel
func (*naiveCryptoService) VerifyByChannel(_ gossipcommon.ChannelID, _ api.PeerIdentityType, _, _ []byte) error {
	return nil
}

func (*naiveCryptoService) ValidateIdentity(peerIdentity api.PeerIdentityType) error {
	return nil
}

// GetPKIidOfCert returns the PKI-ID of a peer's identity
func (*naiveCryptoService) GetPKIidOfCert(peerIdentity api.PeerIdentityType) gossipcommon.PKIidType {
	return gossipcommon.PKIidType(peerIdentity)
}

// VerifyBlock returns nil if the block is properly signed,
// otherwise it returns an error
func (*naiveCryptoService) VerifyBlock(chainID gossipcommon.ChannelID, seqNum uint64, signedBlock *common.Block) error {
	return nil
}

// Sign signs msg with this peer's signing key and outputs
// the signature if no error occurred
func (*naiveCryptoService) Sign(msg []byte) ([]byte, error) {
	return msg, nil
}

// Verify checks that signature is a valid signature of message under a peer's verification key.
// If the verification succeeded, Verify returns nil, meaning no error occurred.
// If peerCert is nil, then the signature is verified against this peer's verification key.
func (*naiveCryptoService) Verify(peerIdentity api.PeerIdentityType, signature, message []byte) error {
	if !bytes.Equal(signature, message) {
		return fmt.Errorf("wrong signature: %v, %v", signature, message)
	}
	return nil
}

var orgInChannelA = api.OrgIdentityType("ORG1")

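// TestInvalidInitialization builds the service with a mocked signer
// serializer and checks that the delivery factory still returns a non-nil
// deliver service.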
func TestInvalidInitialization(t *testing.T) {
	grpcServer := grpc.NewServer()
	endpoint, socket := getAvailablePort(t)

	cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
	assert.NoError(t, err)

	mockSignerSerializer := &mocks.SignerSerializer{}
	mockSignerSerializer.SerializeReturns(api.PeerIdentityType("peer-identity"), nil)
	secAdv := peergossip.NewSecurityAdvisor(mgmt.NewDeserializersManager(cryptoProvider))
	gossipConfig, err := gossip.GlobalConfig(endpoint, nil)
	assert.NoError(t, err)

	grpcClient, err := comm.NewGRPCClient(comm.ClientConfig{})
	require.NoError(t, err)

	gossipService, err := New(
		mockSignerSerializer,
		gossipmetrics.NewGossipMetrics(&disabled.Provider{}),
		endpoint,
		grpcServer,
		&naiveCryptoService{},
		secAdv,
		nil,
		comm.NewCredentialSupport(),
		grpcClient,
		gossipConfig,
		&ServiceConfig{},
		&deliverservice.DeliverServiceConfig{
			PeerTLSEnabled:              false,
			ReConnectBackoffThreshold:   deliverservice.DefaultReConnectBackoffThreshold,
			ReconnectTotalTimeThreshold: deliverservice.DefaultReConnectTotalTimeThreshold,
		},
	)
	assert.NoError(t, err)
	gService := gossipService
	defer gService.Stop()

	go grpcServer.Serve(socket)
	defer grpcServer.Stop()

	dc := gService.deliveryFactory.Service(gService, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), &naiveCryptoService{})
	assert.NotNil(t, dc)
}

func TestChannelConfig(t *testing.T) {
	// Tests whether the gossip service is indeed a singleton
	grpcServer := grpc.NewServer()
	endpoint, socket := getAvailablePort(t)

	cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
	assert.NoError(t, err)

	mockSignerSerializer := &mocks.SignerSerializer{}
	mockSignerSerializer.SerializeReturns(api.PeerIdentityType("peer-identity"), nil)
	secAdv := peergossip.NewSecurityAdvisor(mgmt.NewDeserializersManager(cryptoProvider))
	gossipConfig, err := gossip.GlobalConfig(endpoint, nil)
	assert.NoError(t, err)

	grpcClient, err := comm.NewGRPCClient(comm.ClientConfig{})
	require.NoError(t, err)

	gossipService, err := New(
		mockSignerSerializer,
		gossipmetrics.NewGossipMetrics(&disabled.Provider{}),
		endpoint,
		grpcServer,
		&naiveCryptoService{},
		secAdv,
		nil,
		nil,
		grpcClient,
		gossipConfig,
		&ServiceConfig{},
		&deliverservice.DeliverServiceConfig{
			ReConnectBackoffThreshold:   deliverservice.DefaultReConnectBackoffThreshold,
			ReconnectTotalTimeThreshold: deliverservice.DefaultReConnectTotalTimeThreshold,
		},
	)
	assert.NoError(t, err)
	gService := gossipService
	defer gService.Stop()

	go grpcServer.Serve(socket)
	defer grpcServer.Stop()

	jcm := &joinChannelMessage{seqNum: 1, members2AnchorPeers: map[string][]api.AnchorPeer{
		"A": {{Host: "host", Port: 5000}},
	}}

	assert.Equal(t, uint64(1), jcm.SequenceNumber())

	mc := &mockConfig{
		sequence: 1,
		orgs: map[string]channelconfig.ApplicationOrg{
			string(orgInChannelA): &appGrp{
				mspID:       string(orgInChannelA),
				anchorPeers: []*peer.AnchorPeer{},
			},
		},
	}
	gService.JoinChan(jcm, gossipcommon.ChannelID("A"))
	gService.updateAnchors(mc)
	assert.True(t, gService.amIinChannel(string(orgInChannelA), mc))
}

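// defaultDeliverClientDialOpts returns blocking gRPC dial options with the
// peer's default message-size limits and keepalive settings.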
func defaultDeliverClientDialOpts() []grpc.DialOption {
	dialOpts := []grpc.DialOption{grpc.WithBlock()}
	dialOpts = append(
		dialOpts,
		grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(comm.MaxRecvMsgSize),
			grpc.MaxCallSendMsgSize(comm.MaxSendMsgSize)))
	kaOpts := comm.DefaultKeepaliveOptions
	dialOpts = append(dialOpts, comm.ClientKeepaliveOptions(kaOpts)...)

	return dialOpts
}