github.com/ewagmig/fabric@v2.1.1+incompatible/gossip/service/gossip_service_test.go (about)

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package service
     8  
     9  import (
    10  	"bytes"
    11  	"fmt"
    12  	"io/ioutil"
    13  	"net"
    14  	"os"
    15  	"testing"
    16  	"time"
    17  
    18  	"github.com/hyperledger/fabric-protos-go/common"
    19  	"github.com/hyperledger/fabric-protos-go/peer"
    20  	transientstore2 "github.com/hyperledger/fabric-protos-go/transientstore"
    21  	"github.com/hyperledger/fabric/bccsp/factory"
    22  	"github.com/hyperledger/fabric/bccsp/sw"
    23  	"github.com/hyperledger/fabric/common/channelconfig"
    24  	"github.com/hyperledger/fabric/common/flogging"
    25  	"github.com/hyperledger/fabric/common/metrics/disabled"
    26  	"github.com/hyperledger/fabric/core/deliverservice"
    27  	"github.com/hyperledger/fabric/core/ledger"
    28  	"github.com/hyperledger/fabric/core/transientstore"
    29  	"github.com/hyperledger/fabric/gossip/api"
    30  	gcomm "github.com/hyperledger/fabric/gossip/comm"
    31  	gossipcommon "github.com/hyperledger/fabric/gossip/common"
    32  	"github.com/hyperledger/fabric/gossip/discovery"
    33  	"github.com/hyperledger/fabric/gossip/election"
    34  	"github.com/hyperledger/fabric/gossip/gossip"
    35  	"github.com/hyperledger/fabric/gossip/gossip/algo"
    36  	"github.com/hyperledger/fabric/gossip/gossip/channel"
    37  	gossipmetrics "github.com/hyperledger/fabric/gossip/metrics"
    38  	"github.com/hyperledger/fabric/gossip/privdata"
    39  	"github.com/hyperledger/fabric/gossip/state"
    40  	"github.com/hyperledger/fabric/gossip/util"
    41  	peergossip "github.com/hyperledger/fabric/internal/peer/gossip"
    42  	"github.com/hyperledger/fabric/internal/peer/gossip/mocks"
    43  	"github.com/hyperledger/fabric/internal/pkg/comm"
    44  	"github.com/hyperledger/fabric/internal/pkg/identity"
    45  	"github.com/hyperledger/fabric/internal/pkg/peer/blocksprovider"
    46  	"github.com/hyperledger/fabric/internal/pkg/peer/orderers"
    47  	"github.com/hyperledger/fabric/msp/mgmt"
    48  	msptesttools "github.com/hyperledger/fabric/msp/mgmt/testtools"
    49  	"github.com/stretchr/testify/assert"
    50  	"github.com/stretchr/testify/require"
    51  	"google.golang.org/grpc"
    52  )
    53  
// TIMEOUT bounds how long membership/election polling helpers wait before failing the test.
const TIMEOUT = 45 * time.Second
    55  
// init configures gossip test logging for every test in this package.
func init() {
	util.SetupTestLogging()
}
    59  
//go:generate counterfeiter -o mocks/signer_serializer.go --fake-name SignerSerializer . signerSerializer

// signerSerializer aliases identity.SignerSerializer so counterfeiter can
// generate a fake for it (see the go:generate directive above).
type signerSerializer interface {
	identity.SignerSerializer
}
    65  
// testTransientStore bundles a transient store, its provider, and the
// temporary directory backing it, so tests can tear all three down together.
type testTransientStore struct {
	storeProvider transientstore.StoreProvider
	Store         *transientstore.Store
	tempdir       string
}
    71  
    72  func newTransientStore(t *testing.T) *testTransientStore {
    73  	s := &testTransientStore{}
    74  	var err error
    75  	s.tempdir, err = ioutil.TempDir("", "ts")
    76  	if err != nil {
    77  		t.Fatalf("Failed to create test directory, got err %s", err)
    78  		return s
    79  	}
    80  	s.storeProvider, err = transientstore.NewStoreProvider(s.tempdir)
    81  	if err != nil {
    82  		t.Fatalf("Failed to open store, got err %s", err)
    83  		return s
    84  	}
    85  	s.Store, err = s.storeProvider.OpenStore("test")
    86  	if err != nil {
    87  		t.Fatalf("Failed to open store, got err %s", err)
    88  		return s
    89  	}
    90  	return s
    91  }
    92  
// tearDown closes the store provider and deletes the temporary directory
// created by newTransientStore.
func (s *testTransientStore) tearDown() {
	s.storeProvider.Close()
	os.RemoveAll(s.tempdir)
}
    97  
// Persist delegates to the underlying transient store, recording the private
// simulation results for txid at the given block height.
func (s *testTransientStore) Persist(txid string, blockHeight uint64,
	privateSimulationResultsWithConfig *transientstore2.TxPvtReadWriteSetWithConfigInfo) error {
	return s.Store.Persist(txid, blockHeight, privateSimulationResultsWithConfig)
}
   102  
// GetTxPvtRWSetByTxid delegates to the underlying transient store, returning
// a scanner over the private read-write sets stored for txid.
func (s *testTransientStore) GetTxPvtRWSetByTxid(txid string, filter ledger.PvtNsCollFilter) (privdata.RWSetScanner, error) {
	return s.Store.GetTxPvtRWSetByTxid(txid, filter)
}
   106  
// TestInitGossipService wires up a full GossipService via New with real MSP
// material and a live gRPC server, asserting only that construction succeeds.
func TestInitGossipService(t *testing.T) {
	grpcServer := grpc.NewServer()
	endpoint, socket := getAvailablePort(t)

	cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
	assert.NoError(t, err)

	// Load the test MSP so a real signing identity is available.
	msptesttools.LoadMSPSetupForTesting()
	signer := mgmt.GetLocalSigningIdentityOrPanic(cryptoProvider)

	messageCryptoService := peergossip.NewMCS(&mocks.ChannelPolicyManagerGetter{}, signer, mgmt.NewDeserializersManager(cryptoProvider), cryptoProvider)
	secAdv := peergossip.NewSecurityAdvisor(mgmt.NewDeserializersManager(cryptoProvider))
	gossipConfig, err := gossip.GlobalConfig(endpoint, nil)
	assert.NoError(t, err)

	grpcClient, err := comm.NewGRPCClient(comm.ClientConfig{})
	require.NoError(t, err)

	gossipService, err := New(
		signer,
		gossipmetrics.NewGossipMetrics(&disabled.Provider{}),
		endpoint,
		grpcServer,
		messageCryptoService,
		secAdv,
		nil,
		comm.NewCredentialSupport(),
		grpcClient,
		gossipConfig,
		&ServiceConfig{},
		&privdata.PrivdataConfig{},
		&deliverservice.DeliverServiceConfig{
			ReConnectBackoffThreshold:   deliverservice.DefaultReConnectBackoffThreshold,
			ReconnectTotalTimeThreshold: deliverservice.DefaultReConnectTotalTimeThreshold,
		},
	)
	assert.NoError(t, err)

	// Serve on the pre-bound listener; deferred Stops run in LIFO order,
	// so the gossip service stops before the gRPC server.
	go grpcServer.Serve(socket)
	defer grpcServer.Stop()

	defer gossipService.Stop()
}
   150  
   151  // Make sure *joinChannelMessage implements the api.JoinChannelMessage
   152  func TestJCMInterface(t *testing.T) {
   153  	_ = api.JoinChannelMessage(&joinChannelMessage{})
   154  	t.Parallel()
   155  }
   156  
// TestLeaderElectionWithDeliverClient checks that leader election works with
// a mock deliver service instance.
func TestLeaderElectionWithDeliverClient(t *testing.T) {
	t.Parallel()
	// Test check if leader election works with mock deliver service instance.
	// Configuration set to use dynamic leader election.
	// 10 peers started, added to channel and at the end we check if only for one peer
	// mockDeliverService.StartDeliverForChannel was invoked.

	n := 10
	serviceConfig := &ServiceConfig{
		UseLeaderElection:                true,
		OrgLeader:                        false,
		ElectionStartupGracePeriod:       election.DefStartupGracePeriod,
		ElectionMembershipSampleInterval: election.DefMembershipSampleInterval,
		ElectionLeaderAliveThreshold:     election.DefLeaderAliveThreshold,
		ElectionLeaderElectionDuration:   election.DefLeaderElectionDuration,
	}
	// Peers 0-4 act as bootstrap peers for the rest.
	gossips := startPeers(serviceConfig, n, 0, 1, 2, 3, 4)

	channelName := "chanA"
	peerIndexes := make([]int, n)
	for i := 0; i < n; i++ {
		peerIndexes[i] = i
	}
	addPeersToChannel(channelName, gossips, peerIndexes)

	waitForFullMembershipOrFailNow(t, channelName, gossips, n, TIMEOUT, time.Second*2)

	services := make([]*electionService, n)

	store := newTransientStore(t)
	defer store.tearDown()

	for i := 0; i < n; i++ {
		// Each peer gets its own mock factory so per-peer delivery
		// activity can be inspected independently below.
		deliverServiceFactory := &mockDeliverServiceFactory{
			service: &mockDeliverService{
				running: make(map[string]bool),
			},
		}
		gossips[i].deliveryFactory = deliverServiceFactory
		deliverServiceFactory.service.running[channelName] = false

		gossips[i].InitializeChannel(channelName, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), store.Store, Support{
			Committer: &mockLedgerInfo{1},
		})
		service, exist := gossips[i].leaderElection[channelName]
		assert.True(t, exist, "Leader election service should be created for peer %d and channel %s", i, channelName)
		services[i] = &electionService{nil, false, 0}
		services[i].LeaderElectionService = service
	}

	// Is single leader was elected.
	assert.True(t, waitForLeaderElection(services, time.Second*30, time.Second*2), "One leader should be selected")

	startsNum := 0
	for i := 0; i < n; i++ {
		// Is mockDeliverService.StartDeliverForChannel in current peer for the specific channel was invoked
		if gossips[i].deliveryService[channelName].(*mockDeliverService).running[channelName] {
			startsNum++
		}
	}

	// Only the elected leader should have started its deliver client.
	assert.Equal(t, 1, startsNum, "Only for one peer delivery client should start")

	stopPeers(gossips)
}
   222  
// TestWithStaticDeliverClientLeader checks the static (org) leader flag.
func TestWithStaticDeliverClientLeader(t *testing.T) {
	// Tests check if static leader flag works ok.
	// Leader election flag set to false, and static leader flag set to true
	// Two gossip service instances (peers) created.
	// Each peer is added to channel and should run mock delivery client
	// After that each peer added to another client and it should run deliver client for this channel as well.

	serviceConfig := &ServiceConfig{
		UseLeaderElection:                false,
		OrgLeader:                        true,
		ElectionStartupGracePeriod:       election.DefStartupGracePeriod,
		ElectionMembershipSampleInterval: election.DefMembershipSampleInterval,
		ElectionLeaderAliveThreshold:     election.DefLeaderAliveThreshold,
		ElectionLeaderElectionDuration:   election.DefLeaderElectionDuration,
	}
	n := 2
	gossips := startPeers(serviceConfig, n, 0, 1)
	channelName := "chanA"
	peerIndexes := make([]int, n)
	for i := 0; i < n; i++ {
		peerIndexes[i] = i
	}
	addPeersToChannel(channelName, gossips, peerIndexes)

	waitForFullMembershipOrFailNow(t, channelName, gossips, n, TIMEOUT, time.Second*2)

	store := newTransientStore(t)
	defer store.tearDown()

	// A single shared mock factory: both peers are static leaders, so
	// both are expected to start delivery regardless of election.
	deliverServiceFactory := &mockDeliverServiceFactory{
		service: &mockDeliverService{
			running: make(map[string]bool),
		},
	}

	for i := 0; i < n; i++ {
		gossips[i].deliveryFactory = deliverServiceFactory
		deliverServiceFactory.service.running[channelName] = false
		gossips[i].InitializeChannel(channelName, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), store.Store, Support{
			Committer: &mockLedgerInfo{1},
		})
	}

	for i := 0; i < n; i++ {
		assert.NotNil(t, gossips[i].deliveryService[channelName], "Delivery service for channel %s not initiated in peer %d", channelName, i)
		assert.True(t, gossips[i].deliveryService[channelName].(*mockDeliverService).running[channelName], "Block deliverer not started for peer %d", i)
	}

	// Repeat for a second channel: each static leader must also start a
	// deliver client there.
	channelName = "chanB"
	for i := 0; i < n; i++ {
		deliverServiceFactory.service.running[channelName] = false
		gossips[i].InitializeChannel(channelName, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), store.Store, Support{
			Committer: &mockLedgerInfo{1},
		})
	}

	for i := 0; i < n; i++ {
		assert.NotNil(t, gossips[i].deliveryService[channelName], "Delivery service for channel %s not initiated in peer %d", channelName, i)
		assert.True(t, gossips[i].deliveryService[channelName].(*mockDeliverService).running[channelName], "Block deliverer not started for peer %d", i)
	}

	stopPeers(gossips)
}
   286  
// TestWithStaticDeliverClientNotLeader verifies that with both dynamic
// election and the static-leader flag disabled, no peer starts a deliver
// client for the channel.
func TestWithStaticDeliverClientNotLeader(t *testing.T) {

	serviceConfig := &ServiceConfig{
		UseLeaderElection:                false,
		OrgLeader:                        false,
		ElectionStartupGracePeriod:       election.DefStartupGracePeriod,
		ElectionMembershipSampleInterval: election.DefMembershipSampleInterval,
		ElectionLeaderAliveThreshold:     election.DefLeaderAliveThreshold,
		ElectionLeaderElectionDuration:   election.DefLeaderElectionDuration,
	}
	n := 2
	gossips := startPeers(serviceConfig, n, 0, 1)

	channelName := "chanA"
	peerIndexes := make([]int, n)
	for i := 0; i < n; i++ {
		peerIndexes[i] = i
	}

	addPeersToChannel(channelName, gossips, peerIndexes)

	waitForFullMembershipOrFailNow(t, channelName, gossips, n, TIMEOUT, time.Second*2)

	store := newTransientStore(t)
	defer store.tearDown()

	deliverServiceFactory := &mockDeliverServiceFactory{
		service: &mockDeliverService{
			running: make(map[string]bool),
		},
	}

	for i := 0; i < n; i++ {
		gossips[i].deliveryFactory = deliverServiceFactory
		deliverServiceFactory.service.running[channelName] = false
		gossips[i].InitializeChannel(channelName, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), store.Store, Support{
			Committer: &mockLedgerInfo{1},
		})
	}

	// The delivery service exists but must not have been started, since
	// neither leadership mechanism is enabled.
	for i := 0; i < n; i++ {
		assert.NotNil(t, gossips[i].deliveryService[channelName], "Delivery service for channel %s not initiated in peer %d", channelName, i)
		assert.False(t, gossips[i].deliveryService[channelName].(*mockDeliverService).running[channelName], "Block deliverer should not be started for peer %d", i)
	}

	stopPeers(gossips)
}
   334  
// TestWithStaticDeliverClientBothStaticAndLeaderElection verifies that
// enabling dynamic leader election and the static org-leader flag at the
// same time is rejected: channel initialization must panic.
func TestWithStaticDeliverClientBothStaticAndLeaderElection(t *testing.T) {

	serviceConfig := &ServiceConfig{
		UseLeaderElection:                true,
		OrgLeader:                        true,
		ElectionStartupGracePeriod:       election.DefStartupGracePeriod,
		ElectionMembershipSampleInterval: election.DefMembershipSampleInterval,
		ElectionLeaderAliveThreshold:     election.DefLeaderAliveThreshold,
		ElectionLeaderElectionDuration:   election.DefLeaderElectionDuration,
	}
	n := 2
	gossips := startPeers(serviceConfig, n, 0, 1)

	channelName := "chanA"
	peerIndexes := make([]int, n)
	for i := 0; i < n; i++ {
		peerIndexes[i] = i
	}

	addPeersToChannel(channelName, gossips, peerIndexes)

	waitForFullMembershipOrFailNow(t, channelName, gossips, n, TIMEOUT, time.Second*2)

	store := newTransientStore(t)
	defer store.tearDown()

	deliverServiceFactory := &mockDeliverServiceFactory{
		service: &mockDeliverService{
			running: make(map[string]bool),
		},
	}

	for i := 0; i < n; i++ {
		gossips[i].deliveryFactory = deliverServiceFactory
		// InitializeChannel is expected to panic for every peer with
		// this contradictory configuration.
		assert.Panics(t, func() {
			gossips[i].InitializeChannel(channelName, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), store.Store, Support{
				Committer: &mockLedgerInfo{1},
			})
		}, "Dynamic leader election based and static connection to ordering service can't exist simultaneously")
	}

	stopPeers(gossips)
}
   378  
// mockDeliverServiceFactory always hands out the same pre-built mock
// deliver service, regardless of the requested channel.
type mockDeliverServiceFactory struct {
	service *mockDeliverService
}
   382  
// Service returns the factory's fixed mock deliver service, ignoring all
// arguments.
func (mf *mockDeliverServiceFactory) Service(GossipServiceAdapter, *orderers.ConnectionSource, api.MessageCryptoService, bool) deliverservice.DeliverService {
	return mf.service
}
   386  
// mockDeliverService records, per channel, whether delivery was started.
// Note: the map is not synchronized; tests mutate it from a single goroutine.
type mockDeliverService struct {
	running map[string]bool
}
   390  
// StartDeliverForChannel marks the channel as running and always succeeds;
// the ledgerInfo and finalizer arguments are ignored.
func (ds *mockDeliverService) StartDeliverForChannel(chainID string, ledgerInfo blocksprovider.LedgerInfo, finalizer func()) error {
	ds.running[chainID] = true
	return nil
}
   395  
// StopDeliverForChannel marks the channel as no longer running and always
// succeeds.
func (ds *mockDeliverService) StopDeliverForChannel(chainID string) error {
	ds.running[chainID] = false
	return nil
}
   400  
// Stop is a no-op; per-channel state is left untouched.
func (ds *mockDeliverService) Stop() {
}
   403  
// mockLedgerInfo is a committer stub reporting a fixed ledger height; all
// methods the tests never exercise simply panic.
type mockLedgerInfo struct {
	Height uint64
}
   407  
// GetConfigHistoryRetriever is unused by these tests and panics if called.
func (li *mockLedgerInfo) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
	panic("implement me")
}
   411  
// GetMissingPvtDataTracker is unused by these tests and panics if called.
func (li *mockLedgerInfo) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) {
	panic("implement me")
}
   415  
// GetPvtDataByNum is unused by these tests and panics if called.
func (li *mockLedgerInfo) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
	panic("implement me")
}
   419  
// CommitLegacy is unused by these tests and panics if called.
func (li *mockLedgerInfo) CommitLegacy(blockAndPvtData *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
	panic("implement me")
}
   423  
// CommitPvtDataOfOldBlocks is unused by these tests and panics if called.
func (li *mockLedgerInfo) CommitPvtDataOfOldBlocks(reconciledPvtdata []*ledger.ReconciledPvtdata) ([]*ledger.PvtdataHashMismatch, error) {
	panic("implement me")
}
   427  
// GetPvtDataAndBlockByNum is unused by these tests and panics if called.
func (li *mockLedgerInfo) GetPvtDataAndBlockByNum(seqNum uint64) (*ledger.BlockAndPvtData, error) {
	panic("implement me")
}
   431  
// LedgerHeight returns the fixed mocked height; it never fails.
func (li *mockLedgerInfo) LedgerHeight() (uint64, error) {
	return li.Height, nil
}
   436  
// DoesPvtDataInfoExistInLedger always reports that no private data info
// exists for the given block.
func (li *mockLedgerInfo) DoesPvtDataInfoExistInLedger(blkNum uint64) (bool, error) {
	return false, nil
}
   440  
// Commit pretends to commit the block to the ledger and always succeeds.
func (li *mockLedgerInfo) Commit(block *common.Block) error {
	return nil
}
   445  
// GetBlocks returns an empty slice regardless of the requested sequence
// numbers.
func (li *mockLedgerInfo) GetBlocks(blockSeqs []uint64) []*common.Block {
	return make([]*common.Block, 0)
}
   450  
// Close is a no-op for the mock committer.
func (li *mockLedgerInfo) Close() {
}
   454  
// TestLeaderElectionWithRealGossip runs leader election over real gossip
// instances across two channels and checks that exactly one leader exists
// per channel, including after the current leaders are killed.
func TestLeaderElectionWithRealGossip(t *testing.T) {
	t.Parallel()
	// Spawn 10 gossip instances with single channel and inside same organization
	// Run leader election on top of each gossip instance and check that only one leader chosen
	// Create another channel includes sub-set of peers over same gossip instances {1,3,5,7}
	// Run additional leader election services for new channel
	// Check correct leader still exist for first channel and new correct leader chosen in second channel
	// Stop gossip instances of leader peers for both channels and see that new leader chosen for both

	// Creating gossip service instances for peers
	serviceConfig := &ServiceConfig{
		UseLeaderElection:                false,
		OrgLeader:                        false,
		ElectionStartupGracePeriod:       election.DefStartupGracePeriod,
		ElectionMembershipSampleInterval: election.DefMembershipSampleInterval,
		ElectionLeaderAliveThreshold:     election.DefLeaderAliveThreshold,
		ElectionLeaderElectionDuration:   election.DefLeaderElectionDuration,
	}

	n := 10
	gossips := startPeers(serviceConfig, n, 0, 1, 2, 3, 4)
	// Joining all peers to first channel
	channelName := "chanA"
	peerIndexes := make([]int, n)
	for i := 0; i < n; i++ {
		peerIndexes[i] = i
	}
	addPeersToChannel(channelName, gossips, peerIndexes)

	waitForFullMembershipOrFailNow(t, channelName, gossips, n, TIMEOUT, time.Second*2)

	logger.Warning("Starting leader election services")

	// Starting leader election services
	services := make([]*electionService, n)

	electionMetrics := gossipmetrics.NewGossipMetrics(&disabled.Provider{}).ElectionMetrics

	for i := 0; i < n; i++ {
		services[i] = &electionService{nil, false, 0}
		services[i].LeaderElectionService = gossips[i].newLeaderElectionComponent(channelName, services[i].callback, electionMetrics)
	}

	logger.Warning("Waiting for leader election")

	assert.True(t, waitForLeaderElection(services, time.Second*30, time.Second*2), "One leader should be selected")

	startsNum := 0
	for i := 0; i < n; i++ {
		// Is callback function was invoked by this leader election service instance
		if services[i].callbackInvokeRes {
			startsNum++
		}
	}
	// Only leader should invoke callback function, so it is double check that only one leader exists
	assert.Equal(t, 1, startsNum, "Only for one peer callback function should be called - chanA")

	// Adding some peers to new channel and creating leader election services for peers in new channel
	// Expecting peer 1 (first in list of election services) to become leader of second channel
	secondChannelPeerIndexes := []int{1, 3, 5, 7}
	secondChannelName := "chanB"
	secondChannelServices := make([]*electionService, len(secondChannelPeerIndexes))
	addPeersToChannel(secondChannelName, gossips, secondChannelPeerIndexes)

	secondChannelGossips := make([]*gossipGRPC, 0)
	for _, i := range secondChannelPeerIndexes {
		secondChannelGossips = append(secondChannelGossips, gossips[i])
	}
	waitForFullMembershipOrFailNow(t, secondChannelName, secondChannelGossips, len(secondChannelGossips), TIMEOUT, time.Millisecond*100)

	for idx, i := range secondChannelPeerIndexes {
		secondChannelServices[idx] = &electionService{nil, false, 0}
		secondChannelServices[idx].LeaderElectionService =
			gossips[i].newLeaderElectionComponent(secondChannelName, secondChannelServices[idx].callback, electionMetrics)
	}

	assert.True(t, waitForLeaderElection(secondChannelServices, time.Second*30, time.Second*2), "One leader should be selected for chanB")
	assert.True(t, waitForLeaderElection(services, time.Second*30, time.Second*2), "One leader should be selected for chanA")

	startsNum = 0
	for i := 0; i < n; i++ {
		if services[i].callbackInvokeRes {
			startsNum++
		}
	}
	assert.Equal(t, 1, startsNum, "Only for one peer callback function should be called - chanA")

	startsNum = 0
	for i := 0; i < len(secondChannelServices); i++ {
		if secondChannelServices[i].callbackInvokeRes {
			startsNum++
		}
	}
	assert.Equal(t, 1, startsNum, "Only for one peer callback function should be called - chanB")

	// Stopping 2 gossip instances(peer 0 and peer 1), should init re-election
	// Now peer 2 become leader for first channel and peer 3 for second channel

	logger.Warning("Killing 2 peers, initiation new leader election")

	stopPeers(gossips[:2])

	// Membership shrinks by the two stopped peers before re-election is checked.
	waitForFullMembershipOrFailNow(t, channelName, gossips[2:], n-2, TIMEOUT, time.Millisecond*100)
	waitForFullMembershipOrFailNow(t, secondChannelName, secondChannelGossips[1:], len(secondChannelGossips)-1, TIMEOUT, time.Millisecond*100)

	assert.True(t, waitForLeaderElection(services[2:], time.Second*30, time.Second*2), "One leader should be selected after re-election - chanA")
	assert.True(t, waitForLeaderElection(secondChannelServices[1:], time.Second*30, time.Second*2), "One leader should be selected after re-election - chanB")

	startsNum = 0
	for i := 2; i < n; i++ {
		if services[i].callbackInvokeRes {
			startsNum++
		}
	}
	assert.Equal(t, 1, startsNum, "Only for one peer callback function should be called after re-election - chanA")

	startsNum = 0
	for i := 1; i < len(secondChannelServices); i++ {
		if secondChannelServices[i].callbackInvokeRes {
			startsNum++
		}
	}
	assert.Equal(t, 1, startsNum, "Only for one peer callback function should be called after re-election - chanB")

	stopServices(secondChannelServices)
	stopServices(services)
	stopPeers(gossips[2:])
}
   583  
// electionService wraps a LeaderElectionService and records what its
// leadership callback last reported and how many times it fired.
type electionService struct {
	election.LeaderElectionService
	callbackInvokeRes   bool // latest isLeader value passed to callback
	callbackInvokeCount int  // number of times callback has been invoked
}
   589  
   590  func (es *electionService) callback(isLeader bool) {
   591  	es.callbackInvokeRes = isLeader
   592  	es.callbackInvokeCount = es.callbackInvokeCount + 1
   593  }
   594  
// joinChanMsg is a minimal api.JoinChannelMessage used to join test peers to
// a channel; it carries no anchor peers and a single organization.
type joinChanMsg struct {
}
   597  
// SequenceNumber returns the sequence number of the block this joinChanMsg
// is derived from. The current wall-clock nanosecond is used so each message
// appears newer than any previously issued one.
func (jmc *joinChanMsg) SequenceNumber() uint64 {
	return uint64(time.Now().UnixNano())
}
   603  
// Members returns the organizations of the channel: the single test org.
func (jmc *joinChanMsg) Members() []api.OrgIdentityType {
	return []api.OrgIdentityType{orgInChannelA}
}
   608  
// AnchorPeersOf returns the anchor peers of the given organization; the test
// channel has none.
func (jmc *joinChanMsg) AnchorPeersOf(org api.OrgIdentityType) []api.AnchorPeer {
	return []api.AnchorPeer{}
}
   613  
   614  func waitForFullMembershipOrFailNow(t *testing.T, channel string, gossips []*gossipGRPC, peersNum int, timeout time.Duration, testPollInterval time.Duration) {
   615  	end := time.Now().Add(timeout)
   616  	var correctPeers int
   617  	for time.Now().Before(end) {
   618  		correctPeers = 0
   619  		for _, g := range gossips {
   620  			if len(g.PeersOfChannel(gossipcommon.ChannelID(channel))) == (peersNum - 1) {
   621  				correctPeers++
   622  			}
   623  		}
   624  		if correctPeers == peersNum {
   625  			return
   626  		}
   627  		time.Sleep(testPollInterval)
   628  	}
   629  	t.Fatalf("Failed to establish full channel membership. Only %d out of %d peers have full membership", correctPeers, peersNum)
   630  }
   631  
// waitForMultipleLeadersElection polls until exactly leadersNum of the given
// services report leadership on two consecutive polls (the double check
// filters out transient states mid-election). Returns true on success,
// false if the timeout elapses, logging the final leadership distribution.
func waitForMultipleLeadersElection(services []*electionService, leadersNum int, timeout time.Duration, testPollInterval time.Duration) bool {
	logger.Warning("Waiting for", leadersNum, "leaders")
	end := time.Now().Add(timeout)
	correctNumberOfLeadersFound := false
	leaders := 0
	for time.Now().Before(end) {
		leaders = 0
		for _, s := range services {
			if s.IsLeader() {
				leaders++
			}
		}
		if leaders == leadersNum {
			// Require the correct count on two consecutive polls
			// before declaring the election settled.
			if correctNumberOfLeadersFound {
				return true
			}
			correctNumberOfLeadersFound = true
		} else {
			correctNumberOfLeadersFound = false
		}
		time.Sleep(testPollInterval)
	}
	logger.Warning("Incorrect number of leaders", leaders)
	for i, s := range services {
		logger.Warning("Peer at index", i, "is leader", s.IsLeader())
	}
	return false
}
   660  
// waitForLeaderElection waits for exactly one leader among the services;
// see waitForMultipleLeadersElection for the polling semantics.
func waitForLeaderElection(services []*electionService, timeout time.Duration, testPollInterval time.Duration) bool {
	return waitForMultipleLeadersElection(services, 1, timeout, testPollInterval)
}
   664  
   665  func stopServices(services []*electionService) {
   666  	for _, service := range services {
   667  		service.Stop()
   668  	}
   669  }
   670  
   671  func stopPeers(peers []*gossipGRPC) {
   672  	for _, peer := range peers {
   673  		peer.Stop()
   674  	}
   675  }
   676  
   677  func addPeersToChannel(channel string, peers []*gossipGRPC, peerIndexes []int) {
   678  	jcm := &joinChanMsg{}
   679  
   680  	for _, i := range peerIndexes {
   681  		peers[i].JoinChan(jcm, gossipcommon.ChannelID(channel))
   682  		peers[i].UpdateLedgerHeight(0, gossipcommon.ChannelID(channel))
   683  	}
   684  }
   685  
   686  func startPeers(serviceConfig *ServiceConfig, n int, boot ...int) []*gossipGRPC {
   687  	var ports []int
   688  	var grpcs []*comm.GRPCServer
   689  	var certs []*gossipcommon.TLSCertificates
   690  	var secDialOpts []api.PeerSecureDialOpts
   691  
   692  	for i := 0; i < n; i++ {
   693  		port, grpc, cert, secDialOpt, _ := util.CreateGRPCLayer()
   694  		ports = append(ports, port)
   695  		grpcs = append(grpcs, grpc)
   696  		certs = append(certs, cert)
   697  		secDialOpts = append(secDialOpts, secDialOpt)
   698  	}
   699  
   700  	var bootPorts []int
   701  	for _, index := range boot {
   702  		bootPorts = append(bootPorts, ports[index])
   703  	}
   704  
   705  	peers := make([]*gossipGRPC, n)
   706  	for i := 0; i < n; i++ {
   707  		peers[i] = newGossipInstance(serviceConfig, ports[i], i, grpcs[i], certs[i], secDialOpts[i], 100, bootPorts...)
   708  	}
   709  
   710  	return peers
   711  }
   712  
   713  func newGossipInstance(serviceConfig *ServiceConfig, port int, id int, gRPCServer *comm.GRPCServer, certs *gossipcommon.TLSCertificates,
   714  	secureDialOpts api.PeerSecureDialOpts, maxMsgCount int, bootPorts ...int) *gossipGRPC {
   715  	conf := &gossip.Config{
   716  		BindPort:                     port,
   717  		BootstrapPeers:               bootPeers(bootPorts...),
   718  		ID:                           fmt.Sprintf("p%d", id),
   719  		MaxBlockCountToStore:         maxMsgCount,
   720  		MaxPropagationBurstLatency:   time.Duration(500) * time.Millisecond,
   721  		MaxPropagationBurstSize:      20,
   722  		PropagateIterations:          1,
   723  		PropagatePeerNum:             3,
   724  		PullInterval:                 time.Duration(2) * time.Second,
   725  		PullPeerNum:                  5,
   726  		InternalEndpoint:             fmt.Sprintf("127.0.0.1:%d", port),
   727  		ExternalEndpoint:             fmt.Sprintf("1.2.3.4:%d", port),
   728  		PublishCertPeriod:            time.Duration(4) * time.Second,
   729  		PublishStateInfoInterval:     time.Duration(1) * time.Second,
   730  		RequestStateInfoInterval:     time.Duration(1) * time.Second,
   731  		TimeForMembershipTracker:     time.Second * 5,
   732  		TLSCerts:                     certs,
   733  		DigestWaitTime:               algo.DefDigestWaitTime,
   734  		RequestWaitTime:              algo.DefRequestWaitTime,
   735  		ResponseWaitTime:             algo.DefResponseWaitTime,
   736  		DialTimeout:                  gcomm.DefDialTimeout,
   737  		ConnTimeout:                  gcomm.DefConnTimeout,
   738  		RecvBuffSize:                 gcomm.DefRecvBuffSize,
   739  		SendBuffSize:                 gcomm.DefSendBuffSize,
   740  		MsgExpirationTimeout:         channel.DefMsgExpirationTimeout,
   741  		AliveTimeInterval:            discovery.DefAliveTimeInterval,
   742  		AliveExpirationTimeout:       discovery.DefAliveExpirationTimeout,
   743  		AliveExpirationCheckInterval: discovery.DefAliveExpirationCheckInterval,
   744  		ReconnectInterval:            time.Duration(1) * time.Second,
   745  	}
   746  	selfID := api.PeerIdentityType(conf.InternalEndpoint)
   747  	cryptoService := &naiveCryptoService{}
   748  	metrics := gossipmetrics.NewGossipMetrics(&disabled.Provider{})
   749  	gossip := gossip.New(
   750  		conf,
   751  		gRPCServer.Server(),
   752  		&orgCryptoService{},
   753  		cryptoService,
   754  		selfID,
   755  		secureDialOpts,
   756  		metrics,
   757  	)
   758  	go gRPCServer.Start()
   759  
   760  	secAdv := peergossip.NewSecurityAdvisor(mgmt.NewDeserializersManager(factory.GetDefault()))
   761  	gossipService := &GossipService{
   762  		mcs:             cryptoService,
   763  		gossipSvc:       gossip,
   764  		chains:          make(map[string]state.GossipStateProvider),
   765  		leaderElection:  make(map[string]election.LeaderElectionService),
   766  		privateHandlers: make(map[string]privateHandler),
   767  		deliveryService: make(map[string]deliverservice.DeliverService),
   768  		deliveryFactory: &deliveryFactoryImpl{
   769  			credentialSupport: comm.NewCredentialSupport(),
   770  		},
   771  		peerIdentity:   api.PeerIdentityType(conf.InternalEndpoint),
   772  		secAdv:         secAdv,
   773  		metrics:        metrics,
   774  		serviceConfig:  serviceConfig,
   775  		privdataConfig: privdata.GlobalConfig(),
   776  	}
   777  
   778  	return &gossipGRPC{GossipService: gossipService, grpc: gRPCServer}
   779  }
   780  
// gossipGRPC couples a GossipService with the gRPC server that backs it,
// so a test can tear both down together via Stop.
type gossipGRPC struct {
	*GossipService
	grpc *comm.GRPCServer
}
   785  
// Stop shuts down the gossip service first and then the underlying
// gRPC server it was registered on.
func (g *gossipGRPC) Stop() {
	g.GossipService.Stop()
	g.grpc.Stop()
}
   790  
   791  func bootPeers(ports ...int) []string {
   792  	var peers []string
   793  	for _, port := range ports {
   794  		peers = append(peers, fmt.Sprintf("127.0.0.1:%d", port))
   795  	}
   796  	return peers
   797  }
   798  
   799  func getAvailablePort(t *testing.T) (endpoint string, ll net.Listener) {
   800  	ll, err := net.Listen("tcp", "127.0.0.1:0")
   801  	assert.NoError(t, err)
   802  	endpoint = ll.Addr().String()
   803  	return endpoint, ll
   804  }
   805  
// naiveCryptoService is a test stub of the gossip message crypto service:
// it accepts every identity and block, and treats a message's "signature"
// as the message bytes themselves (see Sign/Verify below).
type naiveCryptoService struct {
}
   808  
// orgCryptoService is a test stub that places every peer identity into
// the single test organization and accepts every join-channel message.
type orgCryptoService struct {
}
   811  
// OrgByPeerIdentity returns the OrgIdentityType of a given peer identity.
// The stub maps every identity to the single test org (orgInChannelA).
func (*orgCryptoService) OrgByPeerIdentity(identity api.PeerIdentityType) api.OrgIdentityType {
	return orgInChannelA
}
   817  
// Verify verifies a JoinChanMessage, returns nil on success,
// and an error on failure. The stub accepts every message.
func (*orgCryptoService) Verify(joinChanMsg api.JoinChannelMessage) error {
	return nil
}
   823  
// Expiration reports when peerIdentity expires; the stub always returns
// one hour from now, so identities never expire within a test run.
func (naiveCryptoService) Expiration(peerIdentity api.PeerIdentityType) (time.Time, error) {
	return time.Now().Add(time.Hour), nil
}
   827  
// VerifyByChannel verifies a peer's signature on a message in the context
// of a specific channel. The stub accepts every signature.
func (*naiveCryptoService) VerifyByChannel(_ gossipcommon.ChannelID, _ api.PeerIdentityType, _, _ []byte) error {
	return nil
}
   833  
// ValidateIdentity accepts every peer identity without inspection.
func (*naiveCryptoService) ValidateIdentity(peerIdentity api.PeerIdentityType) error {
	return nil
}
   837  
// GetPKIidOfCert returns the PKI-ID of a peer's identity.
// The stub uses the raw identity bytes themselves as the PKI-ID.
func (*naiveCryptoService) GetPKIidOfCert(peerIdentity api.PeerIdentityType) gossipcommon.PKIidType {
	return gossipcommon.PKIidType(peerIdentity)
}
   842  
// VerifyBlock returns nil if the block is properly signed,
// else returns error. The stub accepts every block.
func (*naiveCryptoService) VerifyBlock(chainID gossipcommon.ChannelID, seqNum uint64, signedBlock *common.Block) error {
	return nil
}
   848  
// Sign signs msg with this peer's signing key and outputs
// the signature if no error occurred. The stub's "signature" is the
// message itself, which is what Verify below checks for.
func (*naiveCryptoService) Sign(msg []byte) ([]byte, error) {
	return msg, nil
}
   854  
   855  // Verify checks that signature is a valid signature of message under a peer's verification key.
   856  // If the verification succeeded, Verify returns nil meaning no error occurred.
   857  // If peerCert is nil, then the signature is verified against this peer's verification key.
   858  func (*naiveCryptoService) Verify(peerIdentity api.PeerIdentityType, signature, message []byte) error {
   859  	equal := bytes.Equal(signature, message)
   860  	if !equal {
   861  		return fmt.Errorf("Wrong signature:%v, %v", signature, message)
   862  	}
   863  	return nil
   864  }
   865  
// orgInChannelA is the single organization every stubbed peer belongs to.
var orgInChannelA = api.OrgIdentityType("ORG1")
   867  
// TestInvalidInitialization constructs a GossipService with minimal/empty
// collaborators (empty client config, nil orderer source data) and verifies
// that the delivery factory still returns a non-nil deliver service.
func TestInvalidInitialization(t *testing.T) {
	grpcServer := grpc.NewServer()
	endpoint, socket := getAvailablePort(t)

	cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
	assert.NoError(t, err)

	// Identity plumbing: a mock signer serializing to a fixed identity, and
	// a security advisor backed by the default deserializers manager.
	mockSignerSerializer := &mocks.SignerSerializer{}
	mockSignerSerializer.SerializeReturns(api.PeerIdentityType("peer-identity"), nil)
	secAdv := peergossip.NewSecurityAdvisor(mgmt.NewDeserializersManager(cryptoProvider))
	gossipConfig, err := gossip.GlobalConfig(endpoint, nil)
	assert.NoError(t, err)

	grpcClient, err := comm.NewGRPCClient(comm.ClientConfig{})
	require.NoError(t, err)

	gossipService, err := New(
		mockSignerSerializer,
		gossipmetrics.NewGossipMetrics(&disabled.Provider{}),
		endpoint,
		grpcServer,
		&naiveCryptoService{},
		secAdv,
		nil,
		comm.NewCredentialSupport(),
		grpcClient,
		gossipConfig,
		&ServiceConfig{},
		&privdata.PrivdataConfig{},
		&deliverservice.DeliverServiceConfig{
			PeerTLSEnabled:              false,
			ReConnectBackoffThreshold:   deliverservice.DefaultReConnectBackoffThreshold,
			ReconnectTotalTimeThreshold: deliverservice.DefaultReConnectTotalTimeThreshold,
		},
	)
	assert.NoError(t, err)
	gService := gossipService
	defer gService.Stop()

	// Serve the gossip gRPC endpoint for the lifetime of the test.
	go grpcServer.Serve(socket)
	defer grpcServer.Stop()

	// The factory must produce a deliver service even with a connection
	// source that was built from nil endpoint data.
	dc := gService.deliveryFactory.Service(gService, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), &naiveCryptoService{}, false)
	assert.NotNil(t, dc)
}
   913  
   914  func TestChannelConfig(t *testing.T) {
   915  	// Test whenever gossip service is indeed singleton
   916  	grpcServer := grpc.NewServer()
   917  	endpoint, socket := getAvailablePort(t)
   918  
   919  	cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
   920  	assert.NoError(t, err)
   921  
   922  	mockSignerSerializer := &mocks.SignerSerializer{}
   923  	mockSignerSerializer.SerializeReturns(api.PeerIdentityType("peer-identity"), nil)
   924  	secAdv := peergossip.NewSecurityAdvisor(mgmt.NewDeserializersManager(cryptoProvider))
   925  	gossipConfig, err := gossip.GlobalConfig(endpoint, nil)
   926  	assert.NoError(t, err)
   927  
   928  	grpcClient, err := comm.NewGRPCClient(comm.ClientConfig{})
   929  
   930  	gossipService, err := New(
   931  		mockSignerSerializer,
   932  		gossipmetrics.NewGossipMetrics(&disabled.Provider{}),
   933  		endpoint,
   934  		grpcServer,
   935  		&naiveCryptoService{},
   936  		secAdv,
   937  		nil,
   938  		nil,
   939  		grpcClient,
   940  		gossipConfig,
   941  		&ServiceConfig{},
   942  		&privdata.PrivdataConfig{},
   943  		&deliverservice.DeliverServiceConfig{
   944  			ReConnectBackoffThreshold:   deliverservice.DefaultReConnectBackoffThreshold,
   945  			ReconnectTotalTimeThreshold: deliverservice.DefaultReConnectTotalTimeThreshold,
   946  		},
   947  	)
   948  	assert.NoError(t, err)
   949  	gService := gossipService
   950  	defer gService.Stop()
   951  
   952  	go grpcServer.Serve(socket)
   953  	defer grpcServer.Stop()
   954  
   955  	jcm := &joinChannelMessage{seqNum: 1, members2AnchorPeers: map[string][]api.AnchorPeer{
   956  		"A": {{Host: "host", Port: 5000}},
   957  	}}
   958  
   959  	assert.Equal(t, uint64(1), jcm.SequenceNumber())
   960  
   961  	mc := &mockConfig{
   962  		sequence: 1,
   963  		orgs: map[string]channelconfig.ApplicationOrg{
   964  			string(orgInChannelA): &appGrp{
   965  				mspID:       string(orgInChannelA),
   966  				anchorPeers: []*peer.AnchorPeer{},
   967  			},
   968  		},
   969  	}
   970  	gService.JoinChan(jcm, gossipcommon.ChannelID("A"))
   971  	gService.updateAnchors(mc)
   972  	assert.True(t, gService.amIinChannel(string(orgInChannelA), mc))
   973  }