github.com/kaituanwang/hyperledger@v2.0.1+incompatible/gossip/service/gossip_service_test.go

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package service

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/hyperledger/fabric-protos-go/common"
	"github.com/hyperledger/fabric-protos-go/peer"
	transientstore2 "github.com/hyperledger/fabric-protos-go/transientstore"
	"github.com/hyperledger/fabric/bccsp/factory"
	"github.com/hyperledger/fabric/bccsp/sw"
	"github.com/hyperledger/fabric/common/channelconfig"
	"github.com/hyperledger/fabric/common/flogging"
	"github.com/hyperledger/fabric/common/metrics/disabled"
	"github.com/hyperledger/fabric/core/comm"
	"github.com/hyperledger/fabric/core/deliverservice"
	"github.com/hyperledger/fabric/core/ledger"
	"github.com/hyperledger/fabric/core/transientstore"
	"github.com/hyperledger/fabric/gossip/api"
	gcomm "github.com/hyperledger/fabric/gossip/comm"
	gossipcommon "github.com/hyperledger/fabric/gossip/common"
	"github.com/hyperledger/fabric/gossip/discovery"
	"github.com/hyperledger/fabric/gossip/election"
	"github.com/hyperledger/fabric/gossip/gossip"
	"github.com/hyperledger/fabric/gossip/gossip/algo"
	"github.com/hyperledger/fabric/gossip/gossip/channel"
	gossipmetrics "github.com/hyperledger/fabric/gossip/metrics"
	"github.com/hyperledger/fabric/gossip/privdata"
	"github.com/hyperledger/fabric/gossip/state"
	"github.com/hyperledger/fabric/gossip/util"
	peergossip "github.com/hyperledger/fabric/internal/peer/gossip"
	"github.com/hyperledger/fabric/internal/peer/gossip/mocks"
	"github.com/hyperledger/fabric/internal/pkg/identity"
	"github.com/hyperledger/fabric/internal/pkg/peer/blocksprovider"
	"github.com/hyperledger/fabric/internal/pkg/peer/orderers"
	"github.com/hyperledger/fabric/msp/mgmt"
	msptesttools "github.com/hyperledger/fabric/msp/mgmt/testtools"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc"
)

func init() {
	util.SetupTestLogging()
}

//go:generate counterfeiter -o mocks/signer_serializer.go --fake-name SignerSerializer . signerSerializer

type signerSerializer interface {
	identity.SignerSerializer
}

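// testTransientStore bundles a transient store, its provider, and the
// temporary directory backing them, so a test can tear everything down in one
// call.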
type testTransientStore struct {
	storeProvider transientstore.StoreProvider
	Store         *transientstore.Store
	tempdir       string
}

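// newTransientStore creates a transient store backed by a fresh temporary
// directory, failing the test immediately if any step errors.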
func newTransientStore(t *testing.T) *testTransientStore {
	s := &testTransientStore{}
	var err error
	s.tempdir, err = ioutil.TempDir("", "ts")
	if err != nil {
		t.Fatalf("Failed to create test directory, got err %s", err)
	}
	s.storeProvider, err = transientstore.NewStoreProvider(s.tempdir)
	if err != nil {
		t.Fatalf("Failed to open store provider, got err %s", err)
	}
	s.Store, err = s.storeProvider.OpenStore("test")
	if err != nil {
		t.Fatalf("Failed to open store, got err %s", err)
	}
	return s
}

func (s *testTransientStore) tearDown() {
	s.storeProvider.Close()
	os.RemoveAll(s.tempdir)
}

func (s *testTransientStore) Persist(txid string, blockHeight uint64,
	privateSimulationResultsWithConfig *transientstore2.TxPvtReadWriteSetWithConfigInfo) error {
	return s.Store.Persist(txid, blockHeight, privateSimulationResultsWithConfig)
}

func (s *testTransientStore) GetTxPvtRWSetByTxid(txid string, filter ledger.PvtNsCollFilter) (privdata.RWSetScanner, error) {
	return s.Store.GetTxPvtRWSetByTxid(txid, filter)
}

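// TestInitGossipService verifies that a gossip service can be constructed and
// started against an in-process gRPC server using the default global
// configuration.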
func TestInitGossipService(t *testing.T) {
	grpcServer := grpc.NewServer()
	endpoint, socket := getAvailablePort(t)

	cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
	assert.NoError(t, err)

	msptesttools.LoadMSPSetupForTesting()
	signer := mgmt.GetLocalSigningIdentityOrPanic(cryptoProvider)

	messageCryptoService := peergossip.NewMCS(&mocks.ChannelPolicyManagerGetter{}, signer, mgmt.NewDeserializersManager(cryptoProvider), cryptoProvider)
	secAdv := peergossip.NewSecurityAdvisor(mgmt.NewDeserializersManager(cryptoProvider))
	gossipConfig, err := gossip.GlobalConfig(endpoint, nil)
	assert.NoError(t, err)

	grpcClient, err := comm.NewGRPCClient(comm.ClientConfig{})
	require.NoError(t, err)

	gossipService, err := New(
		signer,
		gossipmetrics.NewGossipMetrics(&disabled.Provider{}),
		endpoint,
		grpcServer,
		messageCryptoService,
		secAdv,
		nil,
		comm.NewCredentialSupport(),
		grpcClient,
		gossipConfig,
		&ServiceConfig{},
		&deliverservice.DeliverServiceConfig{
			ReConnectBackoffThreshold:   deliverservice.DefaultReConnectBackoffThreshold,
			ReconnectTotalTimeThreshold: deliverservice.DefaultReConnectTotalTimeThreshold,
		},
	)
	assert.NoError(t, err)

	go grpcServer.Serve(socket)
	defer grpcServer.Stop()

	defer gossipService.Stop()
}

// TestJCMInterface makes sure *joinChannelMessage implements the api.JoinChannelMessage interface
func TestJCMInterface(t *testing.T) {
	t.Parallel()
	_ = api.JoinChannelMessage(&joinChannelMessage{})
}

func TestLeaderElectionWithDeliverClient(t *testing.T) {
	t.Parallel()
	// This test checks that leader election works with a mock deliver service instance.
	// The configuration is set to use dynamic leader election.
	// 10 peers are started and added to the channel, and at the end we check that
	// mockDeliverService.StartDeliverForChannel was invoked for exactly one peer.

	n := 10
	serviceConfig := &ServiceConfig{
		UseLeaderElection:                true,
		OrgLeader:                        false,
		ElectionStartupGracePeriod:       election.DefStartupGracePeriod,
		ElectionMembershipSampleInterval: election.DefMembershipSampleInterval,
		ElectionLeaderAliveThreshold:     election.DefLeaderAliveThreshold,
		ElectionLeaderElectionDuration:   election.DefLeaderElectionDuration,
	}
	gossips := startPeers(t, serviceConfig, n, 0, 1, 2, 3, 4)

	channelName := "chanA"
	peerIndexes := make([]int, n)
	for i := 0; i < n; i++ {
		peerIndexes[i] = i
	}
	addPeersToChannel(t, n, channelName, gossips, peerIndexes)

	waitForFullMembershipOrFailNow(t, channelName, gossips, n, time.Second*20, time.Second*2)

	services := make([]*electionService, n)

	store := newTransientStore(t)
	defer store.tearDown()

	for i := 0; i < n; i++ {
		deliverServiceFactory := &mockDeliverServiceFactory{
			service: &mockDeliverService{
				running: make(map[string]bool),
			},
		}
		gossips[i].deliveryFactory = deliverServiceFactory
		deliverServiceFactory.service.running[channelName] = false

		gossips[i].InitializeChannel(channelName, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), store.Store, Support{
			Committer: &mockLedgerInfo{1},
		})
		service, exist := gossips[i].leaderElection[channelName]
		assert.True(t, exist, "Leader election service should be created for peer %d and channel %s", i, channelName)
		services[i] = &electionService{nil, false, 0}
		services[i].LeaderElectionService = service
	}

	// Check that a single leader was elected.
	assert.True(t, waitForLeaderElection(t, services, time.Second*30, time.Second*2), "One leader should be selected")

	startsNum := 0
	for i := 0; i < n; i++ {
		// Check whether mockDeliverService.StartDeliverForChannel was invoked for this peer and channel
		if gossips[i].deliveryService[channelName].(*mockDeliverService).running[channelName] {
			startsNum++
		}
	}

	assert.Equal(t, 1, startsNum, "The delivery client should be started for exactly one peer")

	stopPeers(gossips)
}

func TestWithStaticDeliverClientLeader(t *testing.T) {
	// This test checks that the static leader flag works correctly.
	// The leader election flag is set to false, and the static leader flag is set to true.
	// Two gossip service instances (peers) are created.
	// Each peer is added to the channel and should run the mock delivery client.
	// After that, each peer is added to another channel and should run a deliver client for that channel as well.

	serviceConfig := &ServiceConfig{
		UseLeaderElection:                false,
		OrgLeader:                        true,
		ElectionStartupGracePeriod:       election.DefStartupGracePeriod,
		ElectionMembershipSampleInterval: election.DefMembershipSampleInterval,
		ElectionLeaderAliveThreshold:     election.DefLeaderAliveThreshold,
		ElectionLeaderElectionDuration:   election.DefLeaderElectionDuration,
	}
	n := 2
	gossips := startPeers(t, serviceConfig, n, 0, 1)
	channelName := "chanA"
	peerIndexes := make([]int, n)
	for i := 0; i < n; i++ {
		peerIndexes[i] = i
	}

	addPeersToChannel(t, n, channelName, gossips, peerIndexes)

	waitForFullMembershipOrFailNow(t, channelName, gossips, n, time.Second*30, time.Second*2)

	store := newTransientStore(t)
	defer store.tearDown()

	deliverServiceFactory := &mockDeliverServiceFactory{
		service: &mockDeliverService{
			running: make(map[string]bool),
		},
	}

	for i := 0; i < n; i++ {
		gossips[i].deliveryFactory = deliverServiceFactory
		deliverServiceFactory.service.running[channelName] = false
		gossips[i].InitializeChannel(channelName, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), store.Store, Support{
			Committer: &mockLedgerInfo{1},
		})
	}

	for i := 0; i < n; i++ {
		assert.NotNil(t, gossips[i].deliveryService[channelName], "Delivery service for channel %s not initiated in peer %d", channelName, i)
		assert.True(t, gossips[i].deliveryService[channelName].(*mockDeliverService).running[channelName], "Block deliverer not started for peer %d", i)
	}

	channelName = "chanB"
	for i := 0; i < n; i++ {
		deliverServiceFactory.service.running[channelName] = false
		gossips[i].InitializeChannel(channelName, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), store.Store, Support{
			Committer: &mockLedgerInfo{1},
		})
	}

	for i := 0; i < n; i++ {
		assert.NotNil(t, gossips[i].deliveryService[channelName], "Delivery service for channel %s not initiated in peer %d", channelName, i)
		assert.True(t, gossips[i].deliveryService[channelName].(*mockDeliverService).running[channelName], "Block deliverer not started for peer %d", i)
	}

	stopPeers(gossips)
}

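// TestWithStaticDeliverClientNotLeader checks that when leader election is
// disabled and the peer is not the static org leader, the block deliverer is
// never started.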
func TestWithStaticDeliverClientNotLeader(t *testing.T) {
	serviceConfig := &ServiceConfig{
		UseLeaderElection:                false,
		OrgLeader:                        false,
		ElectionStartupGracePeriod:       election.DefStartupGracePeriod,
		ElectionMembershipSampleInterval: election.DefMembershipSampleInterval,
		ElectionLeaderAliveThreshold:     election.DefLeaderAliveThreshold,
		ElectionLeaderElectionDuration:   election.DefLeaderElectionDuration,
	}
	n := 2
	gossips := startPeers(t, serviceConfig, n, 0, 1)

	channelName := "chanA"
	peerIndexes := make([]int, n)
	for i := 0; i < n; i++ {
		peerIndexes[i] = i
	}

	addPeersToChannel(t, n, channelName, gossips, peerIndexes)

	waitForFullMembershipOrFailNow(t, channelName, gossips, n, time.Second*30, time.Second*2)

	store := newTransientStore(t)
	defer store.tearDown()

	deliverServiceFactory := &mockDeliverServiceFactory{
		service: &mockDeliverService{
			running: make(map[string]bool),
		},
	}

	for i := 0; i < n; i++ {
		gossips[i].deliveryFactory = deliverServiceFactory
		deliverServiceFactory.service.running[channelName] = false
		gossips[i].InitializeChannel(channelName, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), store.Store, Support{
			Committer: &mockLedgerInfo{1},
		})
	}

	for i := 0; i < n; i++ {
		assert.NotNil(t, gossips[i].deliveryService[channelName], "Delivery service for channel %s not initiated in peer %d", channelName, i)
		assert.False(t, gossips[i].deliveryService[channelName].(*mockDeliverService).running[channelName], "Block deliverer should not be started for peer %d", i)
	}

	stopPeers(gossips)
}

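// TestWithStaticDeliverClientBothStaticAndLeaderElection checks that enabling
// both dynamic leader election and static org leadership causes
// InitializeChannel to panic.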
func TestWithStaticDeliverClientBothStaticAndLeaderElection(t *testing.T) {
	serviceConfig := &ServiceConfig{
		UseLeaderElection:                true,
		OrgLeader:                        true,
		ElectionStartupGracePeriod:       election.DefStartupGracePeriod,
		ElectionMembershipSampleInterval: election.DefMembershipSampleInterval,
		ElectionLeaderAliveThreshold:     election.DefLeaderAliveThreshold,
		ElectionLeaderElectionDuration:   election.DefLeaderElectionDuration,
	}
	n := 2
	gossips := startPeers(t, serviceConfig, n, 0, 1)

	channelName := "chanA"
	peerIndexes := make([]int, n)
	for i := 0; i < n; i++ {
		peerIndexes[i] = i
	}

	addPeersToChannel(t, n, channelName, gossips, peerIndexes)

	waitForFullMembershipOrFailNow(t, channelName, gossips, n, time.Second*30, time.Second*2)

	store := newTransientStore(t)
	defer store.tearDown()

	deliverServiceFactory := &mockDeliverServiceFactory{
		service: &mockDeliverService{
			running: make(map[string]bool),
		},
	}

	for i := 0; i < n; i++ {
		gossips[i].deliveryFactory = deliverServiceFactory
		assert.Panics(t, func() {
			gossips[i].InitializeChannel(channelName, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), store.Store, Support{
				Committer: &mockLedgerInfo{1},
			})
		}, "Dynamic leader election and a static connection to the ordering service cannot be used simultaneously")
	}

	stopPeers(gossips)
}

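// mockDeliverServiceFactory hands out a single pre-configured
// mockDeliverService regardless of the arguments it is given.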
type mockDeliverServiceFactory struct {
	service *mockDeliverService
}

func (mf *mockDeliverServiceFactory) Service(GossipServiceAdapter, *orderers.ConnectionSource, api.MessageCryptoService, bool) deliverservice.DeliverService {
	return mf.service
}

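// mockDeliverService records, per channel, whether block delivery is
// currently running.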
type mockDeliverService struct {
	running map[string]bool
}

func (ds *mockDeliverService) StartDeliverForChannel(chainID string, ledgerInfo blocksprovider.LedgerInfo, finalizer func()) error {
	ds.running[chainID] = true
	return nil
}

func (ds *mockDeliverService) StopDeliverForChannel(chainID string) error {
	ds.running[chainID] = false
	return nil
}

func (ds *mockDeliverService) Stop() {
}

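// mockLedgerInfo implements just enough of the committer interface for these
// tests: LedgerHeight, DoesPvtDataInfoExistInLedger, Commit, GetBlocks and
// Close are usable; everything else panics.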
type mockLedgerInfo struct {
	Height uint64
}

func (li *mockLedgerInfo) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
	panic("implement me")
}

func (li *mockLedgerInfo) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) {
	panic("implement me")
}

func (li *mockLedgerInfo) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
	panic("implement me")
}

func (li *mockLedgerInfo) CommitLegacy(blockAndPvtData *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
	panic("implement me")
}

func (li *mockLedgerInfo) CommitPvtDataOfOldBlocks(reconciledPvtdata []*ledger.ReconciledPvtdata) ([]*ledger.PvtdataHashMismatch, error) {
	panic("implement me")
}

func (li *mockLedgerInfo) GetPvtDataAndBlockByNum(seqNum uint64) (*ledger.BlockAndPvtData, error) {
	panic("implement me")
}

// LedgerHeight returns a mocked value for the ledger height
func (li *mockLedgerInfo) LedgerHeight() (uint64, error) {
	return li.Height, nil
}

func (li *mockLedgerInfo) DoesPvtDataInfoExistInLedger(blkNum uint64) (bool, error) {
	return false, nil
}

// Commit commits a block to the ledger; this mock accepts every block
func (li *mockLedgerInfo) Commit(block *common.Block) error {
	return nil
}

// GetBlocks returns the blocks whose sequence numbers are provided in the slice; this mock always returns an empty slice
func (li *mockLedgerInfo) GetBlocks(blockSeqs []uint64) []*common.Block {
	return make([]*common.Block, 0)
}

// Close closes the committing service
func (li *mockLedgerInfo) Close() {
}

func TestLeaderElectionWithRealGossip(t *testing.T) {
	t.Parallel()
	// Spawn 10 gossip instances with a single channel, all inside the same organization.
	// Run leader election on top of each gossip instance and check that only one leader is chosen.
	// Create another channel that includes a subset of peers {1,3,5,7} over the same gossip instances.
	// Run additional leader election services for the new channel.
	// Check that the correct leader still exists for the first channel and that a new correct leader is chosen in the second channel.
	// Stop the gossip instances of the leader peers for both channels and see that a new leader is chosen for both.

	// Creating gossip service instances for peers
	serviceConfig := &ServiceConfig{
		UseLeaderElection:                false,
		OrgLeader:                        false,
		ElectionStartupGracePeriod:       election.DefStartupGracePeriod,
		ElectionMembershipSampleInterval: election.DefMembershipSampleInterval,
		ElectionLeaderAliveThreshold:     election.DefLeaderAliveThreshold,
		ElectionLeaderElectionDuration:   election.DefLeaderElectionDuration,
	}

	n := 10
	gossips := startPeers(t, serviceConfig, n, 0, 1, 2, 3, 4)
	// Joining all peers to the first channel
	channelName := "chanA"
	peerIndexes := make([]int, n)
	for i := 0; i < n; i++ {
		peerIndexes[i] = i
	}
	addPeersToChannel(t, n, channelName, gossips, peerIndexes)

	waitForFullMembershipOrFailNow(t, channelName, gossips, n, time.Second*30, time.Second*2)

	logger.Warning("Starting leader election services")

	// Starting leader election services
	services := make([]*electionService, n)

	electionMetrics := gossipmetrics.NewGossipMetrics(&disabled.Provider{}).ElectionMetrics

	for i := 0; i < n; i++ {
		services[i] = &electionService{nil, false, 0}
		services[i].LeaderElectionService = gossips[i].newLeaderElectionComponent(channelName, services[i].callback, electionMetrics)
	}

	logger.Warning("Waiting for leader election")

	assert.True(t, waitForLeaderElection(t, services, time.Second*30, time.Second*2), "One leader should be selected")

	startsNum := 0
	for i := 0; i < n; i++ {
		// Check whether the callback function was invoked by this leader election service instance
		if services[i].callbackInvokeRes {
			startsNum++
		}
	}
	// Only the leader should invoke the callback function, so this double-checks that only one leader exists
	assert.Equal(t, 1, startsNum, "The callback should be invoked for exactly one peer - chanA")

	// Adding some peers to a new channel and creating leader election services for the peers in the new channel
	// Expecting peer 1 (the first in the list of election services) to become the leader of the second channel
	secondChannelPeerIndexes := []int{1, 3, 5, 7}
	secondChannelName := "chanB"
	secondChannelServices := make([]*electionService, len(secondChannelPeerIndexes))
	addPeersToChannel(t, n, secondChannelName, gossips, secondChannelPeerIndexes)

	secondChannelGossips := make([]*gossipGRPC, 0)
	for _, i := range secondChannelPeerIndexes {
		secondChannelGossips = append(secondChannelGossips, gossips[i])
	}
	waitForFullMembershipOrFailNow(t, secondChannelName, secondChannelGossips, len(secondChannelGossips), time.Second*30, time.Millisecond*100)

	for idx, i := range secondChannelPeerIndexes {
		secondChannelServices[idx] = &electionService{nil, false, 0}
		secondChannelServices[idx].LeaderElectionService =
			gossips[i].newLeaderElectionComponent(secondChannelName, secondChannelServices[idx].callback, electionMetrics)
	}

	assert.True(t, waitForLeaderElection(t, secondChannelServices, time.Second*30, time.Second*2), "One leader should be selected for chanB")
	assert.True(t, waitForLeaderElection(t, services, time.Second*30, time.Second*2), "One leader should be selected for chanA")

	startsNum = 0
	for i := 0; i < n; i++ {
		if services[i].callbackInvokeRes {
			startsNum++
		}
	}
	assert.Equal(t, 1, startsNum, "The callback should be invoked for exactly one peer - chanA")

	startsNum = 0
	for i := 0; i < len(secondChannelServices); i++ {
		if secondChannelServices[i].callbackInvokeRes {
			startsNum++
		}
	}
	assert.Equal(t, 1, startsNum, "The callback should be invoked for exactly one peer - chanB")

	// Stopping 2 gossip instances (peer 0 and peer 1) should trigger re-election.
	// Now peer 2 becomes the leader for the first channel and peer 3 for the second channel.

	logger.Warning("Killing 2 peers, initiating new leader election")

	stopPeers(gossips[:2])

	waitForFullMembershipOrFailNow(t, channelName, gossips[2:], n-2, time.Second*30, time.Millisecond*100)
	waitForFullMembershipOrFailNow(t, secondChannelName, secondChannelGossips[1:], len(secondChannelGossips)-1, time.Second*30, time.Millisecond*100)

	assert.True(t, waitForLeaderElection(t, services[2:], time.Second*30, time.Second*2), "One leader should be selected after re-election - chanA")
	assert.True(t, waitForLeaderElection(t, secondChannelServices[1:], time.Second*30, time.Second*2), "One leader should be selected after re-election - chanB")

	startsNum = 0
	for i := 2; i < n; i++ {
		if services[i].callbackInvokeRes {
			startsNum++
		}
	}
	assert.Equal(t, 1, startsNum, "The callback should be invoked for exactly one peer after re-election - chanA")

	startsNum = 0
	for i := 1; i < len(secondChannelServices); i++ {
		if secondChannelServices[i].callbackInvokeRes {
			startsNum++
		}
	}
	assert.Equal(t, 1, startsNum, "The callback should be invoked for exactly one peer after re-election - chanB")

	stopServices(secondChannelServices)
	stopServices(services)
	stopPeers(gossips[2:])
}

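// electionService decorates a LeaderElectionService and records whether, and
// how many times, its leadership callback has fired.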
type electionService struct {
	election.LeaderElectionService
	callbackInvokeRes   bool
	callbackInvokeCount int
}

func (es *electionService) callback(isLeader bool) {
	es.callbackInvokeRes = isLeader
	es.callbackInvokeCount++
}

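// joinChanMsg is a trivial JoinChannelMessage stub used to join test peers to
// a channel.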
type joinChanMsg struct {
}

// SequenceNumber returns the sequence number of the block this joinChanMsg
// is derived from
func (jmc *joinChanMsg) SequenceNumber() uint64 {
	return uint64(time.Now().UnixNano())
}

// Members returns the organizations of the channel
func (jmc *joinChanMsg) Members() []api.OrgIdentityType {
	return []api.OrgIdentityType{orgInChannelA}
}

// AnchorPeersOf returns the anchor peers of the given organization
func (jmc *joinChanMsg) AnchorPeersOf(org api.OrgIdentityType) []api.AnchorPeer {
	return []api.AnchorPeer{}
}

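// waitForFullMembershipOrFailNow polls until every gossip instance sees the
// other peersNum-1 peers on the channel, failing the test if full membership
// is not reached within the timeout.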
func waitForFullMembershipOrFailNow(t *testing.T, channel string, gossips []*gossipGRPC, peersNum int, timeout time.Duration, testPollInterval time.Duration) {
	end := time.Now().Add(timeout)
	var correctPeers int
	for time.Now().Before(end) {
		correctPeers = 0
		for _, g := range gossips {
			if len(g.PeersOfChannel(gossipcommon.ChannelID(channel))) == (peersNum - 1) {
				correctPeers++
			}
		}
		if correctPeers == peersNum {
			return
		}
		time.Sleep(testPollInterval)
	}
	t.Fatalf("Failed to establish full channel membership. Only %d out of %d peers have full membership", correctPeers, peersNum)
}

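// waitForMultipleLeadersElection polls until exactly leadersNum leaders are
// observed in two consecutive polls (so a leader count that is still in flux
// does not pass), returning false on timeout.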
func waitForMultipleLeadersElection(t *testing.T, services []*electionService, leadersNum int, timeout time.Duration, testPollInterval time.Duration) bool {
	logger.Warning("Waiting for", leadersNum, "leaders")
	end := time.Now().Add(timeout)
	correctNumberOfLeadersFound := false
	leaders := 0
	for time.Now().Before(end) {
		leaders = 0
		for _, s := range services {
			if s.IsLeader() {
				leaders++
			}
		}
		if leaders == leadersNum {
			if correctNumberOfLeadersFound {
				return true
			}
			correctNumberOfLeadersFound = true
		} else {
			correctNumberOfLeadersFound = false
		}
		time.Sleep(testPollInterval)
	}
	logger.Warning("Incorrect number of leaders", leaders)
	for i, s := range services {
		logger.Warning("Peer at index", i, "is leader", s.IsLeader())
	}
	return false
}

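// waitForLeaderElection waits until exactly one stable leader is elected
// among the given services.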
func waitForLeaderElection(t *testing.T, services []*electionService, timeout time.Duration, testPollInterval time.Duration) bool {
	return waitForMultipleLeadersElection(t, services, 1, timeout, testPollInterval)
}

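// waitUntilOrFailBlocking runs f in a goroutine and fails the test with a
// stack trace if f does not complete within the timeout.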
func waitUntilOrFailBlocking(t *testing.T, f func(), timeout time.Duration) {
	successChan := make(chan struct{}, 1)
	go func() {
		f()
		successChan <- struct{}{}
	}()
	select {
	case <-time.NewTimer(timeout).C:
		break
	case <-successChan:
		return
	}
	util.PrintStackTrace()
	assert.Fail(t, "Timeout expired!")
}

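// stopServices stops all election services concurrently, waits for them to
// finish, then sleeps briefly to let the shutdown settle.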
func stopServices(services []*electionService) {
	stoppingWg := sync.WaitGroup{}
	stoppingWg.Add(len(services))
	for _, service := range services {
		go func(s election.LeaderElectionService) {
			defer stoppingWg.Done()
			s.Stop()
		}(service)
	}
	stoppingWg.Wait()
	time.Sleep(time.Second * 2)
}

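// stopPeers stops the underlying GossipService of every peer concurrently
// and waits for all of them to finish.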
func stopPeers(peers []*gossipGRPC) {
	stoppingWg := sync.WaitGroup{}
	stoppingWg.Add(len(peers))
	for _, p := range peers {
		go func(gs *GossipService) {
			defer stoppingWg.Done()
			gs.Stop()
		}(p.GossipService)
	}
	stoppingWg.Wait()
	time.Sleep(time.Second * 2)
}

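// addPeersToChannel joins the selected peers to the channel concurrently and
// waits (with a bounded timeout) for all joins to complete.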
func addPeersToChannel(t *testing.T, n int, channel string, peers []*gossipGRPC, peerIndexes []int) {
	jcm := &joinChanMsg{}

	wg := sync.WaitGroup{}
	for _, i := range peerIndexes {
		wg.Add(1)
		go func(i int) {
			peers[i].JoinChan(jcm, gossipcommon.ChannelID(channel))
			peers[i].UpdateLedgerHeight(0, gossipcommon.ChannelID(channel))
			wg.Done()
		}(i)
	}
	waitUntilOrFailBlocking(t, wg.Wait, time.Second*10)
}

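// startPeers creates n gossip instances on freshly allocated gRPC layers; the
// peers at the given boot indexes act as bootstrap peers for the rest.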
func startPeers(t *testing.T, serviceConfig *ServiceConfig, n int, boot ...int) []*gossipGRPC {
	var ports []int
	var grpcs []*comm.GRPCServer
	var certs []*gossipcommon.TLSCertificates
	var secDialOpts []api.PeerSecureDialOpts

	for i := 0; i < n; i++ {
		port, srv, cert, secDialOpt, _ := util.CreateGRPCLayer()
		ports = append(ports, port)
		grpcs = append(grpcs, srv)
		certs = append(certs, cert)
		secDialOpts = append(secDialOpts, secDialOpt)
	}

	var bootPorts []int
	for _, index := range boot {
		bootPorts = append(bootPorts, ports[index])
	}

	peers := make([]*gossipGRPC, n)
	wg := sync.WaitGroup{}
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func(i int) {
			peers[i] = newGossipInstance(serviceConfig, ports[i], i, grpcs[i], certs[i], secDialOpts[i], 100, bootPorts...)
			wg.Done()
		}(i)
	}
	waitUntilOrFailBlocking(t, wg.Wait, time.Second*10)

	return peers
}

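// newGossipInstance wires a full GossipService around a fresh gossip
// component with test-friendly timings; the instance gossips on 127.0.0.1 and
// advertises a fake external endpoint.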
func newGossipInstance(serviceConfig *ServiceConfig, port int, id int, gRPCServer *comm.GRPCServer, certs *gossipcommon.TLSCertificates,
	secureDialOpts api.PeerSecureDialOpts, maxMsgCount int, bootPorts ...int) *gossipGRPC {
	conf := &gossip.Config{
		BindPort:                     port,
		BootstrapPeers:               bootPeers(bootPorts...),
		ID:                           fmt.Sprintf("p%d", id),
		MaxBlockCountToStore:         maxMsgCount,
		MaxPropagationBurstLatency:   500 * time.Millisecond,
		MaxPropagationBurstSize:      20,
		PropagateIterations:          1,
		PropagatePeerNum:             3,
		PullInterval:                 2 * time.Second,
		PullPeerNum:                  5,
		InternalEndpoint:             fmt.Sprintf("127.0.0.1:%d", port),
		ExternalEndpoint:             fmt.Sprintf("1.2.3.4:%d", port),
		PublishCertPeriod:            4 * time.Second,
		PublishStateInfoInterval:     time.Second,
		RequestStateInfoInterval:     time.Second,
		TimeForMembershipTracker:     5 * time.Second,
		TLSCerts:                     certs,
		DigestWaitTime:               algo.DefDigestWaitTime,
		RequestWaitTime:              algo.DefRequestWaitTime,
		ResponseWaitTime:             algo.DefResponseWaitTime,
		DialTimeout:                  gcomm.DefDialTimeout,
		ConnTimeout:                  gcomm.DefConnTimeout,
		RecvBuffSize:                 gcomm.DefRecvBuffSize,
		SendBuffSize:                 gcomm.DefSendBuffSize,
		MsgExpirationTimeout:         channel.DefMsgExpirationTimeout,
		AliveTimeInterval:            discovery.DefAliveTimeInterval,
		AliveExpirationTimeout:       discovery.DefAliveExpirationTimeout,
		AliveExpirationCheckInterval: discovery.DefAliveExpirationCheckInterval,
		ReconnectInterval:            time.Second,
	}
	selfID := api.PeerIdentityType(conf.InternalEndpoint)
	cryptoService := &naiveCryptoService{}
	metrics := gossipmetrics.NewGossipMetrics(&disabled.Provider{})
	gossipComponent := gossip.New(
		conf,
		gRPCServer.Server(),
		&orgCryptoService{},
		cryptoService,
		selfID,
		secureDialOpts,
		metrics,
	)
	go gRPCServer.Start()

	secAdv := peergossip.NewSecurityAdvisor(mgmt.NewDeserializersManager(factory.GetDefault()))
	gossipService := &GossipService{
		mcs:             cryptoService,
		gossipSvc:       gossipComponent,
		chains:          make(map[string]state.GossipStateProvider),
		leaderElection:  make(map[string]election.LeaderElectionService),
		privateHandlers: make(map[string]privateHandler),
		deliveryService: make(map[string]deliverservice.DeliverService),
		deliveryFactory: &deliveryFactoryImpl{
			credentialSupport: comm.NewCredentialSupport(),
		},
		peerIdentity:  api.PeerIdentityType(conf.InternalEndpoint),
		secAdv:        secAdv,
		metrics:       metrics,
		serviceConfig: serviceConfig,
	}

	return &gossipGRPC{GossipService: gossipService, grpc: gRPCServer}
}

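// gossipGRPC bundles a GossipService with the gRPC server it runs on so both
// can be stopped together.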
type gossipGRPC struct {
	*GossipService
	grpc *comm.GRPCServer
}

func (g *gossipGRPC) Stop() {
	g.GossipService.Stop()
	g.grpc.Stop()
}

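// bootPeers renders the given ports as 127.0.0.1 bootstrap endpoints.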
func bootPeers(ports ...int) []string {
	var peers []string
	for _, port := range ports {
		peers = append(peers, fmt.Sprintf("127.0.0.1:%d", port))
	}
	return peers
}

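// getAvailablePort asks the kernel for a free TCP port on the loopback
// interface and returns both the endpoint string and the bound listener.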
func getAvailablePort(t *testing.T) (endpoint string, ll net.Listener) {
	ll, err := net.Listen("tcp", "127.0.0.1:0")
	assert.NoError(t, err)
	endpoint = ll.Addr().String()
	return endpoint, ll
}

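// naiveCryptoService is a permissive MessageCryptoService for tests: every
// identity and block is accepted, Sign echoes the message, and Verify only
// checks that the signature equals the message.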
type naiveCryptoService struct {
}

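// orgCryptoService maps every peer identity to orgInChannelA and accepts
// every join-channel message.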
type orgCryptoService struct {
}

// OrgByPeerIdentity returns the OrgIdentityType
// of a given peer identity
func (*orgCryptoService) OrgByPeerIdentity(identity api.PeerIdentityType) api.OrgIdentityType {
	return orgInChannelA
}

// Verify verifies a JoinChanMessage, returns nil on success,
// and an error on failure
func (*orgCryptoService) Verify(joinChanMsg api.JoinChannelMessage) error {
	return nil
}

func (naiveCryptoService) Expiration(peerIdentity api.PeerIdentityType) (time.Time, error) {
	return time.Now().Add(time.Hour), nil
}

// VerifyByChannel verifies a peer's signature on a message in the context
// of a specific channel
func (*naiveCryptoService) VerifyByChannel(_ gossipcommon.ChannelID, _ api.PeerIdentityType, _, _ []byte) error {
	return nil
}

func (*naiveCryptoService) ValidateIdentity(peerIdentity api.PeerIdentityType) error {
	return nil
}

// GetPKIidOfCert returns the PKI-ID of a peer's identity
func (*naiveCryptoService) GetPKIidOfCert(peerIdentity api.PeerIdentityType) gossipcommon.PKIidType {
	return gossipcommon.PKIidType(peerIdentity)
}

// VerifyBlock returns nil if the block is properly signed,
// else returns error
func (*naiveCryptoService) VerifyBlock(chainID gossipcommon.ChannelID, seqNum uint64, signedBlock *common.Block) error {
	return nil
}

// Sign signs msg with this peer's signing key and outputs
// the signature if no error occurred.
func (*naiveCryptoService) Sign(msg []byte) ([]byte, error) {
	return msg, nil
}

// Verify checks that signature is a valid signature of message under a peer's verification key.
// If the verification succeeded, Verify returns nil meaning no error occurred.
// If peerCert is nil, then the signature is verified against this peer's verification key.
func (*naiveCryptoService) Verify(peerIdentity api.PeerIdentityType, signature, message []byte) error {
	if !bytes.Equal(signature, message) {
		return fmt.Errorf("wrong signature: %v, %v", signature, message)
	}
	return nil
}

var orgInChannelA = api.OrgIdentityType("ORG1")

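// TestInvalidInitialization builds a gossip service with peer TLS disabled
// and verifies that the delivery factory can still construct a deliver
// service.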
func TestInvalidInitialization(t *testing.T) {
	grpcServer := grpc.NewServer()
	endpoint, socket := getAvailablePort(t)

	cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
	assert.NoError(t, err)

	mockSignerSerializer := &mocks.SignerSerializer{}
	mockSignerSerializer.SerializeReturns(api.PeerIdentityType("peer-identity"), nil)
	secAdv := peergossip.NewSecurityAdvisor(mgmt.NewDeserializersManager(cryptoProvider))
	gossipConfig, err := gossip.GlobalConfig(endpoint, nil)
	assert.NoError(t, err)

	grpcClient, err := comm.NewGRPCClient(comm.ClientConfig{})
	require.NoError(t, err)

	gossipService, err := New(
		mockSignerSerializer,
		gossipmetrics.NewGossipMetrics(&disabled.Provider{}),
		endpoint,
		grpcServer,
		&naiveCryptoService{},
		secAdv,
		nil,
		comm.NewCredentialSupport(),
		grpcClient,
		gossipConfig,
		&ServiceConfig{},
		&deliverservice.DeliverServiceConfig{
			PeerTLSEnabled:              false,
			ReConnectBackoffThreshold:   deliverservice.DefaultReConnectBackoffThreshold,
			ReconnectTotalTimeThreshold: deliverservice.DefaultReConnectTotalTimeThreshold,
		},
	)
	assert.NoError(t, err)
	gService := gossipService
	defer gService.Stop()

	go grpcServer.Serve(socket)
	defer grpcServer.Stop()

	dc := gService.deliveryFactory.Service(gService, orderers.NewConnectionSource(flogging.MustGetLogger("peer.orderers"), nil), &naiveCryptoService{}, false)
	assert.NotNil(t, dc)
}

func TestChannelConfig(t *testing.T) {
	// Test whether the gossip service is indeed a singleton
	grpcServer := grpc.NewServer()
	endpoint, socket := getAvailablePort(t)

	cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
	assert.NoError(t, err)

	mockSignerSerializer := &mocks.SignerSerializer{}
	mockSignerSerializer.SerializeReturns(api.PeerIdentityType("peer-identity"), nil)
	secAdv := peergossip.NewSecurityAdvisor(mgmt.NewDeserializersManager(cryptoProvider))
	gossipConfig, err := gossip.GlobalConfig(endpoint, nil)
	assert.NoError(t, err)

	grpcClient, err := comm.NewGRPCClient(comm.ClientConfig{})
	require.NoError(t, err)

	gossipService, err := New(
		mockSignerSerializer,
		gossipmetrics.NewGossipMetrics(&disabled.Provider{}),
		endpoint,
		grpcServer,
		&naiveCryptoService{},
		secAdv,
		nil,
		nil,
		grpcClient,
		gossipConfig,
		&ServiceConfig{},
		&deliverservice.DeliverServiceConfig{
			ReConnectBackoffThreshold:   deliverservice.DefaultReConnectBackoffThreshold,
			ReconnectTotalTimeThreshold: deliverservice.DefaultReConnectTotalTimeThreshold,
		},
	)
	assert.NoError(t, err)
	gService := gossipService
	defer gService.Stop()

	go grpcServer.Serve(socket)
	defer grpcServer.Stop()

	jcm := &joinChannelMessage{seqNum: 1, members2AnchorPeers: map[string][]api.AnchorPeer{
		"A": {{Host: "host", Port: 5000}},
	}}

	assert.Equal(t, uint64(1), jcm.SequenceNumber())

	mc := &mockConfig{
		sequence: 1,
		orgs: map[string]channelconfig.ApplicationOrg{
			string(orgInChannelA): &appGrp{
				mspID:       string(orgInChannelA),
				anchorPeers: []*peer.AnchorPeer{},
			},
		},
	}
	gService.JoinChan(jcm, gossipcommon.ChannelID("A"))
	gService.updateAnchors(mc)
	assert.True(t, gService.amIinChannel(string(orgInChannelA), mc))
}

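// defaultDeliverClientDialOpts returns the blocking dial options used for
// deliver clients: default max send/receive message sizes plus the standard
// keepalive settings.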
func defaultDeliverClientDialOpts() []grpc.DialOption {
	dialOpts := []grpc.DialOption{grpc.WithBlock()}
	dialOpts = append(
		dialOpts,
		grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(comm.MaxRecvMsgSize),
			grpc.MaxCallSendMsgSize(comm.MaxSendMsgSize)))
	kaOpts := comm.DefaultKeepaliveOptions
	dialOpts = append(dialOpts, comm.ClientKeepaliveOptions(kaOpts)...)

	return dialOpts
}