github.com/Hnampk/fabric@v2.1.1+incompatible/gossip/discovery/discovery_test.go

     1  /*
     2  Copyright IBM Corp. 2016 All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package discovery
     8  
     9  import (
    10  	"bytes"
    11  	"context"
    12  	"fmt"
    13  	"io"
    14  	"math/rand"
    15  	"net"
    16  	"sort"
    17  	"strconv"
    18  	"strings"
    19  	"sync"
    20  	"sync/atomic"
    21  	"testing"
    22  	"time"
    23  
    24  	protoG "github.com/golang/protobuf/proto"
    25  	proto "github.com/hyperledger/fabric-protos-go/gossip"
    26  	"github.com/hyperledger/fabric/common/flogging"
    27  	"github.com/hyperledger/fabric/gossip/common"
    28  	"github.com/hyperledger/fabric/gossip/gossip/msgstore"
    29  	"github.com/hyperledger/fabric/gossip/protoext"
    30  	"github.com/hyperledger/fabric/gossip/util"
    31  	"github.com/stretchr/testify/assert"
    32  	"github.com/stretchr/testify/mock"
    33  	"go.uber.org/zap"
    34  	"go.uber.org/zap/zapcore"
    35  	"google.golang.org/grpc"
    36  	"google.golang.org/grpc/connectivity"
    37  )
    38  
     39  var timeout = 15 * time.Second
    40  
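         // The test configuration uses a short alive interval (300ms) so expiration-related tests finish
         // quickly: peers expire after 10 missed alive intervals, expiration is checked every interval,
         // and reconnect attempts are made every 10 intervals.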
     41  var aliveTimeInterval = 300 * time.Millisecond
    42  var defaultTestConfig = DiscoveryConfig{
    43  	AliveTimeInterval:            aliveTimeInterval,
    44  	AliveExpirationTimeout:       10 * aliveTimeInterval,
    45  	AliveExpirationCheckInterval: aliveTimeInterval,
    46  	ReconnectInterval:            10 * aliveTimeInterval,
    47  }
    48  
    49  func init() {
    50  	util.SetupTestLogging()
    51  	maxConnectionAttempts = 10000
    52  }
    53  
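         // dummyReceivedMessage wraps a signed gossip message and its connection info, implementing only
         // the protoext.ReceivedMessage methods the discovery layer exercises in these tests; the rest panic.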
    54  type dummyReceivedMessage struct {
    55  	msg  *protoext.SignedGossipMessage
    56  	info *protoext.ConnectionInfo
    57  }
    58  
    59  func (*dummyReceivedMessage) Respond(msg *proto.GossipMessage) {
    60  	panic("implement me")
    61  }
    62  
    63  func (rm *dummyReceivedMessage) GetGossipMessage() *protoext.SignedGossipMessage {
    64  	return rm.msg
    65  }
    66  
    67  func (*dummyReceivedMessage) GetSourceEnvelope() *proto.Envelope {
    68  	panic("implement me")
    69  }
    70  
    71  func (rm *dummyReceivedMessage) GetConnectionInfo() *protoext.ConnectionInfo {
    72  	return rm.info
    73  }
    74  
    75  func (*dummyReceivedMessage) Ack(err error) {
    76  	panic("implement me")
    77  }
    78  
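         // dummyCommModule is an in-memory stand-in for the comm layer: it keeps gRPC streams and
         // connections to peers, tracks sent/received message counters, can record validated messages,
         // and can have its communication disabled entirely via disableComm.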
    79  type dummyCommModule struct {
    80  	validatedMessages chan *protoext.SignedGossipMessage
    81  	msgsReceived      uint32
    82  	msgsSent          uint32
    83  	id                string
    84  	identitySwitch    chan common.PKIidType
    85  	presumeDead       chan common.PKIidType
    86  	detectedDead      chan string
    87  	streams           map[string]proto.Gossip_GossipStreamClient
    88  	conns             map[string]*grpc.ClientConn
    89  	lock              *sync.RWMutex
    90  	incMsgs           chan protoext.ReceivedMessage
    91  	lastSeqs          map[string]uint64
    92  	shouldGossip      bool
    93  	disableComm       bool
    94  	mock              *mock.Mock
    95  }
    96  
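         // gossipInstance bundles a Discovery implementation with its dummy comm module and the gRPC
         // server that serves its gossip stream in the tests.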
    97  type gossipInstance struct {
    98  	msgInterceptor func(*protoext.SignedGossipMessage)
    99  	comm           *dummyCommModule
   100  	Discovery
   101  	gRGCserv      *grpc.Server
   102  	lsnr          net.Listener
   103  	shouldGossip  bool
   104  	syncInitiator *time.Ticker
   105  	stopChan      chan struct{}
   106  	port          int
   107  }
   108  
   109  func (comm *dummyCommModule) ValidateAliveMsg(am *protoext.SignedGossipMessage) bool {
   110  	comm.lock.RLock()
   111  	c := comm.validatedMessages
   112  	comm.lock.RUnlock()
   113  
   114  	if c != nil {
   115  		c <- am
   116  	}
   117  	return true
   118  }
   119  
   120  func (comm *dummyCommModule) IdentitySwitch() <-chan common.PKIidType {
   121  	return comm.identitySwitch
   122  }
   123  
   124  func (comm *dummyCommModule) recordValidation(validatedMessages chan *protoext.SignedGossipMessage) {
   125  	comm.lock.Lock()
   126  	defer comm.lock.Unlock()
   127  	comm.validatedMessages = validatedMessages
   128  }
   129  
   130  func (comm *dummyCommModule) SignMessage(am *proto.GossipMessage, internalEndpoint string) *proto.Envelope {
   133  	secret := &proto.Secret{
   134  		Content: &proto.Secret_InternalEndpoint{
   135  			InternalEndpoint: internalEndpoint,
   136  		},
   137  	}
   138  	signer := func(msg []byte) ([]byte, error) {
   139  		return nil, nil
   140  	}
   141  	s, _ := protoext.NoopSign(am)
   142  	env := s.Envelope
   143  	protoext.SignSecret(env, signer, secret)
   144  	return env
   145  }
   146  
   147  func (comm *dummyCommModule) Gossip(msg *protoext.SignedGossipMessage) {
   148  	if !comm.shouldGossip || comm.disableComm {
   149  		return
   150  	}
   151  	comm.lock.Lock()
   152  	defer comm.lock.Unlock()
   153  	for _, conn := range comm.streams {
   154  		conn.Send(msg.Envelope)
   155  	}
   156  }
   157  
   158  func (comm *dummyCommModule) Forward(msg protoext.ReceivedMessage) {
   159  	if !comm.shouldGossip || comm.disableComm {
   160  		return
   161  	}
   162  	comm.lock.Lock()
   163  	defer comm.lock.Unlock()
   164  	for _, conn := range comm.streams {
   165  		conn.Send(msg.GetGossipMessage().Envelope)
   166  	}
   167  }
   168  
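         // SendToPeer pings the peer first if no stream to it exists yet, then re-signs the message and
         // sends its envelope over the stream, incrementing the sent-message counter.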
   169  func (comm *dummyCommModule) SendToPeer(peer *NetworkMember, msg *protoext.SignedGossipMessage) {
   170  	if comm.disableComm {
   171  		return
   172  	}
   173  	comm.lock.RLock()
   174  	_, exists := comm.streams[peer.Endpoint]
   175  	mock := comm.mock
   176  	comm.lock.RUnlock()
   177  
   178  	if mock != nil {
   179  		mock.Called(peer, msg)
   180  	}
   181  
   182  	if !exists {
    183  		if !comm.Ping(peer) {
   184  			fmt.Printf("Ping to %v failed\n", peer.Endpoint)
   185  			return
   186  		}
   187  	}
   188  	comm.lock.Lock()
   189  	s, _ := protoext.NoopSign(msg.GossipMessage)
   190  	comm.streams[peer.Endpoint].Send(s.Envelope)
   191  	comm.lock.Unlock()
   192  	atomic.AddUint32(&comm.msgsSent, 1)
   193  }
   194  
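         // Ping dials the peer and opens a new gossip stream if none exists (or the existing connection
         // was shut down); otherwise it issues a Ping RPC over the existing connection.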
   195  func (comm *dummyCommModule) Ping(peer *NetworkMember) bool {
   196  	if comm.disableComm {
   197  		return false
   198  	}
   199  	comm.lock.Lock()
   200  	defer comm.lock.Unlock()
   201  
   202  	if comm.mock != nil {
   203  		comm.mock.Called()
   204  	}
   205  
   206  	_, alreadyExists := comm.streams[peer.Endpoint]
   207  	conn := comm.conns[peer.Endpoint]
   208  	if !alreadyExists || conn.GetState() == connectivity.Shutdown {
   209  		newConn, err := grpc.Dial(peer.Endpoint, grpc.WithInsecure())
   210  		if err != nil {
   211  			return false
   212  		}
   213  		if stream, err := proto.NewGossipClient(newConn).GossipStream(context.Background()); err == nil {
   214  			comm.conns[peer.Endpoint] = newConn
   215  			comm.streams[peer.Endpoint] = stream
   216  			return true
   217  		}
   218  		return false
   219  	}
   220  	if _, err := proto.NewGossipClient(conn).Ping(context.Background(), &proto.Empty{}); err != nil {
   221  		return false
   222  	}
   223  	return true
   224  }
   225  
   226  func (comm *dummyCommModule) Accept() <-chan protoext.ReceivedMessage {
   227  	return comm.incMsgs
   228  }
   229  
   230  func (comm *dummyCommModule) PresumedDead() <-chan common.PKIidType {
   231  	return comm.presumeDead
   232  }
   233  
   234  func (comm *dummyCommModule) CloseConn(peer *NetworkMember) {
   235  	comm.lock.Lock()
   236  	defer comm.lock.Unlock()
   237  
   238  	if _, exists := comm.streams[peer.Endpoint]; !exists {
   239  		return
   240  	}
   241  
   242  	comm.streams[peer.Endpoint].CloseSend()
   243  	comm.conns[peer.Endpoint].Close()
   244  }
   245  
   246  func (g *gossipInstance) receivedMsgCount() int {
   247  	return int(atomic.LoadUint32(&g.comm.msgsReceived))
   248  }
   249  
   250  func (g *gossipInstance) sentMsgCount() int {
   251  	return int(atomic.LoadUint32(&g.comm.msgsSent))
   252  }
   253  
   254  func (g *gossipInstance) discoveryImpl() *gossipDiscoveryImpl {
   255  	return g.Discovery.(*gossipDiscoveryImpl)
   256  }
   257  
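         // initiateSync starts a ticker that calls InitiateSync(peerNum) every frequency until the
         // instance is stopped.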
   258  func (g *gossipInstance) initiateSync(frequency time.Duration, peerNum int) {
   259  	g.syncInitiator = time.NewTicker(frequency)
   260  	g.stopChan = make(chan struct{})
   261  	go func() {
   262  		for {
   263  			select {
   264  			case <-g.syncInitiator.C:
   265  				g.Discovery.InitiateSync(peerNum)
   266  			case <-g.stopChan:
   267  				g.syncInitiator.Stop()
   268  				return
   269  			}
   270  		}
   271  	}()
   272  }
   273  
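         // GossipStream is the server side of the Gossip service: it decodes each incoming envelope,
         // hands it to the message interceptor and to the discovery layer via incMsgs, and forwards
         // alive messages onward.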
   274  func (g *gossipInstance) GossipStream(stream proto.Gossip_GossipStreamServer) error {
   275  	for {
   276  		envelope, err := stream.Recv()
   277  		if err == io.EOF {
   278  			return nil
   279  		}
   280  		if err != nil {
   281  			return err
   282  		}
   283  		lgr := g.Discovery.(*gossipDiscoveryImpl).logger
   284  		gMsg, err := protoext.EnvelopeToGossipMessage(envelope)
   285  		if err != nil {
   286  			lgr.Warning("Failed deserializing GossipMessage from envelope:", err)
   287  			continue
   288  		}
   289  		g.msgInterceptor(gMsg)
   290  
   291  		lgr.Debug(g.Discovery.Self().Endpoint, "Got message:", gMsg)
   292  		g.comm.incMsgs <- &dummyReceivedMessage{
   293  			msg: gMsg,
   294  			info: &protoext.ConnectionInfo{
   295  				ID: common.PKIidType("testID"),
   296  			},
   297  		}
   298  		atomic.AddUint32(&g.comm.msgsReceived, 1)
   299  
   300  		if aliveMsg := gMsg.GetAliveMsg(); aliveMsg != nil {
   301  			g.tryForwardMessage(gMsg)
   302  		}
   303  	}
   304  }
   305  
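         // tryForwardMessage gossips an alive message onward only if its sequence number is newer than
         // the last one seen from that peer, preventing forwarding loops between instances.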
   306  func (g *gossipInstance) tryForwardMessage(msg *protoext.SignedGossipMessage) {
   307  	g.comm.lock.Lock()
   308  
   309  	aliveMsg := msg.GetAliveMsg()
   310  
   311  	forward := false
   312  	id := string(aliveMsg.Membership.PkiId)
   313  	seqNum := aliveMsg.Timestamp.SeqNum
   314  	if last, exists := g.comm.lastSeqs[id]; exists {
   315  		if last < seqNum {
   316  			g.comm.lastSeqs[id] = seqNum
   317  			forward = true
   318  		}
   319  	} else {
   320  		g.comm.lastSeqs[id] = seqNum
   321  		forward = true
   322  	}
   323  
   324  	g.comm.lock.Unlock()
   325  
   326  	if forward {
   327  		g.comm.Gossip(msg)
   328  	}
   329  }
   330  
   331  func (g *gossipInstance) Stop() {
   332  	if g.syncInitiator != nil {
   333  		g.stopChan <- struct{}{}
   334  	}
   335  	g.gRGCserv.Stop()
   336  	g.lsnr.Close()
   337  	g.comm.lock.Lock()
   338  	for _, stream := range g.comm.streams {
   339  		stream.CloseSend()
   340  	}
   341  	g.comm.lock.Unlock()
   342  	for _, conn := range g.comm.conns {
   343  		conn.Close()
   344  	}
   345  	g.Discovery.Stop()
   346  }
   347  
   348  func (g *gossipInstance) Ping(context.Context, *proto.Empty) (*proto.Empty, error) {
   349  	return &proto.Empty{}, nil
   350  }
   351  
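         // noopPolicy discloses every message to every remote peer and leaves envelopes unmodified.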
   352  var noopPolicy = func(remotePeer *NetworkMember) (Sieve, EnvelopeFilter) {
   353  	return func(msg *protoext.SignedGossipMessage) bool {
   354  			return true
   355  		}, func(message *protoext.SignedGossipMessage) *proto.Envelope {
   356  			return message.Envelope
   357  		}
   358  }
   359  
   360  func createDiscoveryInstance(port int, id string, bootstrapPeers []string) *gossipInstance {
   361  	return createDiscoveryInstanceCustomConfig(port, id, bootstrapPeers, defaultTestConfig)
   362  }
   363  
   364  func createDiscoveryInstanceCustomConfig(port int, id string, bootstrapPeers []string, config DiscoveryConfig) *gossipInstance {
   365  	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, true, noopPolicy, config)
   366  }
   367  
   368  func createDiscoveryInstanceWithNoGossip(port int, id string, bootstrapPeers []string) *gossipInstance {
   369  	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, false, noopPolicy, defaultTestConfig)
   370  }
   371  
   372  func createDiscoveryInstanceWithNoGossipWithDisclosurePolicy(port int, id string, bootstrapPeers []string, pol DisclosurePolicy) *gossipInstance {
   373  	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, false, pol, defaultTestConfig)
   374  }
   375  
   376  func createDiscoveryInstanceThatGossips(port int, id string, bootstrapPeers []string, shouldGossip bool, pol DisclosurePolicy, config DiscoveryConfig) *gossipInstance {
   377  	return createDiscoveryInstanceThatGossipsWithInterceptors(port, id, bootstrapPeers, shouldGossip, pol, func(_ *protoext.SignedGossipMessage) {}, config)
   378  }
   379  
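         // createDiscoveryInstanceThatGossipsWithInterceptors builds a complete test peer: a dummy comm
         // module, a discovery service listening on the given port, and a gRPC server, with f invoked on
         // every message received over the gossip stream. It also connects to the given bootstrap peers.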
   380  func createDiscoveryInstanceThatGossipsWithInterceptors(port int, id string, bootstrapPeers []string, shouldGossip bool, pol DisclosurePolicy, f func(*protoext.SignedGossipMessage), config DiscoveryConfig) *gossipInstance {
   381  	comm := &dummyCommModule{
   382  		conns:          make(map[string]*grpc.ClientConn),
   383  		streams:        make(map[string]proto.Gossip_GossipStreamClient),
   384  		incMsgs:        make(chan protoext.ReceivedMessage, 1000),
   385  		presumeDead:    make(chan common.PKIidType, 10000),
   386  		id:             id,
   387  		detectedDead:   make(chan string, 10000),
   388  		identitySwitch: make(chan common.PKIidType),
   389  		lock:           &sync.RWMutex{},
   390  		lastSeqs:       make(map[string]uint64),
   391  		shouldGossip:   shouldGossip,
   392  		disableComm:    false,
   393  	}
   394  
   395  	endpoint := fmt.Sprintf("localhost:%d", port)
   396  	self := NetworkMember{
   397  		Metadata:         []byte{},
   398  		PKIid:            []byte(endpoint),
   399  		Endpoint:         endpoint,
   400  		InternalEndpoint: endpoint,
   401  	}
   402  
    403  	listenAddress := fmt.Sprintf(":%d", port)
   404  	ll, err := net.Listen("tcp", listenAddress)
   405  	if err != nil {
   406  		errMsg := fmt.Sprintf("Failed creating listener on address %v for gossip instance: %v", listenAddress, err)
   407  		panic(errMsg)
   408  	}
   409  	s := grpc.NewServer()
   410  
   411  	config.BootstrapPeers = bootstrapPeers
   412  	discSvc := NewDiscoveryService(self, comm, comm, pol, config)
   413  	for _, bootPeer := range bootstrapPeers {
   414  		bp := bootPeer
   415  		discSvc.Connect(NetworkMember{Endpoint: bp, InternalEndpoint: bootPeer}, func() (*PeerIdentification, error) {
   416  			return &PeerIdentification{SelfOrg: true, ID: common.PKIidType(bp)}, nil
   417  		})
   418  	}
   419  
   420  	gossInst := &gossipInstance{comm: comm, gRGCserv: s, Discovery: discSvc, lsnr: ll, shouldGossip: shouldGossip, port: port, msgInterceptor: f}
   421  
   422  	proto.RegisterGossipServer(s, gossInst)
   423  	go s.Serve(ll)
   424  
   425  	return gossInst
   426  }
   427  
   428  func bootPeer(port int) string {
   429  	return fmt.Sprintf("localhost:%d", port)
   430  }
   431  
   432  func TestClone(t *testing.T) {
   433  	nm := &NetworkMember{
   434  		PKIid: common.PKIidType("abc"),
   435  		Properties: &proto.Properties{
   436  			LedgerHeight: 1,
   437  			LeftChannel:  true,
   438  		},
   439  		Envelope: &proto.Envelope{
   440  			Payload: []byte("payload"),
   441  		},
   442  		InternalEndpoint: "internal",
   443  		Metadata:         []byte{1, 2, 3},
   444  		Endpoint:         "endpoint",
   445  	}
   446  
   447  	nm2 := nm.Clone()
   448  	assert.Equal(t, *nm, nm2, "Clones are different")
   449  	assert.False(t, nm.Properties == nm2.Properties, "Cloning should be deep and not shallow")
   450  	assert.False(t, nm.Envelope == nm2.Envelope, "Cloning should be deep and not shallow")
   451  }
   452  
   453  func TestHasExternalEndpoints(t *testing.T) {
   454  	memberWithEndpoint := NetworkMember{Endpoint: "foo"}
   455  	memberWithoutEndpoint := NetworkMember{}
   456  
   457  	assert.True(t, HasExternalEndpoint(memberWithEndpoint))
   458  	assert.False(t, HasExternalEndpoint(memberWithoutEndpoint))
   459  }
   460  
   461  func TestToString(t *testing.T) {
   462  	nm := NetworkMember{
   463  		Endpoint:         "a",
   464  		InternalEndpoint: "b",
   465  	}
   466  	assert.Equal(t, "b", nm.PreferredEndpoint())
   467  	nm = NetworkMember{
   468  		Endpoint: "a",
   469  	}
   470  	assert.Equal(t, "a", nm.PreferredEndpoint())
   471  
   472  	now := time.Now()
   473  	ts := &timestamp{
   474  		incTime: now,
   475  		seqNum:  uint64(42),
   476  	}
   477  	assert.Equal(t, fmt.Sprintf("%d, %d", now.UnixNano(), 42), fmt.Sprint(ts))
   478  }
   479  
   480  func TestNetworkMemberString(t *testing.T) {
   481  	tests := []struct {
   482  		input    NetworkMember
   483  		expected string
   484  	}{
   485  		{
   486  			input:    NetworkMember{Endpoint: "endpoint", InternalEndpoint: "internal-endpoint", PKIid: common.PKIidType{0, 1, 2, 3}, Metadata: nil},
   487  			expected: "Endpoint: endpoint, InternalEndpoint: internal-endpoint, PKI-ID: 00010203, Metadata: ",
   488  		},
   489  		{
   490  			input:    NetworkMember{Endpoint: "endpoint", InternalEndpoint: "internal-endpoint", PKIid: common.PKIidType{0, 1, 2, 3}, Metadata: []byte{4, 5, 6, 7}},
   491  			expected: "Endpoint: endpoint, InternalEndpoint: internal-endpoint, PKI-ID: 00010203, Metadata: 04050607",
   492  		},
   493  	}
   494  	for _, tt := range tests {
   495  		assert.Equal(t, tt.expected, tt.input.String())
   496  	}
   497  }
   498  
   499  func TestBadInput(t *testing.T) {
   500  	inst := createDiscoveryInstance(2048, fmt.Sprintf("d%d", 0), []string{})
   501  	inst.Discovery.(*gossipDiscoveryImpl).handleMsgFromComm(nil)
   502  	s, _ := protoext.NoopSign(&proto.GossipMessage{
   503  		Content: &proto.GossipMessage_DataMsg{
   504  			DataMsg: &proto.DataMessage{},
   505  		},
   506  	})
   507  	inst.Discovery.(*gossipDiscoveryImpl).handleMsgFromComm(&dummyReceivedMessage{
   508  		msg: s,
   509  		info: &protoext.ConnectionInfo{
   510  			ID: common.PKIidType("testID"),
   511  		},
   512  	})
   513  }
   514  
   515  func TestConnect(t *testing.T) {
   516  	t.Parallel()
   517  	nodeNum := 10
   518  	instances := []*gossipInstance{}
   519  	firstSentMemReqMsgs := make(chan *protoext.SignedGossipMessage, nodeNum)
   520  	for i := 0; i < nodeNum; i++ {
   521  		inst := createDiscoveryInstance(7611+i, fmt.Sprintf("d%d", i), []string{})
   522  
   523  		inst.comm.lock.Lock()
   524  		inst.comm.mock = &mock.Mock{}
   525  		inst.comm.mock.On("SendToPeer", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
   526  			inst := inst
   527  			msg := arguments.Get(1).(*protoext.SignedGossipMessage)
   528  			if req := msg.GetMemReq(); req != nil {
   529  				selfMsg, _ := protoext.EnvelopeToGossipMessage(req.SelfInformation)
   530  				firstSentMemReqMsgs <- selfMsg
   531  				inst.comm.lock.Lock()
   532  				inst.comm.mock = nil
   533  				inst.comm.lock.Unlock()
   534  			}
   535  		})
   536  		inst.comm.mock.On("Ping", mock.Anything)
   537  		inst.comm.lock.Unlock()
   538  		instances = append(instances, inst)
   539  		j := (i + 1) % 10
   540  		endpoint := fmt.Sprintf("localhost:%d", 7611+j)
   541  		netMember2Connect2 := NetworkMember{Endpoint: endpoint, PKIid: []byte(endpoint)}
   542  		inst.Connect(netMember2Connect2, func() (identification *PeerIdentification, err error) {
   543  			return &PeerIdentification{SelfOrg: false, ID: nil}, nil
   544  		})
   545  	}
   546  
   547  	time.Sleep(time.Second * 3)
   548  	fullMembership := func() bool {
   549  		return nodeNum-1 == len(instances[nodeNum-1].GetMembership())
   550  	}
   551  	waitUntilOrFail(t, fullMembership)
   552  
   553  	discInst := instances[rand.Intn(len(instances))].Discovery.(*gossipDiscoveryImpl)
   554  	mr, _ := discInst.createMembershipRequest(true)
   555  	am, _ := protoext.EnvelopeToGossipMessage(mr.GetMemReq().SelfInformation)
   556  	assert.NotNil(t, am.SecretEnvelope)
   557  	mr2, _ := discInst.createMembershipRequest(false)
   558  	am, _ = protoext.EnvelopeToGossipMessage(mr2.GetMemReq().SelfInformation)
   559  	assert.Nil(t, am.SecretEnvelope)
   560  	stopInstances(t, instances)
   561  	assert.Len(t, firstSentMemReqMsgs, 10)
   562  	close(firstSentMemReqMsgs)
   563  	for firstSentSelfMsg := range firstSentMemReqMsgs {
   564  		assert.Nil(t, firstSentSelfMsg.Envelope.SecretEnvelope)
   565  	}
   566  }
   567  
   568  func TestValidation(t *testing.T) {
   569  	t.Parallel()
   570  
   571  	// Scenarios: This test contains the following sub-tests:
   572  	// 1) alive message validation: a message is validated <==> it entered the message store
   573  	// 2) request/response message validation:
   574  	//   2.1) alive messages from membership requests/responses are validated.
    575  	//   2.2) once alive messages enter the message store, receiving them again via membership responses
    576  	//        doesn't trigger validation, but receiving them via membership requests does.
   577  
   578  	wrapReceivedMessage := func(msg *protoext.SignedGossipMessage) protoext.ReceivedMessage {
   579  		return &dummyReceivedMessage{
   580  			msg: msg,
   581  			info: &protoext.ConnectionInfo{
   582  				ID: common.PKIidType("testID"),
   583  			},
   584  		}
   585  	}
   586  
   587  	requestMessagesReceived := make(chan *protoext.SignedGossipMessage, 100)
   588  	responseMessagesReceived := make(chan *protoext.SignedGossipMessage, 100)
   589  	aliveMessagesReceived := make(chan *protoext.SignedGossipMessage, 5000)
   590  
   591  	var membershipRequest atomic.Value
   592  	var membershipResponseWithAlivePeers atomic.Value
   593  	var membershipResponseWithDeadPeers atomic.Value
   594  
   595  	recordMembershipRequest := func(req *protoext.SignedGossipMessage) {
   596  		msg, _ := protoext.EnvelopeToGossipMessage(req.GetMemReq().SelfInformation)
   597  		membershipRequest.Store(req)
   598  		requestMessagesReceived <- msg
   599  	}
   600  
   601  	recordMembershipResponse := func(res *protoext.SignedGossipMessage) {
   602  		memRes := res.GetMemRes()
   603  		if len(memRes.GetAlive()) > 0 {
   604  			membershipResponseWithAlivePeers.Store(res)
   605  		}
   606  		if len(memRes.GetDead()) > 0 {
   607  			membershipResponseWithDeadPeers.Store(res)
   608  		}
   609  		responseMessagesReceived <- res
   610  	}
   611  
   612  	interceptor := func(msg *protoext.SignedGossipMessage) {
   613  		if memReq := msg.GetMemReq(); memReq != nil {
   614  			recordMembershipRequest(msg)
   615  			return
   616  		}
   617  
   618  		if memRes := msg.GetMemRes(); memRes != nil {
   619  			recordMembershipResponse(msg)
   620  			return
   621  		}
   622  		// Else, it's an alive message
   623  		aliveMessagesReceived <- msg
   624  	}
   625  
   626  	// p3 is the boot peer of p1, and p1 is the boot peer of p2.
   627  	// p1 sends a (membership) request to p3, and receives a (membership) response back.
   628  	// p2 sends a (membership) request to p1.
   629  	// Therefore, p1 receives both a membership request and a response.
   630  	p1 := createDiscoveryInstanceThatGossipsWithInterceptors(4675, "p1", []string{bootPeer(4677)}, true, noopPolicy, interceptor, defaultTestConfig)
   631  	p2 := createDiscoveryInstance(4676, "p2", []string{bootPeer(4675)})
   632  	p3 := createDiscoveryInstance(4677, "p3", nil)
   633  	instances := []*gossipInstance{p1, p2, p3}
   634  
   635  	assertMembership(t, instances, 2)
   636  
   637  	instances = []*gossipInstance{p1, p2}
   638  	// Stop p3 and wait until its death is detected
   639  	p3.Stop()
   640  	assertMembership(t, instances, 1)
   641  	// Force p1 to send a membership request so it can receive back a response
   642  	// with dead peers.
   643  	p1.InitiateSync(1)
   644  
   645  	// Wait until a response with a dead peer is received
   646  	waitUntilOrFail(t, func() bool {
   647  		return membershipResponseWithDeadPeers.Load() != nil
   648  	})
   649  
   650  	p1.Stop()
   651  	p2.Stop()
   652  
   653  	close(aliveMessagesReceived)
   654  	t.Log("Recorded", len(aliveMessagesReceived), "alive messages")
   655  	t.Log("Recorded", len(requestMessagesReceived), "request messages")
   656  	t.Log("Recorded", len(responseMessagesReceived), "response messages")
   657  
   658  	// Ensure we got alive messages from membership requests and from membership responses
   659  	assert.NotNil(t, membershipResponseWithAlivePeers.Load())
   660  	assert.NotNil(t, membershipRequest.Load())
   661  
   662  	t.Run("alive message", func(t *testing.T) {
   663  		t.Parallel()
   664  		// Spawn a new peer - p4
   665  		p4 := createDiscoveryInstance(4678, "p1", nil)
   666  		defer p4.Stop()
   667  		// Record messages validated
   668  		validatedMessages := make(chan *protoext.SignedGossipMessage, 5000)
   669  		p4.comm.recordValidation(validatedMessages)
   670  		tmpMsgs := make(chan *protoext.SignedGossipMessage, 5000)
   671  		// Replay the messages sent to p1 into p4, and also save them into a temporary channel
   672  		for msg := range aliveMessagesReceived {
   673  			p4.comm.incMsgs <- wrapReceivedMessage(msg)
   674  			tmpMsgs <- msg
   675  		}
   676  
    677  		// Insert the messages received by p4 into the message store
   678  		policy := protoext.NewGossipMessageComparator(0)
   679  		msgStore := msgstore.NewMessageStore(policy, func(_ interface{}) {})
   680  		close(tmpMsgs)
   681  		for msg := range tmpMsgs {
   682  			if msgStore.Add(msg) {
   683  				// Ensure the message was verified if it can be added into the message store
   684  				expectedMessage := <-validatedMessages
   685  				assert.Equal(t, expectedMessage, msg)
   686  			}
   687  		}
   688  		// Ensure we didn't validate any other messages.
   689  		assert.Empty(t, validatedMessages)
   690  	})
   691  
   692  	req := membershipRequest.Load().(*protoext.SignedGossipMessage)
   693  	res := membershipResponseWithDeadPeers.Load().(*protoext.SignedGossipMessage)
   694  	// Ensure the membership response contains both alive and dead peers
   695  	assert.Len(t, res.GetMemRes().GetAlive(), 2)
   696  	assert.Len(t, res.GetMemRes().GetDead(), 1)
   697  
   698  	for _, testCase := range []struct {
   699  		name                  string
   700  		expectedAliveMessages int
   701  		port                  int
   702  		message               *protoext.SignedGossipMessage
   703  		shouldBeReValidated   bool
   704  	}{
   705  		{
   706  			name:                  "membership request",
   707  			expectedAliveMessages: 1,
   708  			message:               req,
   709  			port:                  4679,
   710  			shouldBeReValidated:   true,
   711  		},
   712  		{
   713  			name:                  "membership response",
   714  			expectedAliveMessages: 3,
   715  			message:               res,
   716  			port:                  4680,
   717  		},
   718  	} {
   719  		testCase := testCase
   720  		t.Run(testCase.name, func(t *testing.T) {
   721  			t.Parallel()
   722  			p := createDiscoveryInstance(testCase.port, "p", nil)
   723  			defer p.Stop()
   724  			// Record messages validated
   725  			validatedMessages := make(chan *protoext.SignedGossipMessage, testCase.expectedAliveMessages)
   726  			p.comm.recordValidation(validatedMessages)
   727  
   728  			p.comm.incMsgs <- wrapReceivedMessage(testCase.message)
   729  			// Ensure all messages were validated
   730  			for i := 0; i < testCase.expectedAliveMessages; i++ {
   731  				validatedMsg := <-validatedMessages
   732  				// send the message directly to be included in the message store
   733  				p.comm.incMsgs <- wrapReceivedMessage(validatedMsg)
   734  			}
   735  			// Wait for the messages to be validated
   736  			for i := 0; i < testCase.expectedAliveMessages; i++ {
   737  				<-validatedMessages
   738  			}
    739  			// No more than testCase.expectedAliveMessages messages should have been validated
   740  			assert.Empty(t, validatedMessages)
   741  
   742  			if !testCase.shouldBeReValidated {
   743  				// Re-submit the message twice and ensure it wasn't validated.
    744  				// If it were validated, a panic would occur, because a send on the closed
    745  				// validatedMessages channel would be attempted.
   746  				close(validatedMessages)
   747  			}
   748  			p.comm.incMsgs <- wrapReceivedMessage(testCase.message)
   749  			p.comm.incMsgs <- wrapReceivedMessage(testCase.message)
   750  			// Wait until the size of the channel is zero. It means at least one message was processed.
   751  			waitUntilOrFail(t, func() bool {
   752  				return len(p.comm.incMsgs) == 0
   753  			})
   754  		})
   755  	}
   756  }
   757  
   758  func TestUpdate(t *testing.T) {
   759  	t.Parallel()
   760  	nodeNum := 5
   761  	bootPeers := []string{bootPeer(6611), bootPeer(6612)}
   762  	instances := []*gossipInstance{}
   763  
   764  	inst := createDiscoveryInstance(6611, "d1", bootPeers)
   765  	instances = append(instances, inst)
   766  
   767  	inst = createDiscoveryInstance(6612, "d2", bootPeers)
   768  	instances = append(instances, inst)
   769  
   770  	for i := 3; i <= nodeNum; i++ {
   771  		id := fmt.Sprintf("d%d", i)
   772  		inst = createDiscoveryInstance(6610+i, id, bootPeers)
   773  		instances = append(instances, inst)
   774  	}
   775  
   776  	fullMembership := func() bool {
   777  		return nodeNum-1 == len(instances[nodeNum-1].GetMembership())
   778  	}
   779  
   780  	waitUntilOrFail(t, fullMembership)
   781  
   782  	instances[0].UpdateMetadata([]byte("bla bla"))
   783  	instances[nodeNum-1].UpdateEndpoint("localhost:5511")
   784  
   785  	checkMembership := func() bool {
   786  		for _, member := range instances[nodeNum-1].GetMembership() {
   787  			if string(member.PKIid) == instances[0].comm.id {
   788  				if "bla bla" != string(member.Metadata) {
   789  					return false
   790  				}
   791  			}
   792  		}
   793  
   794  		for _, member := range instances[0].GetMembership() {
   795  			if string(member.PKIid) == instances[nodeNum-1].comm.id {
   796  				if "localhost:5511" != string(member.Endpoint) {
   797  					return false
   798  				}
   799  			}
   800  		}
   801  		return true
   802  	}
   803  
   804  	waitUntilOrFail(t, checkMembership)
   805  	stopInstances(t, instances)
   806  }
   807  
   808  func TestInitiateSync(t *testing.T) {
   809  	t.Parallel()
   810  	nodeNum := 10
   811  	bootPeers := []string{bootPeer(3611), bootPeer(3612)}
   812  	instances := []*gossipInstance{}
   813  
   814  	toDie := int32(0)
   815  	for i := 1; i <= nodeNum; i++ {
   816  		id := fmt.Sprintf("d%d", i)
   817  		inst := createDiscoveryInstanceWithNoGossip(3610+i, id, bootPeers)
   818  		instances = append(instances, inst)
   819  		go func() {
   820  			for {
   821  				if atomic.LoadInt32(&toDie) == int32(1) {
   822  					return
   823  				}
   824  				time.Sleep(defaultTestConfig.AliveExpirationTimeout / 3)
   825  				inst.InitiateSync(9)
   826  			}
   827  		}()
   828  	}
   829  	time.Sleep(defaultTestConfig.AliveExpirationTimeout * 4)
   830  	assertMembership(t, instances, nodeNum-1)
   831  	atomic.StoreInt32(&toDie, int32(1))
   832  	stopInstances(t, instances)
   833  }
   834  
   835  func TestSelf(t *testing.T) {
   836  	t.Parallel()
   837  	inst := createDiscoveryInstance(13463, "d1", []string{})
   838  	defer inst.Stop()
   839  	env := inst.Self().Envelope
   840  	sMsg, err := protoext.EnvelopeToGossipMessage(env)
   841  	assert.NoError(t, err)
   842  	member := sMsg.GetAliveMsg().Membership
   843  	assert.Equal(t, "localhost:13463", member.Endpoint)
   844  	assert.Equal(t, []byte("localhost:13463"), member.PkiId)
   845  
   846  	assert.Equal(t, "localhost:13463", inst.Self().Endpoint)
   847  	assert.Equal(t, common.PKIidType("localhost:13463"), inst.Self().PKIid)
   848  }
   849  
   850  func TestExpiration(t *testing.T) {
   851  	t.Parallel()
   852  	nodeNum := 5
   853  	bootPeers := []string{bootPeer(2611), bootPeer(2612)}
   854  	instances := []*gossipInstance{}
   855  
   856  	inst := createDiscoveryInstance(2611, "d1", bootPeers)
   857  	instances = append(instances, inst)
   858  
   859  	inst = createDiscoveryInstance(2612, "d2", bootPeers)
   860  	instances = append(instances, inst)
   861  
   862  	for i := 3; i <= nodeNum; i++ {
   863  		id := fmt.Sprintf("d%d", i)
   864  		inst = createDiscoveryInstance(2610+i, id, bootPeers)
   865  		instances = append(instances, inst)
   866  	}
   867  
   868  	assertMembership(t, instances, nodeNum-1)
   869  
   870  	waitUntilOrFailBlocking(t, instances[nodeNum-1].Stop)
   871  	waitUntilOrFailBlocking(t, instances[nodeNum-2].Stop)
   872  
   873  	assertMembership(t, instances[:len(instances)-2], nodeNum-3)
   874  
   875  	stopAction := &sync.WaitGroup{}
   876  	for i, inst := range instances {
   877  		if i+2 == nodeNum {
   878  			break
   879  		}
   880  		stopAction.Add(1)
   881  		go func(inst *gossipInstance) {
   882  			defer stopAction.Done()
   883  			inst.Stop()
   884  		}(inst)
   885  	}
   886  
   887  	waitUntilOrFailBlocking(t, stopAction.Wait)
   888  }
   889  
   890  func TestGetFullMembership(t *testing.T) {
   891  	t.Parallel()
   892  	nodeNum := 15
   893  	bootPeers := []string{bootPeer(5511), bootPeer(5512)}
   894  	instances := []*gossipInstance{}
   895  	var inst *gossipInstance
   896  
   897  	for i := 3; i <= nodeNum; i++ {
   898  		id := fmt.Sprintf("d%d", i)
   899  		inst = createDiscoveryInstance(5510+i, id, bootPeers)
   900  		instances = append(instances, inst)
   901  	}
   902  
   903  	time.Sleep(time.Second)
   904  
   905  	inst = createDiscoveryInstance(5511, "d1", bootPeers)
   906  	instances = append(instances, inst)
   907  
   908  	inst = createDiscoveryInstance(5512, "d2", bootPeers)
   909  	instances = append(instances, inst)
   910  
   911  	assertMembership(t, instances, nodeNum-1)
   912  
   913  	// Ensure that internal endpoint was propagated to everyone
   914  	for _, inst := range instances {
   915  		for _, member := range inst.GetMembership() {
   916  			assert.NotEmpty(t, member.InternalEndpoint)
   917  			assert.NotEmpty(t, member.Endpoint)
   918  		}
   919  	}
   920  
   921  	// Check that Lookup() is valid
   922  	for _, inst := range instances {
   923  		for _, member := range inst.GetMembership() {
   924  			assert.Equal(t, string(member.PKIid), inst.Lookup(member.PKIid).Endpoint)
   925  			assert.Equal(t, member.PKIid, inst.Lookup(member.PKIid).PKIid)
   926  		}
   927  	}
   928  
   929  	stopInstances(t, instances)
   930  }
   931  
   932  func TestGossipDiscoveryStopping(t *testing.T) {
   933  	t.Parallel()
   934  	inst := createDiscoveryInstance(9611, "d1", []string{bootPeer(9611)})
   935  	time.Sleep(time.Second)
   936  	waitUntilOrFailBlocking(t, inst.Stop)
   937  }
   938  
   939  func TestGossipDiscoverySkipConnectingToLocalhostBootstrap(t *testing.T) {
   940  	t.Parallel()
   941  	inst := createDiscoveryInstance(11611, "d1", []string{"localhost:11611", "127.0.0.1:11611"})
   942  	inst.comm.lock.Lock()
   943  	inst.comm.mock = &mock.Mock{}
   944  	inst.comm.mock.On("SendToPeer", mock.Anything, mock.Anything).Run(func(mock.Arguments) {
   945  		t.Fatal("Should not have connected to any peer")
   946  	})
   947  	inst.comm.mock.On("Ping", mock.Anything).Run(func(mock.Arguments) {
   948  		t.Fatal("Should not have connected to any peer")
   949  	})
   950  	inst.comm.lock.Unlock()
   951  	time.Sleep(time.Second * 3)
   952  	waitUntilOrFailBlocking(t, inst.Stop)
   953  }
   954  
   955  func TestConvergence(t *testing.T) {
   956  	t.Parallel()
   957  	// scenario:
   958  	// {boot peer: [peer list]}
   959  	// {d1: d2, d3, d4}
   960  	// {d5: d6, d7, d8}
   961  	// {d9: d10, d11, d12}
   962  	// connect all boot peers with d13
   963  	// take down d13
   964  	// ensure still full membership
   965  	instances := []*gossipInstance{}
   966  	for _, i := range []int{1, 5, 9} {
   967  		bootPort := 4610 + i
   968  		id := fmt.Sprintf("d%d", i)
   969  		leader := createDiscoveryInstance(bootPort, id, []string{})
   970  		instances = append(instances, leader)
   971  		for minionIndex := 1; minionIndex <= 3; minionIndex++ {
   972  			id := fmt.Sprintf("d%d", i+minionIndex)
   973  			minion := createDiscoveryInstance(4610+minionIndex+i, id, []string{bootPeer(bootPort)})
   974  			instances = append(instances, minion)
   975  		}
   976  	}
   977  
   978  	assertMembership(t, instances, 3)
   979  	connector := createDiscoveryInstance(4623, "d13", []string{bootPeer(4611), bootPeer(4615), bootPeer(4619)})
   980  	instances = append(instances, connector)
   981  	assertMembership(t, instances, 12)
   982  	connector.Stop()
   983  	instances = instances[:len(instances)-1]
   984  	assertMembership(t, instances, 11)
   985  	stopInstances(t, instances)
   986  }
   987  
   988  func TestDisclosurePolicyWithPull(t *testing.T) {
   989  	t.Parallel()
   990  	// Scenario: run 2 groups of peers that simulate 2 organizations:
   991  	// {p0, p1, p2, p3, p4}
   992  	// {p5, p6, p7, p8, p9}
   993  	// Only peers that have an even id have external addresses
   994  	// and only these peers should be published to peers of the other group,
   995  	// while the only ones that need to know about them are peers
   996  	// that have an even id themselves.
    997  	// Furthermore, peers in different sets should not know about the internal addresses of
    998  	// other peers.
   999  
   1000  	// This bootstrap map maps each peer to its own bootstrap peer.
   1001  	// In practice (production), peers should only use peers of their own org as bootstrap peers,
   1002  	// but the discovery layer is ignorant of organizations.
  1003  	bootPeerMap := map[int]int{
  1004  		8610: 8616,
  1005  		8611: 8610,
  1006  		8612: 8610,
  1007  		8613: 8610,
  1008  		8614: 8610,
  1009  		8615: 8616,
  1010  		8616: 8610,
  1011  		8617: 8616,
  1012  		8618: 8616,
  1013  		8619: 8616,
  1014  	}
  1015  
   1016  	// This map maps each peer to the peers it should know about in the test scenario.
  1017  	peersThatShouldBeKnownToPeers := map[int][]int{
  1018  		8610: {8611, 8612, 8613, 8614, 8616, 8618},
  1019  		8611: {8610, 8612, 8613, 8614},
  1020  		8612: {8610, 8611, 8613, 8614, 8616, 8618},
  1021  		8613: {8610, 8611, 8612, 8614},
  1022  		8614: {8610, 8611, 8612, 8613, 8616, 8618},
  1023  		8615: {8616, 8617, 8618, 8619},
  1024  		8616: {8610, 8612, 8614, 8615, 8617, 8618, 8619},
  1025  		8617: {8615, 8616, 8618, 8619},
  1026  		8618: {8610, 8612, 8614, 8615, 8616, 8617, 8619},
  1027  		8619: {8615, 8616, 8617, 8618},
  1028  	}
  1029  	// Create the peers in the two groups
  1030  	instances1, instances2 := createDisjointPeerGroupsWithNoGossip(bootPeerMap)
   1031  	// Wait for them to establish membership. This should be more than enough time
   1032  	// because the instances are configured to pull membership at a very high frequency from
   1033  	// up to 10 peers (which results in pulling from everyone).
  1034  	waitUntilOrFail(t, func() bool {
  1035  		for _, inst := range append(instances1, instances2...) {
  1036  			// Ensure the expected membership is equal in size to the actual membership
  1037  			// of each peer.
  1038  			portsOfKnownMembers := portsOfMembers(inst.GetMembership())
  1039  			if len(peersThatShouldBeKnownToPeers[inst.port]) != len(portsOfKnownMembers) {
  1040  				return false
  1041  			}
  1042  		}
  1043  		return true
  1044  	})
  1045  	for _, inst := range append(instances1, instances2...) {
  1046  		portsOfKnownMembers := portsOfMembers(inst.GetMembership())
  1047  		// Ensure the expected membership is equal to the actual membership
   1048  		// of each peer. portsOfMembers returns a sorted slice, so assert.Equal does the job.
  1049  		assert.Equal(t, peersThatShouldBeKnownToPeers[inst.port], portsOfKnownMembers)
   1050  		// Next, check that internal endpoints aren't leaked across groups.
  1051  		for _, knownPeer := range inst.GetMembership() {
  1052  			// If internal endpoint is known, ensure the peers are in the same group
  1053  			// unless the peer in question is a peer that has a public address.
  1054  			// We cannot control what we disclose about ourselves when we send a membership request
  1055  			if len(knownPeer.InternalEndpoint) > 0 && inst.port%2 != 0 {
  1056  				bothInGroup1 := portOfEndpoint(knownPeer.Endpoint) < 8615 && inst.port < 8615
  1057  				bothInGroup2 := portOfEndpoint(knownPeer.Endpoint) >= 8615 && inst.port >= 8615
  1058  				assert.True(t, bothInGroup1 || bothInGroup2, "%v knows about %v's internal endpoint", inst.port, knownPeer.InternalEndpoint)
  1059  			}
  1060  		}
  1061  	}
  1062  
  1063  	t.Log("Shutting down instance 0...")
   1064  	// Now, we shut down instance 0 and ensure that peers that shouldn't know it
   1065  	// don't learn about it via membership requests.
  1066  	stopInstances(t, []*gossipInstance{instances1[0]})
  1067  	time.Sleep(time.Second * 6)
  1068  	for _, inst := range append(instances1[1:], instances2...) {
  1069  		if peersThatShouldBeKnownToPeers[inst.port][0] == 8610 {
  1070  			assert.Equal(t, 1, inst.Discovery.(*gossipDiscoveryImpl).deadMembership.Size())
  1071  		} else {
  1072  			assert.Equal(t, 0, inst.Discovery.(*gossipDiscoveryImpl).deadMembership.Size())
  1073  		}
  1074  	}
  1075  	stopInstances(t, instances1[1:])
  1076  	stopInstances(t, instances2)
  1077  }
  1078  
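         // createDisjointPeerGroupsWithNoGossip creates two groups of 5 peers each (ports 8610-8614 and
         // 8615-8619), wiring each peer to its bootstrap peer from bootPeerMap and to the disclosure
         // policy returned by discPolForPeer, and starting a periodic membership sync on each of them.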
  1079  func createDisjointPeerGroupsWithNoGossip(bootPeerMap map[int]int) ([]*gossipInstance, []*gossipInstance) {
  1080  	instances1 := []*gossipInstance{}
  1081  	instances2 := []*gossipInstance{}
  1082  	for group := 0; group < 2; group++ {
  1083  		for i := 0; i < 5; i++ {
  1084  			group := group
  1085  			id := fmt.Sprintf("id%d", group*5+i)
  1086  			port := 8610 + group*5 + i
  1087  			bootPeers := []string{bootPeer(bootPeerMap[port])}
  1088  			pol := discPolForPeer(port)
  1089  			inst := createDiscoveryInstanceWithNoGossipWithDisclosurePolicy(8610+group*5+i, id, bootPeers, pol)
  1090  			inst.initiateSync(defaultTestConfig.AliveExpirationTimeout/3, 10)
  1091  			if group == 0 {
  1092  				instances1 = append(instances1, inst)
  1093  			} else {
  1094  				instances2 = append(instances2, inst)
  1095  			}
  1096  		}
  1097  	}
  1098  	return instances1, instances2
  1099  }
  1100  
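         // discPolForPeer implements the two-organization disclosure policy used above: peers in the same
         // group (both below, or both at or above, port 8615) see each other's alive messages, alive
         // messages of even-port peers are also disclosed to even-port peers of the other group, and the
         // secret envelope (internal endpoint) is stripped whenever a message crosses the group boundary.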
  1101  func discPolForPeer(selfPort int) DisclosurePolicy {
  1102  	return func(remotePeer *NetworkMember) (Sieve, EnvelopeFilter) {
  1103  		targetPortStr := strings.Split(remotePeer.Endpoint, ":")[1]
  1104  		targetPort, _ := strconv.ParseInt(targetPortStr, 10, 64)
  1105  		return func(msg *protoext.SignedGossipMessage) bool {
  1106  				portOfAliveMsgStr := strings.Split(msg.GetAliveMsg().Membership.Endpoint, ":")[1]
  1107  				portOfAliveMsg, _ := strconv.ParseInt(portOfAliveMsgStr, 10, 64)
  1108  
  1109  				if portOfAliveMsg < 8615 && targetPort < 8615 {
  1110  					return true
  1111  				}
  1112  				if portOfAliveMsg >= 8615 && targetPort >= 8615 {
  1113  					return true
  1114  				}
  1115  
  1116  				// Else, expose peers with even ids to other peers with even ids
  1117  				return portOfAliveMsg%2 == 0 && targetPort%2 == 0
  1118  			}, func(msg *protoext.SignedGossipMessage) *proto.Envelope {
  1119  				envelope := protoG.Clone(msg.Envelope).(*proto.Envelope)
  1120  				if selfPort < 8615 && targetPort >= 8615 {
  1121  					envelope.SecretEnvelope = nil
  1122  				}
  1123  
  1124  				if selfPort >= 8615 && targetPort < 8615 {
  1125  					envelope.SecretEnvelope = nil
  1126  				}
  1127  
  1128  				return envelope
  1129  			}
  1130  	}
  1131  }
  1132  
  1133  func TestCertificateChange(t *testing.T) {
  1134  	t.Parallel()
  1135  
  1136  	bootPeers := []string{bootPeer(42611), bootPeer(42612), bootPeer(42613)}
  1137  	p1 := createDiscoveryInstance(42611, "d1", bootPeers)
  1138  	p2 := createDiscoveryInstance(42612, "d2", bootPeers)
  1139  	p3 := createDiscoveryInstance(42613, "d3", bootPeers)
  1140  
  1141  	// Wait for membership establishment
  1142  	assertMembership(t, []*gossipInstance{p1, p2, p3}, 2)
  1143  
  1144  	// Shutdown the second peer
  1145  	waitUntilOrFailBlocking(t, p2.Stop)
  1146  
  1147  	var pingCountFrom1 uint32
  1148  	var pingCountFrom3 uint32
  1149  	// Program mocks to increment ping counters
  1150  	p1.comm.lock.Lock()
  1151  	p1.comm.mock = &mock.Mock{}
  1152  	p1.comm.mock.On("SendToPeer", mock.Anything, mock.Anything)
  1153  	p1.comm.mock.On("Ping").Run(func(arguments mock.Arguments) {
  1154  		atomic.AddUint32(&pingCountFrom1, 1)
  1155  	})
  1156  	p1.comm.lock.Unlock()
  1157  
  1158  	p3.comm.lock.Lock()
  1159  	p3.comm.mock = &mock.Mock{}
  1160  	p3.comm.mock.On("SendToPeer", mock.Anything, mock.Anything)
  1161  	p3.comm.mock.On("Ping").Run(func(arguments mock.Arguments) {
  1162  		atomic.AddUint32(&pingCountFrom3, 1)
  1163  	})
  1164  	p3.comm.lock.Unlock()
  1165  
  1166  	pingCount1 := func() uint32 {
  1167  		return atomic.LoadUint32(&pingCountFrom1)
  1168  	}
  1169  
  1170  	pingCount3 := func() uint32 {
  1171  		return atomic.LoadUint32(&pingCountFrom3)
  1172  	}
  1173  
  1174  	c1 := pingCount1()
  1175  	c3 := pingCount3()
  1176  
  1177  	// Ensure the first peer and third peer try to reconnect to it
  1178  	waitUntilTimeoutOrFail(t, func() bool {
  1179  		return pingCount1() > c1 && pingCount3() > c3
  1180  	}, timeout)
  1181  
  1182  	// Tell the first peer that the second peer's PKI-ID has changed
  1183  	// So that it will purge it from the membership entirely
  1184  	p1.comm.identitySwitch <- common.PKIidType("localhost:42612")
  1185  
  1186  	c1 = pingCount1()
  1187  	c3 = pingCount3()
  1188  	// Ensure third peer tries to reconnect to it
  1189  	waitUntilTimeoutOrFail(t, func() bool {
  1190  		return pingCount3() > c3
  1191  	}, timeout)
  1192  
  1193  	// Ensure the first peer ceases from trying
  1194  	assert.Equal(t, c1, pingCount1())
  1195  
  1196  	waitUntilOrFailBlocking(t, p1.Stop)
  1197  	waitUntilOrFailBlocking(t, p3.Stop)
  1198  }
  1199  
  1200  func TestMsgStoreExpiration(t *testing.T) {
   1201  	// Starts 4 instances, waits for membership to build, then stops 2 instances.
   1202  	// Checks that membership in the 2 running instances becomes 2.
   1203  	// Waits for expiration and checks that alive messages and related entities in maps are removed from the running instances.
  1204  	t.Parallel()
  1205  	nodeNum := 4
  1206  	bootPeers := []string{bootPeer(12611), bootPeer(12612)}
  1207  	instances := []*gossipInstance{}
  1208  
  1209  	inst := createDiscoveryInstance(12611, "d1", bootPeers)
  1210  	instances = append(instances, inst)
  1211  
  1212  	inst = createDiscoveryInstance(12612, "d2", bootPeers)
  1213  	instances = append(instances, inst)
  1214  
  1215  	for i := 3; i <= nodeNum; i++ {
  1216  		id := fmt.Sprintf("d%d", i)
  1217  		inst = createDiscoveryInstance(12610+i, id, bootPeers)
  1218  		instances = append(instances, inst)
  1219  	}
  1220  
  1221  	assertMembership(t, instances, nodeNum-1)
  1222  
  1223  	waitUntilOrFailBlocking(t, instances[nodeNum-1].Stop)
  1224  	waitUntilOrFailBlocking(t, instances[nodeNum-2].Stop)
  1225  
  1226  	assertMembership(t, instances[:len(instances)-2], nodeNum-3)
  1227  
  1228  	checkMessages := func() bool {
  1229  		for _, inst := range instances[:len(instances)-2] {
  1230  			for _, downInst := range instances[len(instances)-2:] {
  1231  				downCastInst := inst.discoveryImpl()
  1232  				downCastInst.lock.RLock()
  1233  				if _, exist := downCastInst.aliveLastTS[string(downInst.discoveryImpl().self.PKIid)]; exist {
  1234  					downCastInst.lock.RUnlock()
  1235  					return false
  1236  				}
  1237  				if _, exist := downCastInst.deadLastTS[string(downInst.discoveryImpl().self.PKIid)]; exist {
  1238  					downCastInst.lock.RUnlock()
  1239  					return false
  1240  				}
  1241  				if _, exist := downCastInst.id2Member[string(downInst.discoveryImpl().self.PKIid)]; exist {
  1242  					downCastInst.lock.RUnlock()
  1243  					return false
  1244  				}
  1245  				if downCastInst.aliveMembership.MsgByID(downInst.discoveryImpl().self.PKIid) != nil {
  1246  					downCastInst.lock.RUnlock()
  1247  					return false
  1248  				}
  1249  				if downCastInst.deadMembership.MsgByID(downInst.discoveryImpl().self.PKIid) != nil {
  1250  					downCastInst.lock.RUnlock()
  1251  					return false
  1252  				}
  1253  				for _, am := range downCastInst.msgStore.Get() {
  1254  					m := am.(*protoext.SignedGossipMessage).GetAliveMsg()
  1255  					if bytes.Equal(m.Membership.PkiId, downInst.discoveryImpl().self.PKIid) {
  1256  						downCastInst.lock.RUnlock()
  1257  						return false
  1258  					}
  1259  				}
  1260  				downCastInst.lock.RUnlock()
  1261  			}
  1262  		}
  1263  		return true
  1264  	}
  1265  
  1266  	waitUntilTimeoutOrFail(t, checkMessages, defaultTestConfig.AliveExpirationTimeout*(msgExpirationFactor+5))
  1267  
  1268  	assertMembership(t, instances[:len(instances)-2], nodeNum-3)
  1269  
  1270  	stopInstances(t, instances[:len(instances)-2])
  1271  }
  1272  
  1273  func TestExpirationNoSecretEnvelope(t *testing.T) {
  1274  	t.Parallel()
  1275  
  1276  	l, err := zap.NewDevelopment()
  1277  	assert.NoError(t, err)
  1278  
  1279  	removed := make(chan struct{})
  1280  	logger := flogging.NewFabricLogger(l, zap.Hooks(func(entry zapcore.Entry) error {
  1281  		if strings.Contains(entry.Message, "Removing member: Endpoint: foo") {
  1282  			removed <- struct{}{}
  1283  		}
  1284  		return nil
  1285  	}))
  1286  
  1287  	msgStore := newAliveMsgStore(&gossipDiscoveryImpl{
  1288  		aliveExpirationTimeout: time.Millisecond,
  1289  		lock:                   &sync.RWMutex{},
  1290  		aliveMembership:        util.NewMembershipStore(),
  1291  		deadMembership:         util.NewMembershipStore(),
  1292  		logger:                 logger,
  1293  	})
  1294  
  1295  	msg := &proto.GossipMessage{
  1296  		Content: &proto.GossipMessage_AliveMsg{
  1297  			AliveMsg: &proto.AliveMessage{Membership: &proto.Member{
  1298  				Endpoint: "foo",
  1299  			}},
  1300  		},
  1301  	}
  1302  
  1303  	sMsg, err := protoext.NoopSign(msg)
  1304  	assert.NoError(t, err)
  1305  
  1306  	msgStore.Add(sMsg)
  1307  	select {
  1308  	case <-removed:
  1309  	case <-time.After(time.Second * 10):
  1310  		t.Fatalf("timed out")
  1311  	}
  1312  }
  1313  
  1314  func TestMsgStoreExpirationWithMembershipMessages(t *testing.T) {
  1315  	// Creates 3 discovery instances without gossip communication
  1316  	// Generates MembershipRequest msg for each instance using createMembershipRequest
  1317  	// Generates Alive msg for each instance using createAliveMessage
  1318  	// Builds membership using Alive msgs
  1319  	// Checks msgStore and related maps
  1320  	// Generates MembershipResponse msgs for each instance using createMembershipResponse
  1321  	// Generates new set of Alive msgs and processes them
  1322  	// Checks msgStore and related maps
  1323  	// Waits for expiration and checks msgStore and related maps
  1324  	// Processes stored MembershipRequest msg and checks msgStore and related maps
  1325  	// Processes stored MembershipResponse msg and checks msgStore and related maps
  1326  
  1327  	t.Parallel()
  1328  	bootPeers := []string{}
  1329  	peersNum := 3
  1330  	instances := []*gossipInstance{}
  1331  	aliveMsgs := []*protoext.SignedGossipMessage{}
  1332  	newAliveMsgs := []*protoext.SignedGossipMessage{}
  1333  	memReqMsgs := []*protoext.SignedGossipMessage{}
  1334  	memRespMsgs := make(map[int][]*proto.MembershipResponse)
  1335  
  1336  	for i := 0; i < peersNum; i++ {
  1337  		id := fmt.Sprintf("d%d", i)
  1338  		inst := createDiscoveryInstanceWithNoGossip(22610+i, id, bootPeers)
  1339  		inst.comm.disableComm = true
  1340  		instances = append(instances, inst)
  1341  	}
  1342  
  1343  	// Creating MembershipRequest messages
  1344  	for i := 0; i < peersNum; i++ {
  1345  		memReqMsg, _ := instances[i].discoveryImpl().createMembershipRequest(true)
  1346  		sMsg, _ := protoext.NoopSign(memReqMsg)
  1347  		memReqMsgs = append(memReqMsgs, sMsg)
  1348  	}
  1349  	// Creating Alive messages
  1350  	for i := 0; i < peersNum; i++ {
  1351  		aliveMsg, _ := instances[i].discoveryImpl().createSignedAliveMessage(true)
  1352  		aliveMsgs = append(aliveMsgs, aliveMsg)
  1353  	}
  1354  
  1355  	repeatForFiltered := func(n int, filter func(i int) bool, action func(i int)) {
  1356  		for i := 0; i < n; i++ {
  1357  			if filter(i) {
  1358  				continue
  1359  			}
  1360  			action(i)
  1361  		}
  1362  	}
  1363  
  1364  	// Handling Alive
  1365  	for i := 0; i < peersNum; i++ {
  1366  		for k := 0; k < peersNum; k++ {
  1367  			instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1368  				msg: aliveMsgs[k],
  1369  				info: &protoext.ConnectionInfo{
  1370  					ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1371  				},
  1372  			})
  1373  		}
  1374  	}
  1375  
  1376  	checkExistence := func(instances []*gossipInstance, msgs []*protoext.SignedGossipMessage, index int, i int, step string) {
  1377  		_, exist := instances[index].discoveryImpl().aliveLastTS[string(instances[i].discoveryImpl().self.PKIid)]
  1378  		assert.True(t, exist, fmt.Sprint(step, " Data from alive msg ", i, " doesn't exist in aliveLastTS of discovery inst ", index))
  1379  
   1380  		_, exist = instances[index].discoveryImpl().id2Member[string(instances[i].discoveryImpl().self.PKIid)]
  1381  		assert.True(t, exist, fmt.Sprint(step, " id2Member mapping doesn't exist for alive msg ", i, " of discovery inst ", index))
  1382  
   1383  		assert.NotNil(t, instances[index].discoveryImpl().aliveMembership.MsgByID(instances[i].discoveryImpl().self.PKIid), fmt.Sprint(step, " Alive msg ", i, " doesn't exist in aliveMembership of discovery inst ", index))
   1384  
   1385  		assert.Contains(t, instances[index].discoveryImpl().msgStore.Get(), msgs[i], fmt.Sprint(step, " Alive msg ", i, " not stored in store of discovery inst ", index))
  1386  	}
  1387  
  1388  	checkAliveMsgExist := func(instances []*gossipInstance, msgs []*protoext.SignedGossipMessage, index int, step string) {
  1389  		instances[index].discoveryImpl().lock.RLock()
  1390  		defer instances[index].discoveryImpl().lock.RUnlock()
  1391  		repeatForFiltered(peersNum,
  1392  			func(k int) bool {
  1393  				return k == index
  1394  			},
  1395  			func(k int) {
  1396  				checkExistence(instances, msgs, index, k, step)
  1397  			})
  1398  	}
  1399  
   1400  	// Checking that the Alive messages were processed
  1401  	for i := 0; i < peersNum; i++ {
  1402  		checkAliveMsgExist(instances, aliveMsgs, i, "[Step 1 - processing aliveMsg]")
  1403  	}
  1404  
  1405  	// Creating MembershipResponse while all instances have full membership
  1406  	for i := 0; i < peersNum; i++ {
  1407  		peerToResponse := &NetworkMember{
  1408  			Metadata:         []byte{},
  1409  			PKIid:            []byte(fmt.Sprintf("localhost:%d", 22610+i)),
  1410  			Endpoint:         fmt.Sprintf("localhost:%d", 22610+i),
  1411  			InternalEndpoint: fmt.Sprintf("localhost:%d", 22610+i),
  1412  		}
  1413  		memRespMsgs[i] = []*proto.MembershipResponse{}
  1414  		repeatForFiltered(peersNum,
  1415  			func(k int) bool {
  1416  				return k == i
  1417  			},
  1418  			func(k int) {
  1419  				aliveMsg, _ := instances[k].discoveryImpl().createSignedAliveMessage(true)
  1420  				memResp := instances[k].discoveryImpl().createMembershipResponse(aliveMsg, peerToResponse)
  1421  				memRespMsgs[i] = append(memRespMsgs[i], memResp)
  1422  			})
  1423  	}
  1424  
   1425  	// Re-creating Alive msgs with higher seq_nums, to make sure the Alive msgs in memReq and memResp are older
  1426  	for i := 0; i < peersNum; i++ {
  1427  		aliveMsg, _ := instances[i].discoveryImpl().createSignedAliveMessage(true)
  1428  		newAliveMsgs = append(newAliveMsgs, aliveMsg)
  1429  	}
  1430  
  1431  	// Handling new Alive set
  1432  	for i := 0; i < peersNum; i++ {
  1433  		for k := 0; k < peersNum; k++ {
  1434  			instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1435  				msg: newAliveMsgs[k],
  1436  				info: &protoext.ConnectionInfo{
  1437  					ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1438  				},
  1439  			})
  1440  		}
  1441  	}
  1442  
  1443  	// Checking that the new Alive messages were processed
  1444  	for i := 0; i < peersNum; i++ {
  1445  		checkAliveMsgExist(instances, newAliveMsgs, i, "[Step 2 - processing aliveMsg]")
  1446  	}
  1447  
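        	// checkAliveMsgNotExist verifies that instance `index` holds no membership data at all:
        	// aliveLastTS, deadLastTS, id2Member, the message store, and both membership lists must be empty.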
  1448  	checkAliveMsgNotExist := func(instances []*gossipInstance, msgs []*protoext.SignedGossipMessage, index int, step string) {
  1449  		instances[index].discoveryImpl().lock.RLock()
  1450  		defer instances[index].discoveryImpl().lock.RUnlock()
  1451  		assert.Empty(t, instances[index].discoveryImpl().aliveLastTS, fmt.Sprint(step, " Data from alive msg still exists in aliveLastTS of discovery inst ", index))
  1452  		assert.Empty(t, instances[index].discoveryImpl().deadLastTS, fmt.Sprint(step, " Data from alive msg still exists in deadLastTS of discovery inst ", index))
  1453  		assert.Empty(t, instances[index].discoveryImpl().id2Member, fmt.Sprint(step, " id2Member mapping still contains data related to Alive msg: discovery inst ", index))
  1454  		assert.Empty(t, instances[index].discoveryImpl().msgStore.Get(), fmt.Sprint(step, " Expired Alive msg still stored in store of discovery inst ", index))
  1455  		assert.Zero(t, instances[index].discoveryImpl().aliveMembership.Size(), fmt.Sprint(step, " Alive membership list is not empty, discovery instance ", index))
  1456  		assert.Zero(t, instances[index].discoveryImpl().deadMembership.Size(), fmt.Sprint(step, " Dead membership list is not empty, discovery instance ", index))
  1457  	}
  1458  
  1459  	// Sleep until the alive messages expire
  1460  	time.Sleep(defaultTestConfig.AliveExpirationTimeout * (msgExpirationFactor + 5))
  1461  
  1462  	// Checking that the Alive messages expired
  1463  	for i := 0; i < peersNum; i++ {
  1464  		checkAliveMsgNotExist(instances, newAliveMsgs, i, "[Step 3 - expiration in msg store]")
  1465  	}
  1466  
  1467  	// Processing old MembershipRequest
  1468  	for i := 0; i < peersNum; i++ {
  1469  		repeatForFiltered(peersNum,
  1470  			func(k int) bool {
  1471  				return k == i
  1472  			},
  1473  			func(k int) {
  1474  				instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1475  					msg: memReqMsgs[k],
  1476  					info: &protoext.ConnectionInfo{
  1477  						ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1478  					},
  1479  				})
  1480  			})
  1481  	}
  1482  
  1483  	// MembershipRequest processing didn't change anything
  1484  	for i := 0; i < peersNum; i++ {
  1485  		checkAliveMsgNotExist(instances, aliveMsgs, i, "[Step 4 - memReq processing after expiration]")
  1486  	}
  1487  
  1488  	// Processing the old Alive messages again, now that they have expired
  1489  	for i := 0; i < peersNum; i++ {
  1490  		for k := 0; k < peersNum; k++ {
  1491  			instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1492  				msg: aliveMsgs[k],
  1493  				info: &protoext.ConnectionInfo{
  1494  					ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1495  				},
  1496  			})
  1497  		}
  1498  	}
  1499  
  1500  	// Alive msg processing didn't change anything
  1501  	for i := 0; i < peersNum; i++ {
  1502  		checkAliveMsgNotExist(instances, aliveMsgs, i, "[Step 5.1 - after lost old aliveMsg process]")
  1503  		checkAliveMsgNotExist(instances, newAliveMsgs, i, "[Step 5.2 - after lost new aliveMsg process]")
  1504  	}
  1505  
  1506  	// Handling old MembershipResponse messages
  1507  	for i := 0; i < peersNum; i++ {
  1508  		respForPeer := memRespMsgs[i]
  1509  		for _, msg := range respForPeer {
  1510  			sMsg, _ := protoext.NoopSign(&proto.GossipMessage{
  1511  				Tag:   proto.GossipMessage_EMPTY,
  1512  				Nonce: uint64(0),
  1513  				Content: &proto.GossipMessage_MemRes{
  1514  					MemRes: msg,
  1515  				},
  1516  			})
  1517  			instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1518  				msg: sMsg,
  1519  				info: &protoext.ConnectionInfo{
  1520  					ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1521  				},
  1522  			})
  1523  		}
  1524  	}
  1525  
  1526  	// MembershipResponse msg processing didn't change anything
  1527  	for i := 0; i < peersNum; i++ {
  1528  		checkAliveMsgNotExist(instances, aliveMsgs, i, "[Step 6 - after lost MembershipResp process]")
  1529  	}
  1530  
  1531  	for i := 0; i < peersNum; i++ {
  1532  		instances[i].Stop()
  1533  	}
  1534  
  1535  }
  1536  
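        // TestAliveMsgStore verifies that the alive message store accepts previously unseen alive messages,
        // rejects ones it has already stored, and panics when given non-alive (membership request) messages.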
  1537  func TestAliveMsgStore(t *testing.T) {
  1538  	t.Parallel()
  1539  
  1540  	bootPeers := []string{}
  1541  	peersNum := 2
  1542  	instances := []*gossipInstance{}
  1543  	aliveMsgs := []*protoext.SignedGossipMessage{}
  1544  	memReqMsgs := []*protoext.SignedGossipMessage{}
  1545  
  1546  	for i := 0; i < peersNum; i++ {
  1547  		id := fmt.Sprintf("d%d", i)
  1548  		inst := createDiscoveryInstanceWithNoGossip(32610+i, id, bootPeers)
  1549  		instances = append(instances, inst)
  1550  	}
  1551  
  1552  	// Creating MembershipRequest messages
  1553  	for i := 0; i < peersNum; i++ {
  1554  		memReqMsg, _ := instances[i].discoveryImpl().createMembershipRequest(true)
  1555  		sMsg, _ := protoext.NoopSign(memReqMsg)
  1556  		memReqMsgs = append(memReqMsgs, sMsg)
  1557  	}
  1558  	// Creating Alive messages
  1559  	for i := 0; i < peersNum; i++ {
  1560  		aliveMsg, _ := instances[i].discoveryImpl().createSignedAliveMessage(true)
  1561  		aliveMsgs = append(aliveMsgs, aliveMsg)
  1562  	}
  1563  
  1564  	// Check that new alive msgs are valid
  1565  	for _, msg := range aliveMsgs {
  1566  		assert.True(t, instances[0].discoveryImpl().msgStore.CheckValid(msg), "aliveMsgStore CheckValid returns false on new AliveMsg")
  1567  	}
  1568  
  1569  	// Add new alive msgs
  1570  	for _, msg := range aliveMsgs {
  1571  		assert.True(t, instances[0].discoveryImpl().msgStore.Add(msg), "aliveMsgStore Add returns false on new AliveMsg")
  1572  	}
  1573  
  1574  	// Check already-existing alive msgs
  1575  	for _, msg := range aliveMsgs {
  1576  		assert.False(t, instances[0].discoveryImpl().msgStore.CheckValid(msg), "aliveMsgStore CheckValid returns true on existing AliveMsg")
  1577  	}
  1578  
  1579  	// Check non-alive msgs
  1580  	for _, msg := range memReqMsgs {
  1581  		assert.Panics(t, func() { instances[1].discoveryImpl().msgStore.CheckValid(msg) }, "aliveMsgStore CheckValid should panic on new MembershipRequest msg")
  1582  		assert.Panics(t, func() { instances[1].discoveryImpl().msgStore.Add(msg) }, "aliveMsgStore Add should panic on new MembershipRequest msg")
  1583  	}
  1584  }
  1585  
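        // TestMemRespDisclosurePol verifies that the bootstrap peer's custom disclosure policy controls who learns
        // the membership: d3 (localhost:7880) is answered and gets full membership, while d2 receives nothing back.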
  1586  func TestMemRespDisclosurePol(t *testing.T) {
  1587  	t.Parallel()
  1588  	pol := func(remotePeer *NetworkMember) (Sieve, EnvelopeFilter) {
  1589  		return func(_ *protoext.SignedGossipMessage) bool {
  1590  				return remotePeer.Endpoint == "localhost:7880"
  1591  			}, func(m *protoext.SignedGossipMessage) *proto.Envelope {
  1592  				return m.Envelope
  1593  			}
  1594  	}
  1595  	d1 := createDiscoveryInstanceThatGossips(7878, "d1", []string{}, true, pol, defaultTestConfig)
  1596  	defer d1.Stop()
  1597  	d2 := createDiscoveryInstanceThatGossips(7879, "d2", []string{"localhost:7878"}, true, noopPolicy, defaultTestConfig)
  1598  	defer d2.Stop()
  1599  	d3 := createDiscoveryInstanceThatGossips(7880, "d3", []string{"localhost:7878"}, true, noopPolicy, defaultTestConfig)
  1600  	defer d3.Stop()
  1601  	// Both d1 and d3 know each other, and also about d2
  1602  	assertMembership(t, []*gossipInstance{d1, d3}, 2)
  1603  	// d2 doesn't know about anyone because the bootstrap peer ignores it due to the custom policy
  1604  	assertMembership(t, []*gossipInstance{d2}, 0)
  1605  	assert.Zero(t, d2.receivedMsgCount())
  1606  	assert.NotZero(t, d2.sentMsgCount())
  1607  }
  1608  
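        // TestMembersByID verifies that Members.ByID indexes the members by their PKI-ID.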
  1609  func TestMembersByID(t *testing.T) {
  1610  	members := Members{
  1611  		{PKIid: common.PKIidType("p0"), Endpoint: "p0"},
  1612  		{PKIid: common.PKIidType("p1"), Endpoint: "p1"},
  1613  	}
  1614  	byID := members.ByID()
  1615  	assert.Len(t, byID, 2)
  1616  	assert.Equal(t, "p0", byID["p0"].Endpoint)
  1617  	assert.Equal(t, "p1", byID["p1"].Endpoint)
  1618  }
  1619  
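        // TestFilter verifies that Members.Filter returns only the members that satisfy the given predicate.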
  1620  func TestFilter(t *testing.T) {
  1621  	members := Members{
  1622  		{PKIid: common.PKIidType("p0"), Endpoint: "p0", Properties: &proto.Properties{
  1623  			Chaincodes: []*proto.Chaincode{{Name: "cc", Version: "1.0"}},
  1624  		}},
  1625  		{PKIid: common.PKIidType("p1"), Endpoint: "p1", Properties: &proto.Properties{
  1626  			Chaincodes: []*proto.Chaincode{{Name: "cc", Version: "2.0"}},
  1627  		}},
  1628  	}
  1629  	res := members.Filter(func(member NetworkMember) bool {
  1630  		cc := member.Properties.Chaincodes[0]
  1631  		return cc.Version == "2.0" && cc.Name == "cc"
  1632  	})
  1633  	assert.Equal(t, Members{members[1]}, res)
  1634  }
  1635  
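        // TestMap verifies that Members.Map applies the transformation to copies and leaves the original members unmodified.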
  1636  func TestMap(t *testing.T) {
  1637  	members := Members{
  1638  		{PKIid: common.PKIidType("p0"), Endpoint: "p0"},
  1639  		{PKIid: common.PKIidType("p1"), Endpoint: "p1"},
  1640  	}
  1641  	expectedMembers := Members{
  1642  		{PKIid: common.PKIidType("p0"), Endpoint: "p0", Properties: &proto.Properties{LedgerHeight: 2}},
  1643  		{PKIid: common.PKIidType("p1"), Endpoint: "p1", Properties: &proto.Properties{LedgerHeight: 2}},
  1644  	}
  1645  
  1646  	addProperty := func(member NetworkMember) NetworkMember {
  1647  		member.Properties = &proto.Properties{
  1648  			LedgerHeight: 2,
  1649  		}
  1650  		return member
  1651  	}
  1652  
  1653  	assert.Equal(t, expectedMembers, members.Map(addProperty))
  1654  	// Ensure original members didn't change
  1655  	assert.Nil(t, members[0].Properties)
  1656  	assert.Nil(t, members[1].Properties)
  1657  }
  1658  
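        // TestMembersIntersect verifies that Members.Intersect returns only the members present in both sets.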
  1659  func TestMembersIntersect(t *testing.T) {
  1660  	members1 := Members{
  1661  		{PKIid: common.PKIidType("p0"), Endpoint: "p0"},
  1662  		{PKIid: common.PKIidType("p1"), Endpoint: "p1"},
  1663  	}
  1664  	members2 := Members{
  1665  		{PKIid: common.PKIidType("p1"), Endpoint: "p1"},
  1666  		{PKIid: common.PKIidType("p2"), Endpoint: "p2"},
  1667  	}
  1668  	assert.Equal(t, Members{{PKIid: common.PKIidType("p1"), Endpoint: "p1"}}, members1.Intersect(members2))
  1669  }
  1670  
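        // TestPeerIsolation verifies that a peer left alone after its neighbors shut down regains full membership
        // once those peers are restarted (see the scenario below).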
  1671  func TestPeerIsolation(t *testing.T) {
  1672  	t.Parallel()
  1673  
  1674  	// Scenario:
  1675  	// Start 3 peers (peer0, peer1, peer2). Set peer1 as the bootstrap peer for all.
  1676  	// Stop peer0 and peer1 for a while, start them again and test if peer2 still gets full membership
  1677  
  1678  	config := defaultTestConfig
  1679  	// Use a smaller AliveExpirationTimeout than the default to reduce the running time of the test.
  1680  	config.AliveExpirationTimeout = 2 * config.AliveTimeInterval
  1681  
  1682  	peersNum := 3
  1683  	bootPeers := []string{bootPeer(7121)}
  1684  	instances := []*gossipInstance{}
  1685  	var inst *gossipInstance
  1686  
  1687  	// Start all peers and wait for full membership
  1688  	for i := 0; i < peersNum; i++ {
  1689  		id := fmt.Sprintf("d%d", i)
  1690  		inst = createDiscoveryInstanceCustomConfig(7120+i, id, bootPeers, config)
  1691  		instances = append(instances, inst)
  1692  	}
  1693  	assertMembership(t, instances, peersNum-1)
  1694  
  1695  	// Stop the first 2 peers so the third peer stays alone
  1696  	stopInstances(t, instances[:peersNum-1])
  1697  	assertMembership(t, instances[peersNum-1:], 0)
  1698  
  1699  	// Sleep the same amount of time as it takes to remove a message from the aliveMsgStore (aliveMsgTTL)
  1700  	// Add a second as buffer
  1701  	time.Sleep(config.AliveExpirationTimeout*msgExpirationFactor + time.Second)
  1702  
  1703  	// Start the first 2 peers again and wait for all the peers to get full membership.
  1704  	// In particular, we want to test that peer2 won't be isolated
  1705  	for i := 0; i < peersNum-1; i++ {
  1706  		id := fmt.Sprintf("d%d", i)
  1707  		inst = createDiscoveryInstanceCustomConfig(7120+i, id, bootPeers, config)
  1708  		instances[i] = inst
  1709  	}
  1710  	assertMembership(t, instances, peersNum-1)
  1711  }
  1712  
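        // waitUntilOrFail polls pred using the package-level timeout.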
  1713  func waitUntilOrFail(t *testing.T, pred func() bool) {
  1714  	waitUntilTimeoutOrFail(t, pred, timeout)
  1715  }
  1716  
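        // waitUntilTimeoutOrFail polls pred every timeout/10 until it returns true, and fails the test once the timeout expires.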
  1717  func waitUntilTimeoutOrFail(t *testing.T, pred func() bool, timeout time.Duration) {
  1718  	start := time.Now()
  1719  	limit := start.UnixNano() + timeout.Nanoseconds()
  1720  	for time.Now().UnixNano() < limit {
  1721  		if pred() {
  1722  			return
  1723  		}
  1724  		time.Sleep(timeout / 10)
  1725  	}
  1726  	assert.Fail(t, "Timeout expired!")
  1727  }
  1728  
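        // waitUntilOrFailBlocking runs f in a goroutine and fails the test if f doesn't return within the timeout.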
  1729  func waitUntilOrFailBlocking(t *testing.T, f func()) {
  1730  	successChan := make(chan struct{}, 1)
  1731  	go func() {
  1732  		f()
  1733  		successChan <- struct{}{}
  1734  	}()
  1735  	select {
  1736  	case <-time.NewTimer(timeout).C:
  1737  		break
  1738  	case <-successChan:
  1739  		return
  1740  	}
  1741  	assert.Fail(t, "Timeout expired!")
  1742  }
  1743  
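        // stopInstances stops all given instances concurrently and fails the test if they don't stop within the timeout.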
  1744  func stopInstances(t *testing.T, instances []*gossipInstance) {
  1745  	stopAction := &sync.WaitGroup{}
  1746  	for _, inst := range instances {
  1747  		stopAction.Add(1)
  1748  		go func(inst *gossipInstance) {
  1749  			defer stopAction.Done()
  1750  			inst.Stop()
  1751  		}(inst)
  1752  	}
  1753  
  1754  	waitUntilOrFailBlocking(t, stopAction.Wait)
  1755  }
  1756  
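        // assertMembership waits until every instance reports exactly expectedNum members, and fails the test on timeout.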
  1757  func assertMembership(t *testing.T, instances []*gossipInstance, expectedNum int) {
  1758  	wg := sync.WaitGroup{}
  1759  	wg.Add(len(instances))
  1760  
  1761  	ctx, cancelation := context.WithTimeout(context.Background(), timeout)
  1762  	defer cancelation()
  1763  
  1764  	for _, inst := range instances {
  1765  		go func(ctx context.Context, i *gossipInstance) {
  1766  			defer wg.Done()
  1767  			for {
  1768  				select {
  1769  				case <-ctx.Done():
  1770  					return
  1771  				case <-time.After(timeout / 10):
  1772  					if len(i.GetMembership()) == expectedNum {
  1773  						return
  1774  					}
  1775  				}
  1776  			}
  1777  		}(ctx, inst)
  1778  	}
  1779  
  1780  	wg.Wait()
  1781  	assert.NoError(t, ctx.Err(), "Timeout expired!")
  1782  }
  1783  
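        // portsOfMembers returns the ports of the members' endpoints, sorted in ascending order.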
  1784  func portsOfMembers(members []NetworkMember) []int {
  1785  	ports := make([]int, len(members))
  1786  	for i := range members {
  1787  		ports[i] = portOfEndpoint(members[i].Endpoint)
  1788  	}
  1789  	sort.Ints(ports)
  1790  	return ports
  1791  }
  1792  
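        // portOfEndpoint extracts the port from a "host:port" endpoint string.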
  1793  func portOfEndpoint(endpoint string) int {
  1794  	port, _ := strconv.ParseInt(strings.Split(endpoint, ":")[1], 10, 64)
  1795  	return int(port)
  1796  }