github.com/kaituanwang/hyperledger@v2.0.1+incompatible/gossip/discovery/discovery_test.go

     1  /*
     2  Copyright IBM Corp. 2016 All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package discovery
     8  
     9  import (
    10  	"bytes"
    11  	"context"
    12  	"fmt"
    13  	"io"
    14  	"math/rand"
    15  	"net"
    16  	"sort"
    17  	"strconv"
    18  	"strings"
    19  	"sync"
    20  	"sync/atomic"
    21  	"testing"
    22  	"time"
    23  
    24  	protoG "github.com/golang/protobuf/proto"
    25  	proto "github.com/hyperledger/fabric-protos-go/gossip"
    26  	"github.com/hyperledger/fabric/common/flogging"
    27  	"github.com/hyperledger/fabric/gossip/common"
    28  	"github.com/hyperledger/fabric/gossip/gossip/msgstore"
    29  	"github.com/hyperledger/fabric/gossip/protoext"
    30  	"github.com/hyperledger/fabric/gossip/util"
    31  	"github.com/stretchr/testify/assert"
    32  	"github.com/stretchr/testify/mock"
    33  	"go.uber.org/zap"
    34  	"go.uber.org/zap/zapcore"
    35  	"google.golang.org/grpc"
    36  	"google.golang.org/grpc/connectivity"
    37  )
    38  
    39  var timeout = 15 * time.Second
    40  
    41  var aliveTimeInterval = 300 * time.Millisecond
    42  var defaultTestConfig = DiscoveryConfig{
    43  	AliveTimeInterval:            aliveTimeInterval,
    44  	AliveExpirationTimeout:       10 * aliveTimeInterval,
    45  	AliveExpirationCheckInterval: aliveTimeInterval,
    46  	ReconnectInterval:            10 * aliveTimeInterval,
    47  }
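        
        // A minimal sketch (illustrative only, not used by the tests) of how the
        // timing knobs above relate to each other: the expiration timeout is a
        // multiple of the heartbeat interval, and expiration checks run once per
        // heartbeat. The function name is hypothetical.
        func exampleConfigFromBase(base time.Duration) DiscoveryConfig {
        	return DiscoveryConfig{
        		AliveTimeInterval:            base,      // heartbeat period
        		AliveExpirationTimeout:       10 * base, // declared dead after 10 missed heartbeats
        		AliveExpirationCheckInterval: base,      // scan for expirations every heartbeat
        		ReconnectInterval:            10 * base, // retry dead members at the same cadence
        	}
        }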
    48  
    49  func init() {
    50  	util.SetupTestLogging()
    51  	maxConnectionAttempts = 10000
    52  }
    53  
    54  type dummyReceivedMessage struct {
    55  	msg  *protoext.SignedGossipMessage
    56  	info *protoext.ConnectionInfo
    57  }
    58  
    59  func (*dummyReceivedMessage) Respond(msg *proto.GossipMessage) {
    60  	panic("implement me")
    61  }
    62  
    63  func (rm *dummyReceivedMessage) GetGossipMessage() *protoext.SignedGossipMessage {
    64  	return rm.msg
    65  }
    66  
    67  func (*dummyReceivedMessage) GetSourceEnvelope() *proto.Envelope {
    68  	panic("implement me")
    69  }
    70  
    71  func (rm *dummyReceivedMessage) GetConnectionInfo() *protoext.ConnectionInfo {
    72  	return rm.info
    73  }
    74  
    75  func (*dummyReceivedMessage) Ack(err error) {
    76  	panic("implement me")
    77  }
    78  
    79  type dummyCommModule struct {
    80  	validatedMessages chan *protoext.SignedGossipMessage
    81  	msgsReceived      uint32
    82  	msgsSent          uint32
    83  	id                string
    84  	identitySwitch    chan common.PKIidType
    85  	presumeDead       chan common.PKIidType
    86  	detectedDead      chan string
    87  	streams           map[string]proto.Gossip_GossipStreamClient
    88  	conns             map[string]*grpc.ClientConn
    89  	lock              *sync.RWMutex
    90  	incMsgs           chan protoext.ReceivedMessage
    91  	lastSeqs          map[string]uint64
    92  	shouldGossip      bool
    93  	disableComm       bool
    94  	mock              *mock.Mock
    95  }
    96  
    97  type gossipInstance struct {
    98  	msgInterceptor func(*protoext.SignedGossipMessage)
    99  	comm           *dummyCommModule
   100  	Discovery
   101  	gRPCserv      *grpc.Server
   102  	lsnr          net.Listener
   103  	shouldGossip  bool
   104  	syncInitiator *time.Ticker
   105  	stopChan      chan struct{}
   106  	port          int
   107  }
   108  
   109  func (comm *dummyCommModule) ValidateAliveMsg(am *protoext.SignedGossipMessage) bool {
   110  	comm.lock.RLock()
   111  	c := comm.validatedMessages
   112  	comm.lock.RUnlock()
   113  
   114  	if c != nil {
   115  		c <- am
   116  	}
   117  	return true
   118  }
   119  
   120  func (comm *dummyCommModule) IdentitySwitch() <-chan common.PKIidType {
   121  	return comm.identitySwitch
   122  }
   123  
   124  func (comm *dummyCommModule) recordValidation(validatedMessages chan *protoext.SignedGossipMessage) {
   125  	comm.lock.Lock()
   126  	defer comm.lock.Unlock()
   127  	comm.validatedMessages = validatedMessages
   128  }
   129  
   130  func (comm *dummyCommModule) SignMessage(am *proto.GossipMessage, internalEndpoint string) *proto.Envelope {
   133  	secret := &proto.Secret{
   134  		Content: &proto.Secret_InternalEndpoint{
   135  			InternalEndpoint: internalEndpoint,
   136  		},
   137  	}
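        	// The test signer below is a no-op; a production signer would sign with the peer's key.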
   138  	signer := func(msg []byte) ([]byte, error) {
   139  		return nil, nil
   140  	}
   141  	s, _ := protoext.NoopSign(am)
   142  	env := s.Envelope
   143  	protoext.SignSecret(env, signer, secret)
   144  	return env
   145  }
   146  
   147  func (comm *dummyCommModule) Gossip(msg *protoext.SignedGossipMessage) {
   148  	if !comm.shouldGossip || comm.disableComm {
   149  		return
   150  	}
   151  	comm.lock.Lock()
   152  	defer comm.lock.Unlock()
   153  	for _, conn := range comm.streams {
   154  		conn.Send(msg.Envelope)
   155  	}
   156  }
   157  
   158  func (comm *dummyCommModule) Forward(msg protoext.ReceivedMessage) {
   159  	if !comm.shouldGossip || comm.disableComm {
   160  		return
   161  	}
   162  	comm.lock.Lock()
   163  	defer comm.lock.Unlock()
   164  	for _, conn := range comm.streams {
   165  		conn.Send(msg.GetGossipMessage().Envelope)
   166  	}
   167  }
   168  
   169  func (comm *dummyCommModule) SendToPeer(peer *NetworkMember, msg *protoext.SignedGossipMessage) {
   170  	if comm.disableComm {
   171  		return
   172  	}
   173  	comm.lock.RLock()
   174  	_, exists := comm.streams[peer.Endpoint]
   175  	mock := comm.mock
   176  	comm.lock.RUnlock()
   177  
   178  	if mock != nil {
   179  		mock.Called(peer, msg)
   180  	}
   181  
   182  	if !exists {
   183  		if !comm.Ping(peer) {
   184  			fmt.Printf("Ping to %v failed\n", peer.Endpoint)
   185  			return
   186  		}
   187  	}
   188  	comm.lock.Lock()
   189  	s, _ := protoext.NoopSign(msg.GossipMessage)
   190  	comm.streams[peer.Endpoint].Send(s.Envelope)
   191  	comm.lock.Unlock()
   192  	atomic.AddUint32(&comm.msgsSent, 1)
   193  }
   194  
   195  func (comm *dummyCommModule) Ping(peer *NetworkMember) bool {
   196  	if comm.disableComm {
   197  		return false
   198  	}
   199  	comm.lock.Lock()
   200  	defer comm.lock.Unlock()
   201  
   202  	if comm.mock != nil {
   203  		comm.mock.Called()
   204  	}
   205  
   206  	_, alreadyExists := comm.streams[peer.Endpoint]
   207  	conn := comm.conns[peer.Endpoint]
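        	// Lazily (re)dial when no stream exists yet or the cached connection
        	// has shut down; otherwise issue a real gRPC ping over the existing one.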
   208  	if !alreadyExists || conn.GetState() == connectivity.Shutdown {
   209  		newConn, err := grpc.Dial(peer.Endpoint, grpc.WithInsecure())
   210  		if err != nil {
   211  			return false
   212  		}
   213  		if stream, err := proto.NewGossipClient(newConn).GossipStream(context.Background()); err == nil {
   214  			comm.conns[peer.Endpoint] = newConn
   215  			comm.streams[peer.Endpoint] = stream
   216  			return true
   217  		}
   218  		return false
   219  	}
   220  	if _, err := proto.NewGossipClient(conn).Ping(context.Background(), &proto.Empty{}); err != nil {
   221  		return false
   222  	}
   223  	return true
   224  }
   225  
   226  func (comm *dummyCommModule) Accept() <-chan protoext.ReceivedMessage {
   227  	return comm.incMsgs
   228  }
   229  
   230  func (comm *dummyCommModule) PresumedDead() <-chan common.PKIidType {
   231  	return comm.presumeDead
   232  }
   233  
   234  func (comm *dummyCommModule) CloseConn(peer *NetworkMember) {
   235  	comm.lock.Lock()
   236  	defer comm.lock.Unlock()
   237  
   238  	if _, exists := comm.streams[peer.Endpoint]; !exists {
   239  		return
   240  	}
   241  
   242  	comm.streams[peer.Endpoint].CloseSend()
   243  	comm.conns[peer.Endpoint].Close()
   244  }
   245  
   246  func (g *gossipInstance) receivedMsgCount() int {
   247  	return int(atomic.LoadUint32(&g.comm.msgsReceived))
   248  }
   249  
   250  func (g *gossipInstance) sentMsgCount() int {
   251  	return int(atomic.LoadUint32(&g.comm.msgsSent))
   252  }
   253  
   254  func (g *gossipInstance) discoveryImpl() *gossipDiscoveryImpl {
   255  	return g.Discovery.(*gossipDiscoveryImpl)
   256  }
   257  
   258  func (g *gossipInstance) initiateSync(frequency time.Duration, peerNum int) {
   259  	g.syncInitiator = time.NewTicker(frequency)
   260  	g.stopChan = make(chan struct{})
   261  	go func() {
   262  		for {
   263  			select {
   264  			case <-g.syncInitiator.C:
   265  				g.Discovery.InitiateSync(peerNum)
   266  			case <-g.stopChan:
   267  				g.syncInitiator.Stop()
   268  				return
   269  			}
   270  		}
   271  	}()
   272  }
   273  
   274  func (g *gossipInstance) GossipStream(stream proto.Gossip_GossipStreamServer) error {
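        	// Server side of the gossip stream: deserialize each incoming envelope,
        	// hand it to the test's interceptor, queue it for the discovery layer,
        	// and re-gossip alive messages subject to the seq-num de-duplication below.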
   275  	for {
   276  		envelope, err := stream.Recv()
   277  		if err == io.EOF {
   278  			return nil
   279  		}
   280  		if err != nil {
   281  			return err
   282  		}
   283  		lgr := g.Discovery.(*gossipDiscoveryImpl).logger
   284  		gMsg, err := protoext.EnvelopeToGossipMessage(envelope)
   285  		if err != nil {
   286  			lgr.Warning("Failed deserializing GossipMessage from envelope:", err)
   287  			continue
   288  		}
   289  		g.msgInterceptor(gMsg)
   290  
   291  		lgr.Debug(g.Discovery.Self().Endpoint, "Got message:", gMsg)
   292  		g.comm.incMsgs <- &dummyReceivedMessage{
   293  			msg: gMsg,
   294  			info: &protoext.ConnectionInfo{
   295  				ID: common.PKIidType("testID"),
   296  			},
   297  		}
   298  		atomic.AddUint32(&g.comm.msgsReceived, 1)
   299  
   300  		if aliveMsg := gMsg.GetAliveMsg(); aliveMsg != nil {
   301  			g.tryForwardMessage(gMsg)
   302  		}
   303  	}
   304  }
   305  
   306  func (g *gossipInstance) tryForwardMessage(msg *protoext.SignedGossipMessage) {
   307  	g.comm.lock.Lock()
   308  
   309  	aliveMsg := msg.GetAliveMsg()
   310  
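        	// Forward an alive message only if its sequence number is newer than
        	// the last one seen from that peer, de-duplicating gossip floods.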
   311  	forward := false
   312  	id := string(aliveMsg.Membership.PkiId)
   313  	seqNum := aliveMsg.Timestamp.SeqNum
   314  	if last, exists := g.comm.lastSeqs[id]; exists {
   315  		if last < seqNum {
   316  			g.comm.lastSeqs[id] = seqNum
   317  			forward = true
   318  		}
   319  	} else {
   320  		g.comm.lastSeqs[id] = seqNum
   321  		forward = true
   322  	}
   323  
   324  	g.comm.lock.Unlock()
   325  
   326  	if forward {
   327  		g.comm.Gossip(msg)
   328  	}
   329  }
   330  
   331  func (g *gossipInstance) Stop() {
   332  	if g.syncInitiator != nil {
   333  		g.stopChan <- struct{}{}
   334  	}
   335  	g.gRPCserv.Stop()
   336  	g.lsnr.Close()
   337  	g.comm.lock.Lock()
   338  	for _, stream := range g.comm.streams {
   339  		stream.CloseSend()
   340  	}
   341  	g.comm.lock.Unlock()
   342  	for _, conn := range g.comm.conns {
   343  		conn.Close()
   344  	}
   345  	g.Discovery.Stop()
   346  }
   347  
   348  func (g *gossipInstance) Ping(context.Context, *proto.Empty) (*proto.Empty, error) {
   349  	return &proto.Empty{}, nil
   350  }
   351  
   352  var noopPolicy = func(remotePeer *NetworkMember) (Sieve, EnvelopeFilter) {
   353  	return func(msg *protoext.SignedGossipMessage) bool {
   354  			return true
   355  		}, func(message *protoext.SignedGossipMessage) *proto.Envelope {
   356  			return message.Envelope
   357  		}
   358  }
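        
        // A DisclosurePolicy yields, per remote peer, a Sieve that decides which
        // alive messages may be sent to that peer and an EnvelopeFilter that may
        // strip fields from the envelope before it is sent. As a minimal sketch
        // (hypothetical, not used by the tests), a policy that discloses every
        // message but always strips the SecretEnvelope carrying the internal
        // endpoint could look like this:
        var hideInternalEndpointPolicy = func(remotePeer *NetworkMember) (Sieve, EnvelopeFilter) {
        	return func(msg *protoext.SignedGossipMessage) bool {
        			return true
        		}, func(message *protoext.SignedGossipMessage) *proto.Envelope {
        			envelope := protoG.Clone(message.Envelope).(*proto.Envelope)
        			envelope.SecretEnvelope = nil
        			return envelope
        		}
        }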
   359  
   360  func createDiscoveryInstance(port int, id string, bootstrapPeers []string) *gossipInstance {
   361  	return createDiscoveryInstanceCustomConfig(port, id, bootstrapPeers, defaultTestConfig)
   362  }
   363  
   364  func createDiscoveryInstanceCustomConfig(port int, id string, bootstrapPeers []string, config DiscoveryConfig) *gossipInstance {
   365  	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, true, noopPolicy, config)
   366  }
   367  
   368  func createDiscoveryInstanceWithNoGossip(port int, id string, bootstrapPeers []string) *gossipInstance {
   369  	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, false, noopPolicy, defaultTestConfig)
   370  }
   371  
   372  func createDiscoveryInstanceWithNoGossipWithDisclosurePolicy(port int, id string, bootstrapPeers []string, pol DisclosurePolicy) *gossipInstance {
   373  	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, false, pol, defaultTestConfig)
   374  }
   375  
   376  func createDiscoveryInstanceThatGossips(port int, id string, bootstrapPeers []string, shouldGossip bool, pol DisclosurePolicy, config DiscoveryConfig) *gossipInstance {
   377  	return createDiscoveryInstanceThatGossipsWithInterceptors(port, id, bootstrapPeers, shouldGossip, pol, func(_ *protoext.SignedGossipMessage) {}, config)
   378  }
   379  
   380  func createDiscoveryInstanceThatGossipsWithInterceptors(port int, id string, bootstrapPeers []string, shouldGossip bool, pol DisclosurePolicy, f func(*protoext.SignedGossipMessage), config DiscoveryConfig) *gossipInstance {
   381  	comm := &dummyCommModule{
   382  		conns:          make(map[string]*grpc.ClientConn),
   383  		streams:        make(map[string]proto.Gossip_GossipStreamClient),
   384  		incMsgs:        make(chan protoext.ReceivedMessage, 1000),
   385  		presumeDead:    make(chan common.PKIidType, 10000),
   386  		id:             id,
   387  		detectedDead:   make(chan string, 10000),
   388  		identitySwitch: make(chan common.PKIidType),
   389  		lock:           &sync.RWMutex{},
   390  		lastSeqs:       make(map[string]uint64),
   391  		shouldGossip:   shouldGossip,
   392  		disableComm:    false,
   393  	}
   394  
   395  	endpoint := fmt.Sprintf("localhost:%d", port)
   396  	self := NetworkMember{
   397  		Metadata:         []byte{},
   398  		PKIid:            []byte(endpoint),
   399  		Endpoint:         endpoint,
   400  		InternalEndpoint: endpoint,
   401  	}
   402  
   403  	listenAddress := fmt.Sprintf(":%d", port)
   404  	ll, err := net.Listen("tcp", listenAddress)
   405  	if err != nil {
   406  		fmt.Printf("Error listening on %v: %v\n", listenAddress, err)
   407  	}
   408  	s := grpc.NewServer()
   409  
   410  	config.BootstrapPeers = bootstrapPeers
   411  	discSvc := NewDiscoveryService(self, comm, comm, pol, config)
   412  	for _, bootPeer := range bootstrapPeers {
   413  		bp := bootPeer
   414  		discSvc.Connect(NetworkMember{Endpoint: bp, InternalEndpoint: bootPeer}, func() (*PeerIdentification, error) {
   415  			return &PeerIdentification{SelfOrg: true, ID: common.PKIidType(bp)}, nil
   416  		})
   417  	}
   418  
   419  	gossInst := &gossipInstance{comm: comm, gRPCserv: s, Discovery: discSvc, lsnr: ll, shouldGossip: shouldGossip, port: port, msgInterceptor: f}
   420  
   421  	proto.RegisterGossipServer(s, gossInst)
   422  	go s.Serve(ll)
   423  
   424  	return gossInst
   425  }
   426  
   427  func bootPeer(port int) string {
   428  	return fmt.Sprintf("localhost:%d", port)
   429  }
   430  
   431  func TestHasExternalEndpoints(t *testing.T) {
   432  	memberWithEndpoint := NetworkMember{Endpoint: "foo"}
   433  	memberWithoutEndpoint := NetworkMember{}
   434  
   435  	assert.True(t, HasExternalEndpoint(memberWithEndpoint))
   436  	assert.False(t, HasExternalEndpoint(memberWithoutEndpoint))
   437  }
   438  
   439  func TestToString(t *testing.T) {
   440  	nm := NetworkMember{
   441  		Endpoint:         "a",
   442  		InternalEndpoint: "b",
   443  	}
   444  	assert.Equal(t, "b", nm.PreferredEndpoint())
   445  	nm = NetworkMember{
   446  		Endpoint: "a",
   447  	}
   448  	assert.Equal(t, "a", nm.PreferredEndpoint())
   449  
   450  	now := time.Now()
   451  	ts := &timestamp{
   452  		incTime: now,
   453  		seqNum:  uint64(42),
   454  	}
   455  	assert.Equal(t, fmt.Sprintf("%d, %d", now.UnixNano(), 42), fmt.Sprint(ts))
   456  }
   457  
   458  func TestNetworkMemberString(t *testing.T) {
   459  	tests := []struct {
   460  		input    NetworkMember
   461  		expected string
   462  	}{
   463  		{
   464  			input:    NetworkMember{Endpoint: "endpoint", InternalEndpoint: "internal-endpoint", PKIid: common.PKIidType{0, 1, 2, 3}, Metadata: nil},
   465  			expected: "Endpoint: endpoint, InternalEndpoint: internal-endpoint, PKI-ID: 00010203, Metadata: ",
   466  		},
   467  		{
   468  			input:    NetworkMember{Endpoint: "endpoint", InternalEndpoint: "internal-endpoint", PKIid: common.PKIidType{0, 1, 2, 3}, Metadata: []byte{4, 5, 6, 7}},
   469  			expected: "Endpoint: endpoint, InternalEndpoint: internal-endpoint, PKI-ID: 00010203, Metadata: 04050607",
   470  		},
   471  	}
   472  	for _, tt := range tests {
   473  		assert.Equal(t, tt.expected, tt.input.String())
   474  	}
   475  }
   476  
   477  func TestBadInput(t *testing.T) {
   478  	inst := createDiscoveryInstance(2048, "d0", []string{})
   479  	inst.Discovery.(*gossipDiscoveryImpl).handleMsgFromComm(nil)
   480  	s, _ := protoext.NoopSign(&proto.GossipMessage{
   481  		Content: &proto.GossipMessage_DataMsg{
   482  			DataMsg: &proto.DataMessage{},
   483  		},
   484  	})
   485  	inst.Discovery.(*gossipDiscoveryImpl).handleMsgFromComm(&dummyReceivedMessage{
   486  		msg: s,
   487  		info: &protoext.ConnectionInfo{
   488  			ID: common.PKIidType("testID"),
   489  		},
   490  	})
   491  }
   492  
   493  func TestConnect(t *testing.T) {
   494  	t.Parallel()
   495  	nodeNum := 10
   496  	instances := []*gossipInstance{}
   497  	firstSentMemReqMsgs := make(chan *protoext.SignedGossipMessage, nodeNum)
   498  	for i := 0; i < nodeNum; i++ {
   499  		inst := createDiscoveryInstance(7611+i, fmt.Sprintf("d%d", i), []string{})
   500  
   501  		inst.comm.lock.Lock()
   502  		inst.comm.mock = &mock.Mock{}
   503  		inst.comm.mock.On("SendToPeer", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
   504  			inst := inst
   505  			msg := arguments.Get(1).(*protoext.SignedGossipMessage)
   506  			if req := msg.GetMemReq(); req != nil {
   507  				selfMsg, _ := protoext.EnvelopeToGossipMessage(req.SelfInformation)
   508  				firstSentMemReqMsgs <- selfMsg
   509  				inst.comm.lock.Lock()
   510  				inst.comm.mock = nil
   511  				inst.comm.lock.Unlock()
   512  			}
   513  		})
   514  		inst.comm.mock.On("Ping", mock.Anything)
   515  		inst.comm.lock.Unlock()
   516  		instances = append(instances, inst)
   517  		j := (i + 1) % nodeNum
   518  		endpoint := fmt.Sprintf("localhost:%d", 7611+j)
   519  		netMember2Connect2 := NetworkMember{Endpoint: endpoint, PKIid: []byte(endpoint)}
   520  		inst.Connect(netMember2Connect2, func() (identification *PeerIdentification, err error) {
   521  			return &PeerIdentification{SelfOrg: false, ID: nil}, nil
   522  		})
   523  	}
   524  
   525  	time.Sleep(time.Second * 3)
   526  	fullMembership := func() bool {
   527  		return nodeNum-1 == len(instances[nodeNum-1].GetMembership())
   528  	}
   529  	waitUntilOrFail(t, fullMembership)
   530  
   531  	discInst := instances[rand.Intn(len(instances))].Discovery.(*gossipDiscoveryImpl)
   532  	mr, _ := discInst.createMembershipRequest(true)
   533  	am, _ := protoext.EnvelopeToGossipMessage(mr.GetMemReq().SelfInformation)
   534  	assert.NotNil(t, am.SecretEnvelope)
   535  	mr2, _ := discInst.createMembershipRequest(false)
   536  	am, _ = protoext.EnvelopeToGossipMessage(mr2.GetMemReq().SelfInformation)
   537  	assert.Nil(t, am.SecretEnvelope)
   538  	stopInstances(t, instances)
   539  	assert.Len(t, firstSentMemReqMsgs, 10)
   540  	close(firstSentMemReqMsgs)
   541  	for firstSentSelfMsg := range firstSentMemReqMsgs {
   542  		assert.Nil(t, firstSentSelfMsg.Envelope.SecretEnvelope)
   543  	}
   544  }
   545  
   546  func TestValidation(t *testing.T) {
   547  	t.Parallel()
   548  
   549  	// Scenarios: This test contains the following sub-tests:
   550  	// 1) alive message validation: a message is validated <==> it entered the message store
   551  	// 2) request/response message validation:
   552  	//   2.1) alive messages from membership requests/responses are validated.
   553  	//   2.2) once alive messages enter the message store, reception of them via membership responses
   554  	//        doesn't trigger validation, but reception via membership requests does.
   555  
   556  	wrapReceivedMessage := func(msg *protoext.SignedGossipMessage) protoext.ReceivedMessage {
   557  		return &dummyReceivedMessage{
   558  			msg: msg,
   559  			info: &protoext.ConnectionInfo{
   560  				ID: common.PKIidType("testID"),
   561  			},
   562  		}
   563  	}
   564  
   565  	requestMessagesReceived := make(chan *protoext.SignedGossipMessage, 100)
   566  	responseMessagesReceived := make(chan *protoext.SignedGossipMessage, 100)
   567  	aliveMessagesReceived := make(chan *protoext.SignedGossipMessage, 5000)
   568  
   569  	var membershipRequest atomic.Value
   570  	var membershipResponseWithAlivePeers atomic.Value
   571  	var membershipResponseWithDeadPeers atomic.Value
   572  
   573  	recordMembershipRequest := func(req *protoext.SignedGossipMessage) {
   574  		msg, _ := protoext.EnvelopeToGossipMessage(req.GetMemReq().SelfInformation)
   575  		membershipRequest.Store(req)
   576  		requestMessagesReceived <- msg
   577  	}
   578  
   579  	recordMembershipResponse := func(res *protoext.SignedGossipMessage) {
   580  		memRes := res.GetMemRes()
   581  		if len(memRes.GetAlive()) > 0 {
   582  			membershipResponseWithAlivePeers.Store(res)
   583  		}
   584  		if len(memRes.GetDead()) > 0 {
   585  			membershipResponseWithDeadPeers.Store(res)
   586  		}
   587  		responseMessagesReceived <- res
   588  	}
   589  
   590  	interceptor := func(msg *protoext.SignedGossipMessage) {
   591  		if memReq := msg.GetMemReq(); memReq != nil {
   592  			recordMembershipRequest(msg)
   593  			return
   594  		}
   595  
   596  		if memRes := msg.GetMemRes(); memRes != nil {
   597  			recordMembershipResponse(msg)
   598  			return
   599  		}
   600  		// Else, it's an alive message
   601  		aliveMessagesReceived <- msg
   602  	}
   603  
   604  	// p3 is the boot peer of p1, and p1 is the boot peer of p2.
   605  	// p1 sends a (membership) request to p3, and receives a (membership) response back.
   606  	// p2 sends a (membership) request to p1.
   607  	// Therefore, p1 receives both a membership request and a response.
   608  	p1 := createDiscoveryInstanceThatGossipsWithInterceptors(4675, "p1", []string{bootPeer(4677)}, true, noopPolicy, interceptor, defaultTestConfig)
   609  	p2 := createDiscoveryInstance(4676, "p2", []string{bootPeer(4675)})
   610  	p3 := createDiscoveryInstance(4677, "p3", nil)
   611  	instances := []*gossipInstance{p1, p2, p3}
   612  
   613  	assertMembership(t, instances, 2)
   614  
   615  	instances = []*gossipInstance{p1, p2}
   616  	// Stop p3 and wait until its death is detected
   617  	p3.Stop()
   618  	assertMembership(t, instances, 1)
   619  	// Force p1 to send a membership request so it can receive back a response
   620  	// with dead peers.
   621  	p1.InitiateSync(1)
   622  
   623  	// Wait until a response with a dead peer is received
   624  	waitUntilOrFail(t, func() bool {
   625  		return membershipResponseWithDeadPeers.Load() != nil
   626  	})
   627  
   628  	p1.Stop()
   629  	p2.Stop()
   630  
   631  	close(aliveMessagesReceived)
   632  	t.Log("Recorded", len(aliveMessagesReceived), "alive messages")
   633  	t.Log("Recorded", len(requestMessagesReceived), "request messages")
   634  	t.Log("Recorded", len(responseMessagesReceived), "response messages")
   635  
   636  	// Ensure we got alive messages from membership requests and from membership responses
   637  	assert.NotNil(t, membershipResponseWithAlivePeers.Load())
   638  	assert.NotNil(t, membershipRequest.Load())
   639  
   640  	t.Run("alive message", func(t *testing.T) {
   641  		t.Parallel()
   642  		// Spawn a new peer - p4
   643  		p4 := createDiscoveryInstance(4678, "p4", nil)
   644  		defer p4.Stop()
   645  		// Record messages validated
   646  		validatedMessages := make(chan *protoext.SignedGossipMessage, 5000)
   647  		p4.comm.recordValidation(validatedMessages)
   648  		tmpMsgs := make(chan *protoext.SignedGossipMessage, 5000)
   649  		// Replay the messages sent to p1 into p4, and also save them into a temporary channel
   650  		for msg := range aliveMessagesReceived {
   651  			p4.comm.incMsgs <- wrapReceivedMessage(msg)
   652  			tmpMsgs <- msg
   653  		}
   654  
   655  		// Replay the messages received by p4 into a message store
   656  		policy := protoext.NewGossipMessageComparator(0)
   657  		msgStore := msgstore.NewMessageStore(policy, func(_ interface{}) {})
   658  		close(tmpMsgs)
   659  		for msg := range tmpMsgs {
   660  			if msgStore.Add(msg) {
   661  				// Ensure the message was validated if it could be added to the message store
   662  				expectedMessage := <-validatedMessages
   663  				assert.Equal(t, expectedMessage, msg)
   664  			}
   665  		}
   666  		// Ensure we didn't validate any other messages.
   667  		assert.Empty(t, validatedMessages)
   668  	})
   669  
   670  	req := membershipRequest.Load().(*protoext.SignedGossipMessage)
   671  	res := membershipResponseWithDeadPeers.Load().(*protoext.SignedGossipMessage)
   672  	// Ensure the membership response contains both alive and dead peers
   673  	assert.Len(t, res.GetMemRes().GetAlive(), 2)
   674  	assert.Len(t, res.GetMemRes().GetDead(), 1)
   675  
   676  	for _, testCase := range []struct {
   677  		name                  string
   678  		expectedAliveMessages int
   679  		port                  int
   680  		message               *protoext.SignedGossipMessage
   681  		shouldBeReValidated   bool
   682  	}{
   683  		{
   684  			name:                  "membership request",
   685  			expectedAliveMessages: 1,
   686  			message:               req,
   687  			port:                  4679,
   688  			shouldBeReValidated:   true,
   689  		},
   690  		{
   691  			name:                  "membership response",
   692  			expectedAliveMessages: 3,
   693  			message:               res,
   694  			port:                  4680,
   695  		},
   696  	} {
   697  		testCase := testCase
   698  		t.Run(testCase.name, func(t *testing.T) {
   699  			t.Parallel()
   700  			p := createDiscoveryInstance(testCase.port, "p", nil)
   701  			defer p.Stop()
   702  			// Record messages validated
   703  			validatedMessages := make(chan *protoext.SignedGossipMessage, testCase.expectedAliveMessages)
   704  			p.comm.recordValidation(validatedMessages)
   705  
   706  			p.comm.incMsgs <- wrapReceivedMessage(testCase.message)
   707  			// Ensure all messages were validated
   708  			for i := 0; i < testCase.expectedAliveMessages; i++ {
   709  				validatedMsg := <-validatedMessages
   710  				// send the message directly to be included in the message store
   711  				p.comm.incMsgs <- wrapReceivedMessage(validatedMsg)
   712  			}
   713  			// Wait for the messages to be validated
   714  			for i := 0; i < testCase.expectedAliveMessages; i++ {
   715  				<-validatedMessages
   716  			}
   717  			// No more than testCase.expectedAliveMessages should have been validated
   718  			assert.Empty(t, validatedMessages)
   719  
   720  			if !testCase.shouldBeReValidated {
   721  				// Re-submit the message twice and ensure it wasn't validated.
   722  				// If it were validated, a send to the closed validatedMessages channel
   723  				// would be attempted, causing a panic.
   724  				close(validatedMessages)
   725  			}
   726  			p.comm.incMsgs <- wrapReceivedMessage(testCase.message)
   727  			p.comm.incMsgs <- wrapReceivedMessage(testCase.message)
   728  			// Wait until the incoming channel drains, which means the re-submitted messages were consumed.
   729  			waitUntilOrFail(t, func() bool {
   730  				return len(p.comm.incMsgs) == 0
   731  			})
   732  		})
   733  	}
   734  }
   735  
   736  func TestUpdate(t *testing.T) {
   737  	t.Parallel()
   738  	nodeNum := 5
   739  	bootPeers := []string{bootPeer(6611), bootPeer(6612)}
   740  	instances := []*gossipInstance{}
   741  
   742  	inst := createDiscoveryInstance(6611, "d1", bootPeers)
   743  	instances = append(instances, inst)
   744  
   745  	inst = createDiscoveryInstance(6612, "d2", bootPeers)
   746  	instances = append(instances, inst)
   747  
   748  	for i := 3; i <= nodeNum; i++ {
   749  		id := fmt.Sprintf("d%d", i)
   750  		inst = createDiscoveryInstance(6610+i, id, bootPeers)
   751  		instances = append(instances, inst)
   752  	}
   753  
   754  	fullMembership := func() bool {
   755  		return nodeNum-1 == len(instances[nodeNum-1].GetMembership())
   756  	}
   757  
   758  	waitUntilOrFail(t, fullMembership)
   759  
   760  	instances[0].UpdateMetadata([]byte("bla bla"))
   761  	instances[nodeNum-1].UpdateEndpoint("localhost:5511")
   762  
   763  	checkMembership := func() bool {
   764  		for _, member := range instances[nodeNum-1].GetMembership() {
   765  			if string(member.PKIid) == string(instances[0].discoveryImpl().self.PKIid) {
   766  				if "bla bla" != string(member.Metadata) {
   767  					return false
   768  				}
   769  			}
   770  		}
   771  
   772  		for _, member := range instances[0].GetMembership() {
   773  			if string(member.PKIid) == string(instances[nodeNum-1].discoveryImpl().self.PKIid) {
   774  				if "localhost:5511" != string(member.Endpoint) {
   775  					return false
   776  				}
   777  			}
   778  		}
   779  		return true
   780  	}
   781  
   782  	waitUntilOrFail(t, checkMembership)
   783  	stopInstances(t, instances)
   784  }
   785  
   786  func TestInitiateSync(t *testing.T) {
   787  	t.Parallel()
   788  	nodeNum := 10
   789  	bootPeers := []string{bootPeer(3611), bootPeer(3612)}
   790  	instances := []*gossipInstance{}
   791  
   792  	toDie := int32(0)
   793  	for i := 1; i <= nodeNum; i++ {
   794  		id := fmt.Sprintf("d%d", i)
   795  		inst := createDiscoveryInstanceWithNoGossip(3610+i, id, bootPeers)
   796  		instances = append(instances, inst)
   797  		go func() {
   798  			for {
   799  				if atomic.LoadInt32(&toDie) == int32(1) {
   800  					return
   801  				}
   802  				time.Sleep(defaultTestConfig.AliveExpirationTimeout / 3)
   803  				inst.InitiateSync(9)
   804  			}
   805  		}()
   806  	}
   807  	time.Sleep(defaultTestConfig.AliveExpirationTimeout * 4)
   808  	assertMembership(t, instances, nodeNum-1)
   809  	atomic.StoreInt32(&toDie, int32(1))
   810  	stopInstances(t, instances)
   811  }
   812  
   813  func TestSelf(t *testing.T) {
   814  	t.Parallel()
   815  	inst := createDiscoveryInstance(13463, "d1", []string{})
   816  	defer inst.Stop()
   817  	env := inst.Self().Envelope
   818  	sMsg, err := protoext.EnvelopeToGossipMessage(env)
   819  	assert.NoError(t, err)
   820  	member := sMsg.GetAliveMsg().Membership
   821  	assert.Equal(t, "localhost:13463", member.Endpoint)
   822  	assert.Equal(t, []byte("localhost:13463"), member.PkiId)
   823  
   824  	assert.Equal(t, "localhost:13463", inst.Self().Endpoint)
   825  	assert.Equal(t, common.PKIidType("localhost:13463"), inst.Self().PKIid)
   826  }
   827  
   828  func TestExpiration(t *testing.T) {
   829  	t.Parallel()
   830  	nodeNum := 5
   831  	bootPeers := []string{bootPeer(2611), bootPeer(2612)}
   832  	instances := []*gossipInstance{}
   833  
   834  	inst := createDiscoveryInstance(2611, "d1", bootPeers)
   835  	instances = append(instances, inst)
   836  
   837  	inst = createDiscoveryInstance(2612, "d2", bootPeers)
   838  	instances = append(instances, inst)
   839  
   840  	for i := 3; i <= nodeNum; i++ {
   841  		id := fmt.Sprintf("d%d", i)
   842  		inst = createDiscoveryInstance(2610+i, id, bootPeers)
   843  		instances = append(instances, inst)
   844  	}
   845  
   846  	assertMembership(t, instances, nodeNum-1)
   847  
   848  	waitUntilOrFailBlocking(t, instances[nodeNum-1].Stop)
   849  	waitUntilOrFailBlocking(t, instances[nodeNum-2].Stop)
   850  
   851  	assertMembership(t, instances[:len(instances)-2], nodeNum-3)
   852  
   853  	stopAction := &sync.WaitGroup{}
   854  	for i, inst := range instances {
   855  		if i+2 == nodeNum {
   856  			break
   857  		}
   858  		stopAction.Add(1)
   859  		go func(inst *gossipInstance) {
   860  			defer stopAction.Done()
   861  			inst.Stop()
   862  		}(inst)
   863  	}
   864  
   865  	waitUntilOrFailBlocking(t, stopAction.Wait)
   866  }
   867  
   868  func TestGetFullMembership(t *testing.T) {
   869  	t.Parallel()
   870  	nodeNum := 15
   871  	bootPeers := []string{bootPeer(5511), bootPeer(5512)}
   872  	instances := []*gossipInstance{}
   873  	var inst *gossipInstance
   874  
   875  	for i := 3; i <= nodeNum; i++ {
   876  		id := fmt.Sprintf("d%d", i)
   877  		inst = createDiscoveryInstance(5510+i, id, bootPeers)
   878  		instances = append(instances, inst)
   879  	}
   880  
   881  	time.Sleep(time.Second)
   882  
   883  	inst = createDiscoveryInstance(5511, "d1", bootPeers)
   884  	instances = append(instances, inst)
   885  
   886  	inst = createDiscoveryInstance(5512, "d2", bootPeers)
   887  	instances = append(instances, inst)
   888  
   889  	assertMembership(t, instances, nodeNum-1)
   890  
   891  	// Ensure that internal endpoint was propagated to everyone
   892  	for _, inst := range instances {
   893  		for _, member := range inst.GetMembership() {
   894  			assert.NotEmpty(t, member.InternalEndpoint)
   895  			assert.NotEmpty(t, member.Endpoint)
   896  		}
   897  	}
   898  
   899  	// Check that Lookup() is valid
   900  	for _, inst := range instances {
   901  		for _, member := range inst.GetMembership() {
   902  			assert.Equal(t, string(member.PKIid), inst.Lookup(member.PKIid).Endpoint)
   903  			assert.Equal(t, member.PKIid, inst.Lookup(member.PKIid).PKIid)
   904  		}
   905  	}
   906  
   907  	stopInstances(t, instances)
   908  }
   909  
   910  func TestGossipDiscoveryStopping(t *testing.T) {
   911  	t.Parallel()
   912  	inst := createDiscoveryInstance(9611, "d1", []string{bootPeer(9611)})
   913  	time.Sleep(time.Second)
   914  	waitUntilOrFailBlocking(t, inst.Stop)
   915  }
   916  
   917  func TestGossipDiscoverySkipConnectingToLocalhostBootstrap(t *testing.T) {
   918  	t.Parallel()
   919  	inst := createDiscoveryInstance(11611, "d1", []string{"localhost:11611", "127.0.0.1:11611"})
   920  	inst.comm.lock.Lock()
   921  	inst.comm.mock = &mock.Mock{}
   922  	inst.comm.mock.On("SendToPeer", mock.Anything, mock.Anything).Run(func(mock.Arguments) {
   923  		t.Fatal("Should not have connected to any peer")
   924  	})
   925  	inst.comm.mock.On("Ping", mock.Anything).Run(func(mock.Arguments) {
   926  		t.Fatal("Should not have connected to any peer")
   927  	})
   928  	inst.comm.lock.Unlock()
   929  	time.Sleep(time.Second * 3)
   930  	waitUntilOrFailBlocking(t, inst.Stop)
   931  }
   932  
   933  func TestConvergence(t *testing.T) {
   934  	t.Parallel()
   935  	// scenario:
   936  	// {boot peer: [peer list]}
   937  	// {d1: d2, d3, d4}
   938  	// {d5: d6, d7, d8}
   939  	// {d9: d10, d11, d12}
   940  	// connect all boot peers with d13
   941  	// take down d13
   942  	// ensure still full membership
   943  	instances := []*gossipInstance{}
   944  	for _, i := range []int{1, 5, 9} {
   945  		bootPort := 4610 + i
   946  		id := fmt.Sprintf("d%d", i)
   947  		leader := createDiscoveryInstance(bootPort, id, []string{})
   948  		instances = append(instances, leader)
   949  		for minionIndex := 1; minionIndex <= 3; minionIndex++ {
   950  			id := fmt.Sprintf("d%d", i+minionIndex)
   951  			minion := createDiscoveryInstance(4610+minionIndex+i, id, []string{bootPeer(bootPort)})
   952  			instances = append(instances, minion)
   953  		}
   954  	}
   955  
   956  	assertMembership(t, instances, 3)
   957  	connector := createDiscoveryInstance(4623, "d13", []string{bootPeer(4611), bootPeer(4615), bootPeer(4619)})
   958  	instances = append(instances, connector)
   959  	assertMembership(t, instances, 12)
   960  	connector.Stop()
   961  	instances = instances[:len(instances)-1]
   962  	assertMembership(t, instances, 11)
   963  	stopInstances(t, instances)
   964  }
   965  
   966  func TestDisclosurePolicyWithPull(t *testing.T) {
   967  	t.Parallel()
   968  	// Scenario: run 2 groups of peers that simulate 2 organizations:
   969  	// {p0, p1, p2, p3, p4}
   970  	// {p5, p6, p7, p8, p9}
   971  	// Only peers that have an even id have external addresses
   972  	// and only these peers should be published to peers of the other group,
   973  	// while the only ones that need to know about them are peers
   974  	// that have an even id themselves.
   975  	// Furthermore, peers in different groups should not know the internal
   976  	// addresses of other peers.
   977  
   978  	// This bootstrap map assigns each peer its bootstrap peer.
   979  	// In practice (production), peers should only use peers from their own org as bootstrap peers,
   980  	// but the discovery layer is ignorant of organizations.
   981  	bootPeerMap := map[int]int{
   982  		8610: 8616,
   983  		8611: 8610,
   984  		8612: 8610,
   985  		8613: 8610,
   986  		8614: 8610,
   987  		8615: 8616,
   988  		8616: 8610,
   989  		8617: 8616,
   990  		8618: 8616,
   991  		8619: 8616,
   992  	}
   993  
   994  	// This map matches each peer to the peers it should know about in the test scenario.
   995  	peersThatShouldBeKnownToPeers := map[int][]int{
   996  		8610: {8611, 8612, 8613, 8614, 8616, 8618},
   997  		8611: {8610, 8612, 8613, 8614},
   998  		8612: {8610, 8611, 8613, 8614, 8616, 8618},
   999  		8613: {8610, 8611, 8612, 8614},
  1000  		8614: {8610, 8611, 8612, 8613, 8616, 8618},
  1001  		8615: {8616, 8617, 8618, 8619},
  1002  		8616: {8610, 8612, 8614, 8615, 8617, 8618, 8619},
  1003  		8617: {8615, 8616, 8618, 8619},
  1004  		8618: {8610, 8612, 8614, 8615, 8616, 8617, 8619},
  1005  		8619: {8615, 8616, 8617, 8618},
  1006  	}
  1007  	// Create the peers in the two groups
  1008  	instances1, instances2 := createDisjointPeerGroupsWithNoGossip(bootPeerMap)
  1009  	// Wait until membership is established. This should happen quickly because
  1010  	// the instances are configured to pull membership at a very high frequency
  1011  	// from up to 10 peers (which amounts to pulling from everyone).
  1012  	waitUntilOrFail(t, func() bool {
  1013  		for _, inst := range append(instances1, instances2...) {
  1014  			// Ensure the expected membership is equal in size to the actual membership
  1015  			// of each peer.
  1016  			portsOfKnownMembers := portsOfMembers(inst.GetMembership())
  1017  			if len(peersThatShouldBeKnownToPeers[inst.port]) != len(portsOfKnownMembers) {
  1018  				return false
  1019  			}
  1020  		}
  1021  		return true
  1022  	})
  1023  	for _, inst := range append(instances1, instances2...) {
  1024  		portsOfKnownMembers := portsOfMembers(inst.GetMembership())
  1025  		// Ensure the expected membership is equal to the actual membership
  1026  		// of each peer. portsOfMembers returns a sorted slice, so assert.Equal does the job.
  1027  		assert.Equal(t, peersThatShouldBeKnownToPeers[inst.port], portsOfKnownMembers)
  1028  		// Next, check that internal endpoints aren't leaked across groups.
  1029  		for _, knownPeer := range inst.GetMembership() {
  1030  			// If internal endpoint is known, ensure the peers are in the same group
  1031  			// unless the peer in question is a peer that has a public address.
  1032  			// We cannot control what we disclose about ourselves when we send a membership request
  1033  			if len(knownPeer.InternalEndpoint) > 0 && inst.port%2 != 0 {
  1034  				bothInGroup1 := portOfEndpoint(knownPeer.Endpoint) < 8615 && inst.port < 8615
  1035  				bothInGroup2 := portOfEndpoint(knownPeer.Endpoint) >= 8615 && inst.port >= 8615
  1036  				assert.True(t, bothInGroup1 || bothInGroup2, "%v knows about %v's internal endpoint", inst.port, knownPeer.InternalEndpoint)
  1037  			}
  1038  		}
  1039  	}
  1040  
  1041  	t.Log("Shutting down instance 0...")
  1042  	// Now, we shut down instance 0 and ensure that peers that shouldn't know it
  1043  	// do not learn about it via membership requests
  1044  	stopInstances(t, []*gossipInstance{instances1[0]})
  1045  	time.Sleep(time.Second * 6)
  1046  	for _, inst := range append(instances1[1:], instances2...) {
  1047  		if peersThatShouldBeKnownToPeers[inst.port][0] == 8610 {
  1048  			assert.Equal(t, 1, inst.Discovery.(*gossipDiscoveryImpl).deadMembership.Size())
  1049  		} else {
  1050  			assert.Equal(t, 0, inst.Discovery.(*gossipDiscoveryImpl).deadMembership.Size())
  1051  		}
  1052  	}
  1053  	stopInstances(t, instances1[1:])
  1054  	stopInstances(t, instances2)
  1055  }
  1056  
  1057  func createDisjointPeerGroupsWithNoGossip(bootPeerMap map[int]int) ([]*gossipInstance, []*gossipInstance) {
  1058  	instances1 := []*gossipInstance{}
  1059  	instances2 := []*gossipInstance{}
  1060  	for group := 0; group < 2; group++ {
  1061  		for i := 0; i < 5; i++ {
  1062  			group := group
  1063  			id := fmt.Sprintf("id%d", group*5+i)
  1064  			port := 8610 + group*5 + i
  1065  			bootPeers := []string{bootPeer(bootPeerMap[port])}
  1066  			pol := discPolForPeer(port)
  1067  			inst := createDiscoveryInstanceWithNoGossipWithDisclosurePolicy(8610+group*5+i, id, bootPeers, pol)
  1068  			inst.initiateSync(defaultTestConfig.AliveExpirationTimeout/3, 10)
  1069  			if group == 0 {
  1070  				instances1 = append(instances1, inst)
  1071  			} else {
  1072  				instances2 = append(instances2, inst)
  1073  			}
  1074  		}
  1075  	}
  1076  	return instances1, instances2
  1077  }
  1078  
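        // Ports 8610-8614 form the first group (organization) and 8615-8619 the
        // second; peers on even ports simulate anchor peers with a public endpoint.
        // The policy below implements the disclosure rules described in
        // TestDisclosurePolicyWithPull.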
  1079  func discPolForPeer(selfPort int) DisclosurePolicy {
  1080  	return func(remotePeer *NetworkMember) (Sieve, EnvelopeFilter) {
  1081  		targetPortStr := strings.Split(remotePeer.Endpoint, ":")[1]
  1082  		targetPort, _ := strconv.ParseInt(targetPortStr, 10, 64)
  1083  		return func(msg *protoext.SignedGossipMessage) bool {
  1084  				portOfAliveMsgStr := strings.Split(msg.GetAliveMsg().Membership.Endpoint, ":")[1]
  1085  				portOfAliveMsg, _ := strconv.ParseInt(portOfAliveMsgStr, 10, 64)
  1086  
  1087  				if portOfAliveMsg < 8615 && targetPort < 8615 {
  1088  					return true
  1089  				}
  1090  				if portOfAliveMsg >= 8615 && targetPort >= 8615 {
  1091  					return true
  1092  				}
  1093  
  1094  				// Else, expose peers with even ids to other peers with even ids
  1095  				return portOfAliveMsg%2 == 0 && targetPort%2 == 0
  1096  			}, func(msg *protoext.SignedGossipMessage) *proto.Envelope {
  1097  				envelope := protoG.Clone(msg.Envelope).(*proto.Envelope)
  1098  				if selfPort < 8615 && targetPort >= 8615 {
  1099  					envelope.SecretEnvelope = nil
  1100  				}
  1101  
  1102  				if selfPort >= 8615 && targetPort < 8615 {
  1103  					envelope.SecretEnvelope = nil
  1104  				}
  1105  
  1106  				return envelope
  1107  			}
  1108  	}
  1109  }
  1110  
  1111  func TestCertificateChange(t *testing.T) {
  1112  	t.Parallel()
  1113  
  1114  	bootPeers := []string{bootPeer(42611), bootPeer(42612), bootPeer(42613)}
  1115  	p1 := createDiscoveryInstance(42611, "d1", bootPeers)
  1116  	p2 := createDiscoveryInstance(42612, "d2", bootPeers)
  1117  	p3 := createDiscoveryInstance(42613, "d3", bootPeers)
  1118  
  1119  	// Wait for membership establishment
  1120  	assertMembership(t, []*gossipInstance{p1, p2, p3}, 2)
  1121  
  1122  	// Shutdown the second peer
  1123  	waitUntilOrFailBlocking(t, p2.Stop)
  1124  
  1125  	var pingCountFrom1 uint32
  1126  	var pingCountFrom3 uint32
  1127  	// Program mocks to increment ping counters
  1128  	p1.comm.lock.Lock()
  1129  	p1.comm.mock = &mock.Mock{}
  1130  	p1.comm.mock.On("SendToPeer", mock.Anything, mock.Anything)
  1131  	p1.comm.mock.On("Ping").Run(func(arguments mock.Arguments) {
  1132  		atomic.AddUint32(&pingCountFrom1, 1)
  1133  	})
  1134  	p1.comm.lock.Unlock()
  1135  
  1136  	p3.comm.lock.Lock()
  1137  	p3.comm.mock = &mock.Mock{}
  1138  	p3.comm.mock.On("SendToPeer", mock.Anything, mock.Anything)
  1139  	p3.comm.mock.On("Ping").Run(func(arguments mock.Arguments) {
  1140  		atomic.AddUint32(&pingCountFrom3, 1)
  1141  	})
  1142  	p3.comm.lock.Unlock()
  1143  
  1144  	pingCount1 := func() uint32 {
  1145  		return atomic.LoadUint32(&pingCountFrom1)
  1146  	}
  1147  
  1148  	pingCount3 := func() uint32 {
  1149  		return atomic.LoadUint32(&pingCountFrom3)
  1150  	}
  1151  
  1152  	c1 := pingCount1()
  1153  	c3 := pingCount3()
  1154  
  1155  	// Ensure the first peer and third peer try to reconnect to it
  1156  	waitUntilTimeoutOrFail(t, func() bool {
  1157  		return pingCount1() > c1 && pingCount3() > c3
  1158  	}, timeout)
  1159  
  1160  	// Tell the first peer that the second peer's PKI-ID has changed,
  1161  	// so that it purges it from the membership entirely
  1162  	p1.comm.identitySwitch <- common.PKIidType("localhost:42612")
  1163  
  1164  	c1 = pingCount1()
  1165  	c3 = pingCount3()
  1166  	// Ensure third peer tries to reconnect to it
  1167  	waitUntilTimeoutOrFail(t, func() bool {
  1168  		return pingCount3() > c3
  1169  	}, timeout)
  1170  
  1171  	// Ensure the first peer stops trying
  1172  	assert.Equal(t, c1, pingCount1())
  1173  
  1174  	waitUntilOrFailBlocking(t, p1.Stop)
  1175  	waitUntilOrFailBlocking(t, p3.Stop)
  1176  }
  1177  
  1178  func TestMsgStoreExpiration(t *testing.T) {
  1179  	// Start 4 instances, wait for membership to build, then stop 2 instances.
  1180  	// Check that membership in the 2 running instances becomes 2.
  1181  	// Wait for expiration and check that alive messages and the related map entries are removed from the running instances.
  1182  	t.Parallel()
  1183  	nodeNum := 4
  1184  	bootPeers := []string{bootPeer(12611), bootPeer(12612)}
  1185  	instances := []*gossipInstance{}
  1186  
  1187  	inst := createDiscoveryInstance(12611, "d1", bootPeers)
  1188  	instances = append(instances, inst)
  1189  
  1190  	inst = createDiscoveryInstance(12612, "d2", bootPeers)
  1191  	instances = append(instances, inst)
  1192  
  1193  	for i := 3; i <= nodeNum; i++ {
  1194  		id := fmt.Sprintf("d%d", i)
  1195  		inst = createDiscoveryInstance(12610+i, id, bootPeers)
  1196  		instances = append(instances, inst)
  1197  	}
  1198  
  1199  	assertMembership(t, instances, nodeNum-1)
  1200  
  1201  	waitUntilOrFailBlocking(t, instances[nodeNum-1].Stop)
  1202  	waitUntilOrFailBlocking(t, instances[nodeNum-2].Stop)
  1203  
  1204  	assertMembership(t, instances[:len(instances)-2], nodeNum-3)
  1205  
  1206  	checkMessages := func() bool {
  1207  		for _, inst := range instances[:len(instances)-2] {
  1208  			for _, downInst := range instances[len(instances)-2:] {
  1209  				downCastInst := inst.discoveryImpl()
  1210  				downCastInst.lock.RLock()
  1211  				if _, exist := downCastInst.aliveLastTS[string(downInst.discoveryImpl().self.PKIid)]; exist {
  1212  					downCastInst.lock.RUnlock()
  1213  					return false
  1214  				}
  1215  				if _, exist := downCastInst.deadLastTS[string(downInst.discoveryImpl().self.PKIid)]; exist {
  1216  					downCastInst.lock.RUnlock()
  1217  					return false
  1218  				}
  1219  				if _, exist := downCastInst.id2Member[string(downInst.discoveryImpl().self.PKIid)]; exist {
  1220  					downCastInst.lock.RUnlock()
  1221  					return false
  1222  				}
  1223  				if downCastInst.aliveMembership.MsgByID(downInst.discoveryImpl().self.PKIid) != nil {
  1224  					downCastInst.lock.RUnlock()
  1225  					return false
  1226  				}
  1227  				if downCastInst.deadMembership.MsgByID(downInst.discoveryImpl().self.PKIid) != nil {
  1228  					downCastInst.lock.RUnlock()
  1229  					return false
  1230  				}
  1231  				for _, am := range downCastInst.msgStore.Get() {
  1232  					m := am.(*protoext.SignedGossipMessage).GetAliveMsg()
  1233  					if bytes.Equal(m.Membership.PkiId, downInst.discoveryImpl().self.PKIid) {
  1234  						downCastInst.lock.RUnlock()
  1235  						return false
  1236  					}
  1237  				}
  1238  				downCastInst.lock.RUnlock()
  1239  			}
  1240  		}
  1241  		return true
  1242  	}
  1243  
  1244  	waitUntilTimeoutOrFail(t, checkMessages, defaultTestConfig.AliveExpirationTimeout*(msgExpirationFactor+5))
  1245  
  1246  	assertMembership(t, instances[:len(instances)-2], nodeNum-3)
  1247  
  1248  	stopInstances(t, instances[:len(instances)-2])
  1249  }
  1250  
  1251  func TestExpirationNoSecretEnvelope(t *testing.T) {
  1252  	t.Parallel()
  1253  
  1254  	l, err := zap.NewDevelopment()
  1255  	assert.NoError(t, err)
  1256  
  1257  	removed := make(chan struct{})
  1258  	logger := flogging.NewFabricLogger(l, zap.Hooks(func(entry zapcore.Entry) error {
  1259  		if strings.Contains(entry.Message, "Removing member: Endpoint: foo") {
  1260  			removed <- struct{}{}
  1261  		}
  1262  		return nil
  1263  	}))
  1264  
  1265  	msgStore := newAliveMsgStore(&gossipDiscoveryImpl{
  1266  		aliveExpirationTimeout: time.Millisecond,
  1267  		lock:                   &sync.RWMutex{},
  1268  		aliveMembership:        util.NewMembershipStore(),
  1269  		deadMembership:         util.NewMembershipStore(),
  1270  		logger:                 logger,
  1271  	})
  1272  
  1273  	msg := &proto.GossipMessage{
  1274  		Content: &proto.GossipMessage_AliveMsg{
  1275  			AliveMsg: &proto.AliveMessage{Membership: &proto.Member{
  1276  				Endpoint: "foo",
  1277  			}},
  1278  		},
  1279  	}
  1280  
  1281  	sMsg, err := protoext.NoopSign(msg)
  1282  	assert.NoError(t, err)
  1283  
  1284  	msgStore.Add(sMsg)
  1285  	select {
  1286  	case <-removed:
  1287  	case <-time.After(time.Second * 10):
  1288  		t.Fatalf("timed out")
  1289  	}
  1290  }
  1291  
  1292  func TestMsgStoreExpirationWithMembershipMessages(t *testing.T) {
  1293  	// Creates 3 discovery instances without gossip communication
  1294  	// Generates MembershipRequest msg for each instance using createMembershipRequest
  1295  	// Generates Alive msg for each instance using createAliveMessage
  1296  	// Builds membership using Alive msgs
  1297  	// Checks msgStore and related maps
  1298  	// Generates MembershipResponse msgs for each instance using createMembershipResponse
  1299  	// Generates new set of Alive msgs and processes them
  1300  	// Checks msgStore and related maps
  1301  	// Waits for expiration and checks msgStore and related maps
  1302  	// Processes stored MembershipRequest msg and checks msgStore and related maps
  1303  	// Processes stored MembershipResponse msg and checks msgStore and related maps
  1304  
  1305  	t.Parallel()
  1306  	bootPeers := []string{}
  1307  	peersNum := 3
  1308  	instances := []*gossipInstance{}
  1309  	aliveMsgs := []*protoext.SignedGossipMessage{}
  1310  	newAliveMsgs := []*protoext.SignedGossipMessage{}
  1311  	memReqMsgs := []*protoext.SignedGossipMessage{}
  1312  	memRespMsgs := make(map[int][]*proto.MembershipResponse)
  1313  
  1314  	for i := 0; i < peersNum; i++ {
  1315  		id := fmt.Sprintf("d%d", i)
  1316  		inst := createDiscoveryInstanceWithNoGossip(22610+i, id, bootPeers)
  1317  		inst.comm.disableComm = true
  1318  		instances = append(instances, inst)
  1319  	}
  1320  
  1321  	// Creating MembershipRequest messages
  1322  	for i := 0; i < peersNum; i++ {
  1323  		memReqMsg, _ := instances[i].discoveryImpl().createMembershipRequest(true)
  1324  		sMsg, _ := protoext.NoopSign(memReqMsg)
  1325  		memReqMsgs = append(memReqMsgs, sMsg)
  1326  	}
  1327  	// Creating Alive messages
  1328  	for i := 0; i < peersNum; i++ {
  1329  		aliveMsg, _ := instances[i].discoveryImpl().createSignedAliveMessage(true)
  1330  		aliveMsgs = append(aliveMsgs, aliveMsg)
  1331  	}
  1332  
  1333  	repeatForFiltered := func(n int, filter func(i int) bool, action func(i int)) {
  1334  		for i := 0; i < n; i++ {
  1335  			if filter(i) {
  1336  				continue
  1337  			}
  1338  			action(i)
  1339  		}
  1340  	}
  1341  
  1342  	// Handling Alive messages
  1343  	for i := 0; i < peersNum; i++ {
  1344  		for k := 0; k < peersNum; k++ {
  1345  			instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1346  				msg: aliveMsgs[k],
  1347  				info: &protoext.ConnectionInfo{
  1348  					ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1349  				},
  1350  			})
  1351  		}
  1352  	}
  1353  
  1354  	checkExistence := func(instances []*gossipInstance, msgs []*protoext.SignedGossipMessage, index int, i int, step string) {
  1355  		_, exist := instances[index].discoveryImpl().aliveLastTS[string(instances[i].discoveryImpl().self.PKIid)]
  1356  		assert.True(t, exist, fmt.Sprint(step, " Data from alive msg ", i, " doesn't exist in aliveLastTS of discovery inst ", index))
  1357  
  1358  		_, exist = instances[index].discoveryImpl().id2Member[string(instances[i].discoveryImpl().self.PKIid)]
  1359  		assert.True(t, exist, fmt.Sprint(step, " id2Member mapping doesn't exist for alive msg ", i, " of discovery inst ", index))
  1360  
  1361  		assert.NotNil(t, instances[index].discoveryImpl().aliveMembership.MsgByID(instances[i].discoveryImpl().self.PKIid), fmt.Sprint(step, " Alive msg ", i, " doesn't exist in aliveMembership of discovery inst ", index))
  1362  
  1363  		assert.Contains(t, instances[index].discoveryImpl().msgStore.Get(), msgs[i], fmt.Sprint(step, " Alive msg ", i, " not stored in store of discovery inst ", index))
  1364  	}
  1365  
  1366  	checkAliveMsgExist := func(instances []*gossipInstance, msgs []*protoext.SignedGossipMessage, index int, step string) {
  1367  		instances[index].discoveryImpl().lock.RLock()
  1368  		defer instances[index].discoveryImpl().lock.RUnlock()
  1369  		repeatForFiltered(peersNum,
  1370  			func(k int) bool {
  1371  				return k == index
  1372  			},
  1373  			func(k int) {
  1374  				checkExistence(instances, msgs, index, k, step)
  1375  			})
  1376  	}
  1377  
  1378  	// Check that the Alive messages were processed
  1379  	for i := 0; i < peersNum; i++ {
  1380  		checkAliveMsgExist(instances, aliveMsgs, i, "[Step 1 - processing aliveMsg]")
  1381  	}
  1382  
  1383  	// Creating MembershipResponse while all instances have full membership
  1384  	for i := 0; i < peersNum; i++ {
  1385  		peerToResponse := &NetworkMember{
  1386  			Metadata:         []byte{},
  1387  			PKIid:            []byte(fmt.Sprintf("localhost:%d", 22610+i)),
  1388  			Endpoint:         fmt.Sprintf("localhost:%d", 22610+i),
  1389  			InternalEndpoint: fmt.Sprintf("localhost:%d", 22610+i),
  1390  		}
  1391  		memRespMsgs[i] = []*proto.MembershipResponse{}
  1392  		repeatForFiltered(peersNum,
  1393  			func(k int) bool {
  1394  				return k == i
  1395  			},
  1396  			func(k int) {
  1397  				aliveMsg, _ := instances[k].discoveryImpl().createSignedAliveMessage(true)
  1398  				memResp := instances[k].discoveryImpl().createMembershipResponse(aliveMsg, peerToResponse)
  1399  				memRespMsgs[i] = append(memRespMsgs[i], memResp)
  1400  			})
  1401  	}
  1402  
  1403  	// Re-create Alive msgs with a higher seq_num, to make sure the Alive msgs embedded in memReq and memResp are older
  1404  	for i := 0; i < peersNum; i++ {
  1405  		aliveMsg, _ := instances[i].discoveryImpl().createSignedAliveMessage(true)
  1406  		newAliveMsgs = append(newAliveMsgs, aliveMsg)
  1407  	}
  1408  
  1409  	// Handling new Alive set
  1410  	for i := 0; i < peersNum; i++ {
  1411  		for k := 0; k < peersNum; k++ {
  1412  			instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1413  				msg: newAliveMsgs[k],
  1414  				info: &protoext.ConnectionInfo{
  1415  					ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1416  				},
  1417  			})
  1418  		}
  1419  	}
  1420  
  1421  	// Check that the new Alive messages were processed
  1422  	for i := 0; i < peersNum; i++ {
  1423  		checkAliveMsgExist(instances, newAliveMsgs, i, "[Step 2 - processing aliveMsg]")
  1424  	}
  1425  
  1426  	checkAliveMsgNotExist := func(instances []*gossipInstance, msgs []*protoext.SignedGossipMessage, index int, step string) {
  1427  		instances[index].discoveryImpl().lock.RLock()
  1428  		defer instances[index].discoveryImpl().lock.RUnlock()
  1429  		assert.Empty(t, instances[index].discoveryImpl().aliveLastTS, fmt.Sprint(step, " Data from alive msg still exists in aliveLastTS of discovery inst ", index))
  1430  		assert.Empty(t, instances[index].discoveryImpl().deadLastTS, fmt.Sprint(step, " Data from alive msg still exists in deadLastTS of discovery inst ", index))
  1431  		assert.Empty(t, instances[index].discoveryImpl().id2Member, fmt.Sprint(step, " id2Member mapping still still contains data related to Alive msg: discovery inst ", index))
  1432  		assert.Empty(t, instances[index].discoveryImpl().msgStore.Get(), fmt.Sprint(step, " Expired Alive msg still stored in store of discovery inst ", index))
  1433  		assert.Zero(t, instances[index].discoveryImpl().aliveMembership.Size(), fmt.Sprint(step, " Alive membership list is not empty, discovery instance ", index))
  1434  		assert.Zero(t, instances[index].discoveryImpl().deadMembership.Size(), fmt.Sprint(step, " Dead membership list is not empty, discovery instance ", index))
  1435  	}
  1436  
  1437  	// Sleeping until the Alive messages expire in the message store
  1438  	time.Sleep(defaultTestConfig.AliveExpirationTimeout * (msgExpirationFactor + 5))
  1439  
  1440  	// Checking that the Alive messages have expired
  1441  	for i := 0; i < peersNum; i++ {
  1442  		checkAliveMsgNotExist(instances, newAliveMsgs, i, "[Step 3 - expiration in msg store]")
  1443  	}
  1444  
  1445  	// Processing old MembershipRequest messages
  1446  	for i := 0; i < peersNum; i++ {
  1447  		repeatForFiltered(peersNum,
  1448  			func(k int) bool {
  1449  				return k == i
  1450  			},
  1451  			func(k int) {
  1452  				instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1453  					msg: memReqMsgs[k],
  1454  					info: &protoext.ConnectionInfo{
  1455  						ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1456  					},
  1457  				})
  1458  			})
  1459  	}
  1460  
  1461  	// MembershipRequest processing didn't change anything
  1462  	for i := 0; i < peersNum; i++ {
  1463  		checkAliveMsgNotExist(instances, aliveMsgs, i, "[Step 4 - memReq processing after expiration]")
  1464  	}
  1465  
  1466  	// Processing the old (earlier) Alive messages
  1467  	for i := 0; i < peersNum; i++ {
  1468  		for k := 0; k < peersNum; k++ {
  1469  			instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1470  				msg: aliveMsgs[k],
  1471  				info: &protoext.ConnectionInfo{
  1472  					ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1473  				},
  1474  			})
  1475  		}
  1476  	}
  1477  
  1478  	// Alive msg processing didn't change anything
  1479  	for i := 0; i < peersNum; i++ {
  1480  		checkAliveMsgNotExist(instances, aliveMsgs, i, "[Step 5.1 - old aliveMsg processing after expiration]")
  1481  		checkAliveMsgNotExist(instances, newAliveMsgs, i, "[Step 5.2 - new aliveMsg processing after expiration]")
  1482  	}
  1483  
  1484  	// Handling old MembershipResponse messages
  1485  	for i := 0; i < peersNum; i++ {
  1486  		respForPeer := memRespMsgs[i]
  1487  		for _, msg := range respForPeer {
  1488  			sMsg, _ := protoext.NoopSign(&proto.GossipMessage{
  1489  				Tag:   proto.GossipMessage_EMPTY,
  1490  				Nonce: uint64(0),
  1491  				Content: &proto.GossipMessage_MemRes{
  1492  					MemRes: msg,
  1493  				},
  1494  			})
  1495  			instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1496  				msg: sMsg,
  1497  				info: &protoext.ConnectionInfo{
  1498  					ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1499  				},
  1500  			})
  1501  		}
  1502  	}
  1503  
  1504  	// MembershipResponse msg processing didn't change anything
  1505  	for i := 0; i < peersNum; i++ {
  1506  		checkAliveMsgNotExist(instances, aliveMsgs, i, "[Step 6 - MembershipResp processing after expiration]")
  1507  	}
  1508  
  1509  	for i := 0; i < peersNum; i++ {
  1510  		instances[i].Stop()
  1511  	}
  1513  }
  1514  
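        // TestAliveMsgStore verifies the alive-message store semantics: a new
        // AliveMsg passes CheckValid and can be added, an already-stored AliveMsg
        // is no longer considered valid, and non-alive messages (such as a
        // MembershipRequest) cause CheckValid and Add to panic.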
  1515  func TestAliveMsgStore(t *testing.T) {
  1516  	t.Parallel()
  1517  
  1518  	bootPeers := []string{}
  1519  	peersNum := 2
  1520  	instances := []*gossipInstance{}
  1521  	aliveMsgs := []*protoext.SignedGossipMessage{}
  1522  	memReqMsgs := []*protoext.SignedGossipMessage{}
  1523  
  1524  	for i := 0; i < peersNum; i++ {
  1525  		id := fmt.Sprintf("d%d", i)
  1526  		inst := createDiscoveryInstanceWithNoGossip(32610+i, id, bootPeers)
  1527  		instances = append(instances, inst)
  1528  	}
  1529  
  1530  	// Creating MembershipRequest messages
  1531  	for i := 0; i < peersNum; i++ {
  1532  		memReqMsg, _ := instances[i].discoveryImpl().createMembershipRequest(true)
  1533  		sMsg, _ := protoext.NoopSign(memReqMsg)
  1534  		memReqMsgs = append(memReqMsgs, sMsg)
  1535  	}
  1536  	// Creating Alive messages
  1537  	for i := 0; i < peersNum; i++ {
  1538  		aliveMsg, _ := instances[i].discoveryImpl().createSignedAliveMessage(true)
  1539  		aliveMsgs = append(aliveMsgs, aliveMsg)
  1540  	}
  1541  
  1542  	// Check new alive msgs
  1543  	for _, msg := range aliveMsgs {
  1544  		assert.True(t, instances[0].discoveryImpl().msgStore.CheckValid(msg), "aliveMsgStore CheckValid returns false on new AliveMsg")
  1545  	}
  1546  
  1547  	// Add new alive msgs
  1548  	for _, msg := range aliveMsgs {
  1549  		assert.True(t, instances[0].discoveryImpl().msgStore.Add(msg), "aliveMsgStore Add returns false on new AliveMsg")
  1550  	}
  1551  
  1552  	// Check existing alive msgs
  1553  	for _, msg := range aliveMsgs {
  1554  		assert.False(t, instances[0].discoveryImpl().msgStore.CheckValid(msg), "aliveMsgStore CheckValid returns true on existing AliveMsg")
  1555  	}
  1556  
  1557  	// Check non-alive msgs
  1558  	for _, msg := range memReqMsgs {
  1559  		assert.Panics(t, func() { instances[1].discoveryImpl().msgStore.CheckValid(msg) }, "aliveMsgStore CheckValid should panic on new MembershipRequest msg")
  1560  		assert.Panics(t, func() { instances[1].discoveryImpl().msgStore.Add(msg) }, "aliveMsgStore Add should panic on new MembershipRequest msg")
  1561  	}
  1562  }
  1563  
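        // TestMemRespDisclosurePol verifies that a custom disclosure policy on the
        // bootstrap peer (d1) controls who learns the membership: the policy below
        // discloses only to the peer listening on localhost:7880 (d3), so d2 keeps
        // sending membership requests but never receives a response.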
  1564  func TestMemRespDisclosurePol(t *testing.T) {
  1565  	t.Parallel()
  1566  	pol := func(remotePeer *NetworkMember) (Sieve, EnvelopeFilter) {
  1567  		return func(_ *protoext.SignedGossipMessage) bool {
  1568  				return remotePeer.Endpoint == "localhost:7880"
  1569  			}, func(m *protoext.SignedGossipMessage) *proto.Envelope {
  1570  				return m.Envelope
  1571  			}
  1572  	}
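        	// The Sieve returned above decides, per message, whether it may be
        	// disclosed to the remote peer; the EnvelopeFilter can alter the
        	// envelope before it is sent, and here passes it through unchanged.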
  1573  	d1 := createDiscoveryInstanceThatGossips(7878, "d1", []string{}, true, pol, defaultTestConfig)
  1574  	defer d1.Stop()
  1575  	d2 := createDiscoveryInstanceThatGossips(7879, "d2", []string{"localhost:7878"}, true, noopPolicy, defaultTestConfig)
  1576  	defer d2.Stop()
  1577  	d3 := createDiscoveryInstanceThatGossips(7880, "d3", []string{"localhost:7878"}, true, noopPolicy, defaultTestConfig)
  1578  	defer d3.Stop()
  1579  	// d1 and d3 know each other, and also know about d2
  1580  	assertMembership(t, []*gossipInstance{d1, d3}, 2)
  1581  	// d2 doesn't know about anyone, because the bootstrap peer ignores it due to the custom policy
  1582  	assertMembership(t, []*gossipInstance{d2}, 0)
  1583  	assert.Zero(t, d2.receivedMsgCount())
  1584  	assert.NotZero(t, d2.sentMsgCount())
  1585  }
  1586  
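        // TestMembersByID verifies that Members.ByID indexes members by their PKI-ID.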
  1587  func TestMembersByID(t *testing.T) {
  1588  	members := Members{
  1589  		{PKIid: common.PKIidType("p0"), Endpoint: "p0"},
  1590  		{PKIid: common.PKIidType("p1"), Endpoint: "p1"},
  1591  	}
  1592  	byID := members.ByID()
  1593  	assert.Len(t, byID, 2)
  1594  	assert.Equal(t, "p0", byID["p0"].Endpoint)
  1595  	assert.Equal(t, "p1", byID["p1"].Endpoint)
  1596  }
  1597  
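        // TestFilter verifies that Members.Filter returns only the members matching
        // the given predicate, here the peers running chaincode "cc" at version "2.0".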
  1598  func TestFilter(t *testing.T) {
  1599  	members := Members{
  1600  		{PKIid: common.PKIidType("p0"), Endpoint: "p0", Properties: &proto.Properties{
  1601  			Chaincodes: []*proto.Chaincode{{Name: "cc", Version: "1.0"}},
  1602  		}},
  1603  		{PKIid: common.PKIidType("p1"), Endpoint: "p1", Properties: &proto.Properties{
  1604  			Chaincodes: []*proto.Chaincode{{Name: "cc", Version: "2.0"}},
  1605  		}},
  1606  	}
  1607  	res := members.Filter(func(member NetworkMember) bool {
  1608  		cc := member.Properties.Chaincodes[0]
  1609  		return cc.Version == "2.0" && cc.Name == "cc"
  1610  	})
  1611  	assert.Equal(t, Members{members[1]}, res)
  1612  }
  1613  
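        // TestMap verifies that Members.Map applies the transformation to copies,
        // returning the transformed members while leaving the originals untouched.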
  1614  func TestMap(t *testing.T) {
  1615  	members := Members{
  1616  		{PKIid: common.PKIidType("p0"), Endpoint: "p0"},
  1617  		{PKIid: common.PKIidType("p1"), Endpoint: "p1"},
  1618  	}
  1619  	expectedMembers := Members{
  1620  		{PKIid: common.PKIidType("p0"), Endpoint: "p0", Properties: &proto.Properties{LedgerHeight: 2}},
  1621  		{PKIid: common.PKIidType("p1"), Endpoint: "p1", Properties: &proto.Properties{LedgerHeight: 2}},
  1622  	}
  1623  
  1624  	addProperty := func(member NetworkMember) NetworkMember {
  1625  		member.Properties = &proto.Properties{
  1626  			LedgerHeight: 2,
  1627  		}
  1628  		return member
  1629  	}
  1630  
  1631  	assert.Equal(t, expectedMembers, members.Map(addProperty))
  1632  	// Ensure original members didn't change
  1633  	assert.Nil(t, members[0].Properties)
  1634  	assert.Nil(t, members[1].Properties)
  1635  }
  1636  
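        // TestMembersIntersect verifies that Members.Intersect returns the members
        // present in both sets, matched by PKI-ID.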
  1637  func TestMembersIntersect(t *testing.T) {
  1638  	members1 := Members{
  1639  		{PKIid: common.PKIidType("p0"), Endpoint: "p0"},
  1640  		{PKIid: common.PKIidType("p1"), Endpoint: "p1"},
  1641  	}
  1642  	members2 := Members{
  1643  		{PKIid: common.PKIidType("p1"), Endpoint: "p1"},
  1644  		{PKIid: common.PKIidType("p2"), Endpoint: "p2"},
  1645  	}
  1646  	assert.Equal(t, Members{{PKIid: common.PKIidType("p1"), Endpoint: "p1"}}, members1.Intersect(members2))
  1647  }
  1648  
  1649  func TestPeerIsolation(t *testing.T) {
  1650  	t.Parallel()
  1651  
  1652  	// Scenario:
  1653  	// Start 3 peers (peer0, peer1, peer2). Set peer1 as the bootstrap peer for all.
  1654  	// Stop peer0 and peer1 for a while, start them again, and verify that peer2 regains full membership
  1655  
  1656  	config := defaultTestConfig
  1657  	// Use a smaller AliveExpirationTimeout than the default to reduce the running time of the test.
  1658  	config.AliveExpirationTimeout = 2 * config.AliveTimeInterval
  1659  
  1660  	peersNum := 3
  1661  	bootPeers := []string{bootPeer(7121)}
  1662  	instances := []*gossipInstance{}
  1663  	var inst *gossipInstance
  1664  
  1665  	// Start all peers and wait for full membership
  1666  	for i := 0; i < peersNum; i++ {
  1667  		id := fmt.Sprintf("d%d", i)
  1668  		inst = createDiscoveryInstanceCustomConfig(7120+i, id, bootPeers, config)
  1669  		instances = append(instances, inst)
  1670  	}
  1671  	assertMembership(t, instances, peersNum-1)
  1672  
  1673  	// Stop the first 2 peers so that the third peer is left alone
  1674  	stopInstances(t, instances[:peersNum-1])
  1675  	assertMembership(t, instances[peersNum-1:], 0)
  1676  
  1677  	// Sleep for as long as it takes a message to be removed from the aliveMsgStore (aliveMsgTTL),
  1678  	// plus a second as a buffer
  1679  	time.Sleep(config.AliveExpirationTimeout*msgExpirationFactor + time.Second)
  1680  
  1681  	// Start the first 2 peers again and wait for all the peers to regain full membership.
  1682  	// In particular, we want to verify that peer2 is not left isolated
  1683  	for i := 0; i < peersNum-1; i++ {
  1684  		id := fmt.Sprintf("d%d", i)
  1685  		inst = createDiscoveryInstanceCustomConfig(7120+i, id, bootPeers, config)
  1686  		instances[i] = inst
  1687  	}
  1688  	assertMembership(t, instances, peersNum-1)
  1689  }
  1690  
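        // waitUntilOrFail polls pred with the package-level default timeout.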
  1691  func waitUntilOrFail(t *testing.T, pred func() bool) {
  1692  	waitUntilTimeoutOrFail(t, pred, timeout)
  1693  }
  1694  
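        // waitUntilTimeoutOrFail polls pred roughly ten times within the given
        // timeout, and fails the test if pred never returns true.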
  1695  func waitUntilTimeoutOrFail(t *testing.T, pred func() bool, timeout time.Duration) {
  1696  	start := time.Now()
  1697  	limit := start.UnixNano() + timeout.Nanoseconds()
  1698  	for time.Now().UnixNano() < limit {
  1699  		if pred() {
  1700  			return
  1701  		}
  1702  		time.Sleep(timeout / 10)
  1703  	}
  1704  	assert.Fail(t, "Timeout expired!")
  1705  }
  1706  
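        // waitUntilOrFailBlocking runs f in a goroutine and fails the test if f
        // does not return within the default timeout.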
  1707  func waitUntilOrFailBlocking(t *testing.T, f func()) {
  1708  	successChan := make(chan struct{}, 1)
  1709  	go func() {
  1710  		f()
  1711  		successChan <- struct{}{}
  1712  	}()
  1713  	select {
  1714  	case <-time.NewTimer(timeout).C:
  1715  	case <-successChan:
  1716  		return
  1717  	}
  1718  	assert.Fail(t, "Timeout expired!")
  1720  }
  1721  
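        // stopInstances stops all the given instances concurrently and fails the
        // test if they do not all stop within the default timeout.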
  1722  func stopInstances(t *testing.T, instances []*gossipInstance) {
  1723  	stopAction := &sync.WaitGroup{}
  1724  	for _, inst := range instances {
  1725  		stopAction.Add(1)
  1726  		go func(inst *gossipInstance) {
  1727  			defer stopAction.Done()
  1728  			inst.Stop()
  1729  		}(inst)
  1730  	}
  1731  
  1732  	waitUntilOrFailBlocking(t, stopAction.Wait)
  1733  }
  1734  
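        // assertMembership waits until every instance reports exactly expectedNum
        // members, failing the test if any instance does not reach that size
        // before the timeout elapses.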
  1735  func assertMembership(t *testing.T, instances []*gossipInstance, expectedNum int) {
  1736  	wg := sync.WaitGroup{}
  1737  	wg.Add(len(instances))
  1738  
  1739  	ctx, cancelation := context.WithTimeout(context.Background(), timeout)
  1740  	defer cancelation()
  1741  
  1742  	for _, inst := range instances {
  1743  		go func(ctx context.Context, i *gossipInstance) {
  1744  			defer wg.Done()
  1745  			for {
  1746  				select {
  1747  				case <-ctx.Done():
  1748  					return
  1749  				case <-time.After(timeout / 10):
  1750  					if len(i.GetMembership()) == expectedNum {
  1751  						return
  1752  					}
  1753  				}
  1754  			}
  1755  		}(ctx, inst)
  1756  	}
  1757  
  1758  	wg.Wait()
  1759  	assert.NoError(t, ctx.Err(), "Timeout expired!")
  1760  }
  1761  
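        // portsOfMembers extracts the port of every member's endpoint and returns
        // the ports sorted in ascending order.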
  1762  func portsOfMembers(members []NetworkMember) []int {
  1763  	ports := make([]int, len(members))
  1764  	for i := range members {
  1765  		ports[i] = portOfEndpoint(members[i].Endpoint)
  1766  	}
  1767  	sort.Ints(ports)
  1768  	return ports
  1769  }
  1770  
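        // portOfEndpoint parses the port from a "host:port" endpoint string;
        // parse errors are ignored and yield 0.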
  1771  func portOfEndpoint(endpoint string) int {
  1772  	port, _ := strconv.ParseInt(strings.Split(endpoint, ":")[1], 10, 64)
  1773  	return int(port)
  1774  }