github.com/osdi23p228/fabric@v0.0.0-20221218062954-77808885f5db/gossip/discovery/discovery_test.go

/*
Copyright IBM Corp. 2016 All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package discovery

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"math/rand"
	"net"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	protoG "github.com/golang/protobuf/proto"
	proto "github.com/hyperledger/fabric-protos-go/gossip"
	"github.com/osdi23p228/fabric/common/flogging"
	"github.com/osdi23p228/fabric/gossip/common"
	"github.com/osdi23p228/fabric/gossip/gossip/msgstore"
	"github.com/osdi23p228/fabric/gossip/protoext"
	"github.com/osdi23p228/fabric/gossip/util"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

var timeout = 15 * time.Second

var aliveTimeInterval = 300 * time.Millisecond
var defaultTestConfig = DiscoveryConfig{
	AliveTimeInterval:            aliveTimeInterval,
	AliveExpirationTimeout:       10 * aliveTimeInterval,
	AliveExpirationCheckInterval: aliveTimeInterval,
	ReconnectInterval:            10 * aliveTimeInterval,
	MaxConnectionAttempts:        DefMaxConnectionAttempts,
	MsgExpirationFactor:          DefMsgExpirationFactor,
}
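
// With these settings a peer is presumed dead after 10 missed alive intervals
// (3 seconds). The expiration tests below derive their waits from
// AliveExpirationTimeout and the message expiration factor rather than
// hard-coding durations.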

func init() {
	util.SetupTestLogging()
	defaultTestConfig.MaxConnectionAttempts = 10000
}

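// dummyReceivedMessage is a minimal protoext.ReceivedMessage implementation
// for tests: only the accessors the discovery layer uses are implemented,
// and the rest panic.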
type dummyReceivedMessage struct {
	msg  *protoext.SignedGossipMessage
	info *protoext.ConnectionInfo
}

func (*dummyReceivedMessage) Respond(msg *proto.GossipMessage) {
	panic("implement me")
}

func (rm *dummyReceivedMessage) GetGossipMessage() *protoext.SignedGossipMessage {
	return rm.msg
}

func (*dummyReceivedMessage) GetSourceEnvelope() *proto.Envelope {
	panic("implement me")
}

func (rm *dummyReceivedMessage) GetConnectionInfo() *protoext.ConnectionInfo {
	return rm.info
}

func (*dummyReceivedMessage) Ack(err error) {
	panic("implement me")
}

// mockAnchorPeerTracker implements the AnchorPeerTracker interface
type mockAnchorPeerTracker struct {
	apEndpoints []string
}

func (m *mockAnchorPeerTracker) IsAnchorPeer(endpoint string) bool {
	return util.Contains(endpoint, m.apEndpoints)
}

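// dummyCommModule is a test double for the communication layer backing a
// discovery instance. It holds real gRPC streams to peers, counts messages
// sent, received and signed, and exposes knobs (shouldGossip, disableComm,
// mock) that individual tests toggle.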
type dummyCommModule struct {
	validatedMessages chan *protoext.SignedGossipMessage
	msgsReceived      uint32
	msgsSent          uint32
	id                string
	identitySwitch    chan common.PKIidType
	presumeDead       chan common.PKIidType
	detectedDead      chan string
	streams           map[string]proto.Gossip_GossipStreamClient
	conns             map[string]*grpc.ClientConn
	lock              *sync.RWMutex
	incMsgs           chan protoext.ReceivedMessage
	lastSeqs          map[string]uint64
	shouldGossip      bool
	disableComm       bool
	mock              *mock.Mock
	signCount         uint32
}

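// gossipInstance bundles a Discovery implementation together with its dummy
// comm module and the gRPC server it listens on, so a test can drive and
// observe a single peer end to end.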
type gossipInstance struct {
	msgInterceptor func(*protoext.SignedGossipMessage)
	comm           *dummyCommModule
	Discovery
	gRPCserv      *grpc.Server
	lsnr          net.Listener
	shouldGossip  bool
	syncInitiator *time.Ticker
	stopChan      chan struct{}
	port          int
}

func (comm *dummyCommModule) ValidateAliveMsg(am *protoext.SignedGossipMessage) bool {
	comm.lock.RLock()
	c := comm.validatedMessages
	comm.lock.RUnlock()

	if c != nil {
		c <- am
	}
	return true
}

func (comm *dummyCommModule) IdentitySwitch() <-chan common.PKIidType {
	return comm.identitySwitch
}

func (comm *dummyCommModule) recordValidation(validatedMessages chan *protoext.SignedGossipMessage) {
	comm.lock.Lock()
	defer comm.lock.Unlock()
	comm.validatedMessages = validatedMessages
}

func (comm *dummyCommModule) SignMessage(am *proto.GossipMessage, internalEndpoint string) *proto.Envelope {
	atomic.AddUint32(&comm.signCount, 1)

	secret := &proto.Secret{
		Content: &proto.Secret_InternalEndpoint{
			InternalEndpoint: internalEndpoint,
		},
	}
	signer := func(msg []byte) ([]byte, error) {
		return nil, nil
	}
	s, _ := protoext.NoopSign(am)
	env := s.Envelope
	protoext.SignSecret(env, signer, secret)
	return env
}

func (comm *dummyCommModule) Gossip(msg *protoext.SignedGossipMessage) {
	if !comm.shouldGossip || comm.disableComm {
		return
	}
	comm.lock.Lock()
	defer comm.lock.Unlock()
	for _, conn := range comm.streams {
		conn.Send(msg.Envelope)
	}
}

func (comm *dummyCommModule) Forward(msg protoext.ReceivedMessage) {
	if !comm.shouldGossip || comm.disableComm {
		return
	}
	comm.lock.Lock()
	defer comm.lock.Unlock()
	for _, conn := range comm.streams {
		conn.Send(msg.GetGossipMessage().Envelope)
	}
}

func (comm *dummyCommModule) SendToPeer(peer *NetworkMember, msg *protoext.SignedGossipMessage) {
	if comm.disableComm {
		return
	}
	comm.lock.RLock()
	_, exists := comm.streams[peer.Endpoint]
	mock := comm.mock
	comm.lock.RUnlock()

	if mock != nil {
		mock.Called(peer, msg)
	}

	if !exists {
		if !comm.Ping(peer) {
			fmt.Printf("Ping to %v failed\n", peer.Endpoint)
			return
		}
	}
	comm.lock.Lock()
	s, _ := protoext.NoopSign(msg.GossipMessage)
	comm.streams[peer.Endpoint].Send(s.Envelope)
	comm.lock.Unlock()
	atomic.AddUint32(&comm.msgsSent, 1)
}

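// Ping dials the peer and opens a gossip stream on first use (or after the
// cached connection has shut down), and otherwise pings over the cached
// connection.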
func (comm *dummyCommModule) Ping(peer *NetworkMember) bool {
	if comm.disableComm {
		return false
	}
	comm.lock.Lock()
	defer comm.lock.Unlock()

	if comm.mock != nil {
		comm.mock.Called()
	}

	_, alreadyExists := comm.streams[peer.Endpoint]
	conn := comm.conns[peer.Endpoint]
	if !alreadyExists || conn.GetState() == connectivity.Shutdown {
		newConn, err := grpc.Dial(peer.Endpoint, grpc.WithInsecure())
		if err != nil {
			return false
		}
		if stream, err := proto.NewGossipClient(newConn).GossipStream(context.Background()); err == nil {
			comm.conns[peer.Endpoint] = newConn
			comm.streams[peer.Endpoint] = stream
			return true
		}
		return false
	}
	if _, err := proto.NewGossipClient(conn).Ping(context.Background(), &proto.Empty{}); err != nil {
		return false
	}
	return true
}

func (comm *dummyCommModule) Accept() <-chan protoext.ReceivedMessage {
	return comm.incMsgs
}

func (comm *dummyCommModule) PresumedDead() <-chan common.PKIidType {
	return comm.presumeDead
}

func (comm *dummyCommModule) CloseConn(peer *NetworkMember) {
	comm.lock.Lock()
	defer comm.lock.Unlock()

	if _, exists := comm.streams[peer.Endpoint]; !exists {
		return
	}

	comm.streams[peer.Endpoint].CloseSend()
	comm.conns[peer.Endpoint].Close()
}

func (g *gossipInstance) receivedMsgCount() int {
	return int(atomic.LoadUint32(&g.comm.msgsReceived))
}

func (g *gossipInstance) sentMsgCount() int {
	return int(atomic.LoadUint32(&g.comm.msgsSent))
}

func (g *gossipInstance) discoveryImpl() *gossipDiscoveryImpl {
	return g.Discovery.(*gossipDiscoveryImpl)
}

func (g *gossipInstance) initiateSync(frequency time.Duration, peerNum int) {
	g.syncInitiator = time.NewTicker(frequency)
	g.stopChan = make(chan struct{})
	go func() {
		for {
			select {
			case <-g.syncInitiator.C:
				g.Discovery.InitiateSync(peerNum)
			case <-g.stopChan:
				g.syncInitiator.Stop()
				return
			}
		}
	}()
}

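// GossipStream is the server side of the gossip stream: every envelope
// received is handed to the test's interceptor, queued on incMsgs for the
// discovery layer, and, if it is an alive message, forwarded to the
// connected peers.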
func (g *gossipInstance) GossipStream(stream proto.Gossip_GossipStreamServer) error {
	for {
		envelope, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		lgr := g.Discovery.(*gossipDiscoveryImpl).logger
		gMsg, err := protoext.EnvelopeToGossipMessage(envelope)
		if err != nil {
			lgr.Warning("Failed deserializing GossipMessage from envelope:", err)
			continue
		}
		g.msgInterceptor(gMsg)

		lgr.Debug(g.Discovery.Self().Endpoint, "Got message:", gMsg)
		g.comm.incMsgs <- &dummyReceivedMessage{
			msg: gMsg,
			info: &protoext.ConnectionInfo{
				ID: common.PKIidType("testID"),
			},
		}
		atomic.AddUint32(&g.comm.msgsReceived, 1)

		if aliveMsg := gMsg.GetAliveMsg(); aliveMsg != nil {
			g.tryForwardMessage(gMsg)
		}
	}
}

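// tryForwardMessage gossips an alive message onward only if its sequence
// number is newer than the last one seen from that peer, which keeps the
// test network from endlessly echoing stale messages.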
func (g *gossipInstance) tryForwardMessage(msg *protoext.SignedGossipMessage) {
	g.comm.lock.Lock()

	aliveMsg := msg.GetAliveMsg()

	forward := false
	id := string(aliveMsg.Membership.PkiId)
	seqNum := aliveMsg.Timestamp.SeqNum
	if last, exists := g.comm.lastSeqs[id]; exists {
		if last < seqNum {
			g.comm.lastSeqs[id] = seqNum
			forward = true
		}
	} else {
		g.comm.lastSeqs[id] = seqNum
		forward = true
	}

	g.comm.lock.Unlock()

	if forward {
		g.comm.Gossip(msg)
	}
}

func (g *gossipInstance) Stop() {
	if g.syncInitiator != nil {
		g.stopChan <- struct{}{}
	}
	g.gRPCserv.Stop()
	g.lsnr.Close()
	g.comm.lock.Lock()
	for _, stream := range g.comm.streams {
		stream.CloseSend()
	}
	g.comm.lock.Unlock()
	for _, conn := range g.comm.conns {
		conn.Close()
	}
	g.Discovery.Stop()
}

func (g *gossipInstance) Ping(context.Context, *proto.Empty) (*proto.Empty, error) {
	return &proto.Empty{}, nil
}

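// noopPolicy discloses every message to every remote peer and forwards
// envelopes unmodified.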
var noopPolicy = func(remotePeer *NetworkMember) (Sieve, EnvelopeFilter) {
	return func(msg *protoext.SignedGossipMessage) bool {
			return true
		}, func(message *protoext.SignedGossipMessage) *proto.Envelope {
			return message.Envelope
		}
}

func createDiscoveryInstance(port int, id string, bootstrapPeers []string) *gossipInstance {
	return createDiscoveryInstanceCustomConfig(port, id, bootstrapPeers, defaultTestConfig)
}

func createDiscoveryInstanceCustomConfig(port int, id string, bootstrapPeers []string, config DiscoveryConfig) *gossipInstance {
	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, true, noopPolicy, config)
}

func createDiscoveryInstanceWithNoGossip(port int, id string, bootstrapPeers []string) *gossipInstance {
	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, false, noopPolicy, defaultTestConfig)
}

func createDiscoveryInstanceWithNoGossipWithDisclosurePolicy(port int, id string, bootstrapPeers []string, pol DisclosurePolicy) *gossipInstance {
	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, false, pol, defaultTestConfig)
}

func createDiscoveryInstanceThatGossips(port int, id string, bootstrapPeers []string, shouldGossip bool, pol DisclosurePolicy, config DiscoveryConfig) *gossipInstance {
	return createDiscoveryInstanceThatGossipsWithInterceptors(port, id, bootstrapPeers, shouldGossip, pol, func(_ *protoext.SignedGossipMessage) {}, config)
}

func createDiscoveryInstanceThatGossipsWithInterceptors(port int, id string, bootstrapPeers []string, shouldGossip bool, pol DisclosurePolicy, f func(*protoext.SignedGossipMessage), config DiscoveryConfig) *gossipInstance {
	mockTracker := &mockAnchorPeerTracker{}
	return createDiscoveryInstanceWithAnchorPeerTracker(port, id, bootstrapPeers, shouldGossip, pol, f, config, mockTracker, nil)
}

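// createDiscoveryInstanceWithAnchorPeerTracker is the constructor the
// helpers above funnel into: it wires a dummy comm module, a TCP listener
// and a gRPC server around NewDiscoveryService, and connects to the given
// bootstrap peers.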
func createDiscoveryInstanceWithAnchorPeerTracker(port int, id string, bootstrapPeers []string, shouldGossip bool, pol DisclosurePolicy,
	f func(*protoext.SignedGossipMessage), config DiscoveryConfig, anchorPeerTracker AnchorPeerTracker, logger util.Logger) *gossipInstance {
	comm := &dummyCommModule{
		conns:          make(map[string]*grpc.ClientConn),
		streams:        make(map[string]proto.Gossip_GossipStreamClient),
		incMsgs:        make(chan protoext.ReceivedMessage, 1000),
		presumeDead:    make(chan common.PKIidType, 10000),
		id:             id,
		detectedDead:   make(chan string, 10000),
		identitySwitch: make(chan common.PKIidType),
		lock:           &sync.RWMutex{},
		lastSeqs:       make(map[string]uint64),
		shouldGossip:   shouldGossip,
		disableComm:    false,
	}

	endpoint := fmt.Sprintf("localhost:%d", port)
	self := NetworkMember{
		Metadata:         []byte{},
		PKIid:            []byte(endpoint),
		Endpoint:         endpoint,
		InternalEndpoint: endpoint,
	}

	listenAddress := fmt.Sprintf(":%d", port)
	ll, err := net.Listen("tcp", listenAddress)
	if err != nil {
		errMsg := fmt.Sprintf("Failed creating listener on address %v for gossip instance: %v", listenAddress, err)
		panic(errMsg)
	}
	s := grpc.NewServer()

	config.BootstrapPeers = bootstrapPeers

	if logger == nil {
		logger = util.GetLogger(util.DiscoveryLogger, self.InternalEndpoint)
	}
	discSvc := NewDiscoveryService(self, comm, comm, pol, config, anchorPeerTracker, logger)
	for _, bootPeer := range bootstrapPeers {
		bp := bootPeer
		discSvc.Connect(NetworkMember{Endpoint: bp, InternalEndpoint: bootPeer}, func() (*PeerIdentification, error) {
			return &PeerIdentification{SelfOrg: true, ID: common.PKIidType(bp)}, nil
		})
	}

	gossInst := &gossipInstance{comm: comm, gRPCserv: s, Discovery: discSvc, lsnr: ll, shouldGossip: shouldGossip, port: port, msgInterceptor: f}

	proto.RegisterGossipServer(s, gossInst)
	go s.Serve(ll)

	return gossInst
}

func bootPeer(port int) string {
	return fmt.Sprintf("localhost:%d", port)
}

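// A minimal sketch of the pattern most tests below follow (an illustration,
// not invoked by the suite; the ports are arbitrary): stand up peers that
// bootstrap off each other, wait until each one sees the expected number of
// members, and tear them down. assertMembership and waitUntilOrFail are
// polling helpers defined elsewhere in this package.
func exampleTwoPeerBootstrap(t *testing.T) {
	a := createDiscoveryInstance(9000, "a", []string{bootPeer(9001)})
	b := createDiscoveryInstance(9001, "b", []string{bootPeer(9000)})
	defer a.Stop()
	defer b.Stop()
	// Block until both instances report exactly one other member.
	assertMembership(t, []*gossipInstance{a, b}, 1)
}
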
func TestClone(t *testing.T) {
	nm := &NetworkMember{
		PKIid: common.PKIidType("abc"),
		Properties: &proto.Properties{
			LedgerHeight: 1,
			LeftChannel:  true,
		},
		Envelope: &proto.Envelope{
			Payload: []byte("payload"),
		},
		InternalEndpoint: "internal",
		Metadata:         []byte{1, 2, 3},
		Endpoint:         "endpoint",
	}

	nm2 := nm.Clone()
	assert.Equal(t, *nm, nm2, "Clones are different")
	assert.False(t, nm.Properties == nm2.Properties, "Cloning should be deep and not shallow")
	assert.False(t, nm.Envelope == nm2.Envelope, "Cloning should be deep and not shallow")
}

func TestHasExternalEndpoints(t *testing.T) {
	memberWithEndpoint := NetworkMember{Endpoint: "foo"}
	memberWithoutEndpoint := NetworkMember{}

	assert.True(t, HasExternalEndpoint(memberWithEndpoint))
	assert.False(t, HasExternalEndpoint(memberWithoutEndpoint))
}

func TestToString(t *testing.T) {
	nm := NetworkMember{
		Endpoint:         "a",
		InternalEndpoint: "b",
	}
	assert.Equal(t, "b", nm.PreferredEndpoint())
	nm = NetworkMember{
		Endpoint: "a",
	}
	assert.Equal(t, "a", nm.PreferredEndpoint())

	now := time.Now()
	ts := &timestamp{
		incTime: now,
		seqNum:  uint64(42),
	}
	assert.Equal(t, fmt.Sprintf("%d, %d", now.UnixNano(), 42), fmt.Sprint(ts))
}

func TestNetworkMemberString(t *testing.T) {
	tests := []struct {
		input    NetworkMember
		expected string
	}{
		{
			input:    NetworkMember{Endpoint: "endpoint", InternalEndpoint: "internal-endpoint", PKIid: common.PKIidType{0, 1, 2, 3}, Metadata: nil},
			expected: "Endpoint: endpoint, InternalEndpoint: internal-endpoint, PKI-ID: 00010203, Metadata: ",
		},
		{
			input:    NetworkMember{Endpoint: "endpoint", InternalEndpoint: "internal-endpoint", PKIid: common.PKIidType{0, 1, 2, 3}, Metadata: []byte{4, 5, 6, 7}},
			expected: "Endpoint: endpoint, InternalEndpoint: internal-endpoint, PKI-ID: 00010203, Metadata: 04050607",
		},
	}
	for _, tt := range tests {
		assert.Equal(t, tt.expected, tt.input.String())
	}
}

func TestBadInput(t *testing.T) {
	inst := createDiscoveryInstance(2048, fmt.Sprintf("d%d", 0), []string{})
	inst.Discovery.(*gossipDiscoveryImpl).handleMsgFromComm(nil)
	s, _ := protoext.NoopSign(&proto.GossipMessage{
		Content: &proto.GossipMessage_DataMsg{
			DataMsg: &proto.DataMessage{},
		},
	})
	inst.Discovery.(*gossipDiscoveryImpl).handleMsgFromComm(&dummyReceivedMessage{
		msg: s,
		info: &protoext.ConnectionInfo{
			ID: common.PKIidType("testID"),
		},
	})
}

func TestConnect(t *testing.T) {
	nodeNum := 10
	instances := []*gossipInstance{}
	firstSentMemReqMsgs := make(chan *protoext.SignedGossipMessage, nodeNum)
	for i := 0; i < nodeNum; i++ {
		inst := createDiscoveryInstance(7611+i, fmt.Sprintf("d%d", i), []string{})

		inst.comm.lock.Lock()
		inst.comm.mock = &mock.Mock{}
		inst.comm.mock.On("SendToPeer", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
			inst := inst
			msg := arguments.Get(1).(*protoext.SignedGossipMessage)
			if req := msg.GetMemReq(); req != nil {
				selfMsg, _ := protoext.EnvelopeToGossipMessage(req.SelfInformation)
				firstSentMemReqMsgs <- selfMsg
				inst.comm.lock.Lock()
				inst.comm.mock = nil
				inst.comm.lock.Unlock()
			}
		})
		inst.comm.mock.On("Ping", mock.Anything)
		inst.comm.lock.Unlock()
		instances = append(instances, inst)
		j := (i + 1) % nodeNum
		endpoint := fmt.Sprintf("localhost:%d", 7611+j)
		netMember2Connect2 := NetworkMember{Endpoint: endpoint, PKIid: []byte(endpoint)}
		inst.Connect(netMember2Connect2, func() (identification *PeerIdentification, err error) {
			return &PeerIdentification{SelfOrg: false, ID: nil}, nil
		})
	}

	time.Sleep(time.Second * 3)
	fullMembership := func() bool {
		return nodeNum-1 == len(instances[nodeNum-1].GetMembership())
	}
	waitUntilOrFail(t, fullMembership)

	discInst := instances[rand.Intn(len(instances))].Discovery.(*gossipDiscoveryImpl)
	mr, _ := discInst.createMembershipRequest(true)
	am, _ := protoext.EnvelopeToGossipMessage(mr.GetMemReq().SelfInformation)
	assert.NotNil(t, am.SecretEnvelope)
	mr2, _ := discInst.createMembershipRequest(false)
	am, _ = protoext.EnvelopeToGossipMessage(mr2.GetMemReq().SelfInformation)
	assert.Nil(t, am.SecretEnvelope)
	stopInstances(t, instances)
	assert.Len(t, firstSentMemReqMsgs, nodeNum)
	close(firstSentMemReqMsgs)
	for firstSentSelfMsg := range firstSentMemReqMsgs {
		assert.Nil(t, firstSentSelfMsg.Envelope.SecretEnvelope)
	}
}

func TestNoSigningIfNoMembership(t *testing.T) {
	t.Parallel()

	inst := createDiscoveryInstance(8931, "foreveralone", nil)
	defer inst.Stop()
	time.Sleep(defaultTestConfig.AliveTimeInterval * 10)
	assert.Zero(t, atomic.LoadUint32(&inst.comm.signCount))

	inst.InitiateSync(10000)
	assert.Zero(t, atomic.LoadUint32(&inst.comm.signCount))
}

func TestValidation(t *testing.T) {
	// Scenarios: This test contains the following sub-tests:
	// 1) alive message validation: a message is validated <==> it entered the message store
	// 2) request/response message validation:
	//   2.1) alive messages from membership requests/responses are validated.
	//   2.2) once alive messages enter the message store, receiving them again via membership
	//        responses doesn't trigger validation, but receiving them via membership requests does.

	wrapReceivedMessage := func(msg *protoext.SignedGossipMessage) protoext.ReceivedMessage {
		return &dummyReceivedMessage{
			msg: msg,
			info: &protoext.ConnectionInfo{
				ID: common.PKIidType("testID"),
			},
		}
	}

	requestMessagesReceived := make(chan *protoext.SignedGossipMessage, 100)
	responseMessagesReceived := make(chan *protoext.SignedGossipMessage, 100)
	aliveMessagesReceived := make(chan *protoext.SignedGossipMessage, 5000)

	var membershipRequest atomic.Value
	var membershipResponseWithAlivePeers atomic.Value
	var membershipResponseWithDeadPeers atomic.Value

	recordMembershipRequest := func(req *protoext.SignedGossipMessage) {
		msg, _ := protoext.EnvelopeToGossipMessage(req.GetMemReq().SelfInformation)
		membershipRequest.Store(req)
		requestMessagesReceived <- msg
	}

	recordMembershipResponse := func(res *protoext.SignedGossipMessage) {
		memRes := res.GetMemRes()
		if len(memRes.GetAlive()) > 0 {
			membershipResponseWithAlivePeers.Store(res)
		}
		if len(memRes.GetDead()) > 0 {
			membershipResponseWithDeadPeers.Store(res)
		}
		responseMessagesReceived <- res
	}

	interceptor := func(msg *protoext.SignedGossipMessage) {
		if memReq := msg.GetMemReq(); memReq != nil {
			recordMembershipRequest(msg)
			return
		}

		if memRes := msg.GetMemRes(); memRes != nil {
			recordMembershipResponse(msg)
			return
		}
		// Else, it's an alive message
		aliveMessagesReceived <- msg
	}

	// p3 is the boot peer of p1, and p1 is the boot peer of p2.
	// p1 sends a (membership) request to p3, and receives a (membership) response back.
	// p2 sends a (membership) request to p1.
	// Therefore, p1 receives both a membership request and a response.
	p1 := createDiscoveryInstanceThatGossipsWithInterceptors(4675, "p1", []string{bootPeer(4677)}, true, noopPolicy, interceptor, defaultTestConfig)
	p2 := createDiscoveryInstance(4676, "p2", []string{bootPeer(4675)})
	p3 := createDiscoveryInstance(4677, "p3", nil)
	instances := []*gossipInstance{p1, p2, p3}

	assertMembership(t, instances, 2)

	instances = []*gossipInstance{p1, p2}
	// Stop p3 and wait until its death is detected
	p3.Stop()
	assertMembership(t, instances, 1)
	// Force p1 to send a membership request so it can receive back a response
	// with dead peers.
	p1.InitiateSync(1)

	// Wait until a response with a dead peer is received
	waitUntilOrFail(t, func() bool {
		return membershipResponseWithDeadPeers.Load() != nil
	})

	p1.Stop()
	p2.Stop()

	close(aliveMessagesReceived)
	t.Log("Recorded", len(aliveMessagesReceived), "alive messages")
	t.Log("Recorded", len(requestMessagesReceived), "request messages")
	t.Log("Recorded", len(responseMessagesReceived), "response messages")

	// Ensure we got alive messages from membership requests and from membership responses
	assert.NotNil(t, membershipResponseWithAlivePeers.Load())
	assert.NotNil(t, membershipRequest.Load())

	t.Run("alive message", func(t *testing.T) {
		// Spawn a new peer - p4
		p4 := createDiscoveryInstance(4678, "p1", nil)
		defer p4.Stop()
		// Record messages validated
		validatedMessages := make(chan *protoext.SignedGossipMessage, 5000)
		p4.comm.recordValidation(validatedMessages)
		tmpMsgs := make(chan *protoext.SignedGossipMessage, 5000)
		// Replay the messages sent to p1 into p4, and also save them into a temporary channel
		for msg := range aliveMessagesReceived {
			p4.comm.incMsgs <- wrapReceivedMessage(msg)
			tmpMsgs <- msg
		}

		// Feed the messages p4 received into a fresh message store
		policy := protoext.NewGossipMessageComparator(0)
		msgStore := msgstore.NewMessageStore(policy, func(_ interface{}) {})
		close(tmpMsgs)
		for msg := range tmpMsgs {
			if msgStore.Add(msg) {
				// Ensure the message was verified if it can be added into the message store
				expectedMessage := <-validatedMessages
				assert.Equal(t, expectedMessage, msg)
			}
		}
		// Ensure we didn't validate any other messages.
		assert.Empty(t, validatedMessages)
	})

	req := membershipRequest.Load().(*protoext.SignedGossipMessage)
	res := membershipResponseWithDeadPeers.Load().(*protoext.SignedGossipMessage)
	// Ensure the membership response contains both alive and dead peers
	assert.Len(t, res.GetMemRes().GetAlive(), 2)
	assert.Len(t, res.GetMemRes().GetDead(), 1)

	for _, testCase := range []struct {
		name                  string
		expectedAliveMessages int
		port                  int
		message               *protoext.SignedGossipMessage
		shouldBeReValidated   bool
	}{
		{
			name:                  "membership request",
			expectedAliveMessages: 1,
			message:               req,
			port:                  4679,
			shouldBeReValidated:   true,
		},
		{
			name:                  "membership response",
			expectedAliveMessages: 3,
			message:               res,
			port:                  4680,
		},
	} {
		testCase := testCase
		t.Run(testCase.name, func(t *testing.T) {
			p := createDiscoveryInstance(testCase.port, "p", nil)
			defer p.Stop()
			// Record messages validated
			validatedMessages := make(chan *protoext.SignedGossipMessage, testCase.expectedAliveMessages)
			p.comm.recordValidation(validatedMessages)

			p.comm.incMsgs <- wrapReceivedMessage(testCase.message)
			// Ensure all messages were validated
			for i := 0; i < testCase.expectedAliveMessages; i++ {
				validatedMsg := <-validatedMessages
				// send the message directly to be included in the message store
				p.comm.incMsgs <- wrapReceivedMessage(validatedMsg)
			}
			// Wait for the messages to be validated
			for i := 0; i < testCase.expectedAliveMessages; i++ {
				<-validatedMessages
			}
			// No more than testCase.expectedAliveMessages should have been validated
			assert.Empty(t, validatedMessages)

			if !testCase.shouldBeReValidated {
				// Re-submit the message twice and ensure it wasn't validated.
				// If it were validated, a panic would occur, because an enqueue to the
				// closed validatedMessages channel would be attempted.
				close(validatedMessages)
			}
			p.comm.incMsgs <- wrapReceivedMessage(testCase.message)
			p.comm.incMsgs <- wrapReceivedMessage(testCase.message)
			// Wait until the channel is drained, which means the messages were processed.
			waitUntilOrFail(t, func() bool {
				return len(p.comm.incMsgs) == 0
			})
		})
	}
}

func TestUpdate(t *testing.T) {
	nodeNum := 5
	bootPeers := []string{bootPeer(6611), bootPeer(6612)}
	instances := []*gossipInstance{}

	inst := createDiscoveryInstance(6611, "d1", bootPeers)
	instances = append(instances, inst)

	inst = createDiscoveryInstance(6612, "d2", bootPeers)
	instances = append(instances, inst)

	for i := 3; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst = createDiscoveryInstance(6610+i, id, bootPeers)
		instances = append(instances, inst)
	}

	fullMembership := func() bool {
		return nodeNum-1 == len(instances[nodeNum-1].GetMembership())
	}

	waitUntilOrFail(t, fullMembership)

	instances[0].UpdateMetadata([]byte("bla bla"))
	instances[nodeNum-1].UpdateEndpoint("localhost:5511")

	checkMembership := func() bool {
		for _, member := range instances[nodeNum-1].GetMembership() {
			if string(member.PKIid) == instances[0].comm.id {
				if "bla bla" != string(member.Metadata) {
					return false
				}
			}
		}

		for _, member := range instances[0].GetMembership() {
			if string(member.PKIid) == instances[nodeNum-1].comm.id {
				if "localhost:5511" != member.Endpoint {
					return false
				}
			}
		}
		return true
	}

	waitUntilOrFail(t, checkMembership)
	stopInstances(t, instances)
}

func TestInitiateSync(t *testing.T) {
	nodeNum := 10
	bootPeers := []string{bootPeer(3611), bootPeer(3612)}
	instances := []*gossipInstance{}

	toDie := int32(0)
	for i := 1; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst := createDiscoveryInstanceWithNoGossip(3610+i, id, bootPeers)
		instances = append(instances, inst)
		go func() {
			for {
				if atomic.LoadInt32(&toDie) == int32(1) {
					return
				}
				time.Sleep(defaultTestConfig.AliveExpirationTimeout / 3)
				inst.InitiateSync(9)
			}
		}()
	}
	time.Sleep(defaultTestConfig.AliveExpirationTimeout * 4)
	assertMembership(t, instances, nodeNum-1)
	atomic.StoreInt32(&toDie, int32(1))
	stopInstances(t, instances)
}

func TestSelf(t *testing.T) {
	inst := createDiscoveryInstance(13463, "d1", []string{})
	defer inst.Stop()
	env := inst.Self().Envelope
	sMsg, err := protoext.EnvelopeToGossipMessage(env)
	assert.NoError(t, err)
	member := sMsg.GetAliveMsg().Membership
	assert.Equal(t, "localhost:13463", member.Endpoint)
	assert.Equal(t, []byte("localhost:13463"), member.PkiId)

	assert.Equal(t, "localhost:13463", inst.Self().Endpoint)
	assert.Equal(t, common.PKIidType("localhost:13463"), inst.Self().PKIid)
}

func TestExpiration(t *testing.T) {
	nodeNum := 5
	bootPeers := []string{bootPeer(2611), bootPeer(2612)}
	instances := []*gossipInstance{}

	inst := createDiscoveryInstance(2611, "d1", bootPeers)
	instances = append(instances, inst)

	inst = createDiscoveryInstance(2612, "d2", bootPeers)
	instances = append(instances, inst)

	for i := 3; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst = createDiscoveryInstance(2610+i, id, bootPeers)
		instances = append(instances, inst)
	}

	assertMembership(t, instances, nodeNum-1)

	waitUntilOrFailBlocking(t, instances[nodeNum-1].Stop)
	waitUntilOrFailBlocking(t, instances[nodeNum-2].Stop)

	assertMembership(t, instances[:len(instances)-2], nodeNum-3)

	stopAction := &sync.WaitGroup{}
	for i, inst := range instances {
		if i+2 == nodeNum {
			break
		}
		stopAction.Add(1)
		go func(inst *gossipInstance) {
			defer stopAction.Done()
			inst.Stop()
		}(inst)
	}

	waitUntilOrFailBlocking(t, stopAction.Wait)
}

func TestGetFullMembership(t *testing.T) {
	nodeNum := 15
	bootPeers := []string{bootPeer(5511), bootPeer(5512)}
	instances := []*gossipInstance{}
	var inst *gossipInstance

	for i := 3; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst = createDiscoveryInstance(5510+i, id, bootPeers)
		instances = append(instances, inst)
	}

	time.Sleep(time.Second)

	inst = createDiscoveryInstance(5511, "d1", bootPeers)
	instances = append(instances, inst)

	inst = createDiscoveryInstance(5512, "d2", bootPeers)
	instances = append(instances, inst)

	assertMembership(t, instances, nodeNum-1)

	// Ensure that internal endpoint was propagated to everyone
	for _, inst := range instances {
		for _, member := range inst.GetMembership() {
			assert.NotEmpty(t, member.InternalEndpoint)
			assert.NotEmpty(t, member.Endpoint)
		}
	}

	// Check that Lookup() is valid
	for _, inst := range instances {
		for _, member := range inst.GetMembership() {
			assert.Equal(t, string(member.PKIid), inst.Lookup(member.PKIid).Endpoint)
			assert.Equal(t, member.PKIid, inst.Lookup(member.PKIid).PKIid)
		}
	}

	stopInstances(t, instances)
}

func TestGossipDiscoveryStopping(t *testing.T) {
	inst := createDiscoveryInstance(9611, "d1", []string{bootPeer(9611)})
	time.Sleep(time.Second)
	waitUntilOrFailBlocking(t, inst.Stop)
}

func TestGossipDiscoverySkipConnectingToLocalhostBootstrap(t *testing.T) {
	inst := createDiscoveryInstance(11611, "d1", []string{"localhost:11611", "127.0.0.1:11611"})
	inst.comm.lock.Lock()
	inst.comm.mock = &mock.Mock{}
	inst.comm.mock.On("SendToPeer", mock.Anything, mock.Anything).Run(func(mock.Arguments) {
		t.Fatal("Should not have connected to any peer")
	})
	inst.comm.mock.On("Ping", mock.Anything).Run(func(mock.Arguments) {
		t.Fatal("Should not have connected to any peer")
	})
	inst.comm.lock.Unlock()
	time.Sleep(time.Second * 3)
	waitUntilOrFailBlocking(t, inst.Stop)
}

func TestConvergence(t *testing.T) {
	// Scenario:
	// {boot peer: [peer list]}
	// {d1: d2, d3, d4}
	// {d5: d6, d7, d8}
	// {d9: d10, d11, d12}
	// Connect all boot peers with d13.
	// Take down d13.
	// Ensure there is still full membership.
	instances := []*gossipInstance{}
	for _, i := range []int{1, 5, 9} {
		bootPort := 4610 + i
		id := fmt.Sprintf("d%d", i)
		leader := createDiscoveryInstance(bootPort, id, []string{})
		instances = append(instances, leader)
		for minionIndex := 1; minionIndex <= 3; minionIndex++ {
			id := fmt.Sprintf("d%d", i+minionIndex)
			minion := createDiscoveryInstance(4610+minionIndex+i, id, []string{bootPeer(bootPort)})
			instances = append(instances, minion)
		}
	}

	assertMembership(t, instances, 3)
	connector := createDiscoveryInstance(4623, "d13", []string{bootPeer(4611), bootPeer(4615), bootPeer(4619)})
	instances = append(instances, connector)
	assertMembership(t, instances, 12)
	connector.Stop()
	instances = instances[:len(instances)-1]
	assertMembership(t, instances, 11)
	stopInstances(t, instances)
}

func TestDisclosurePolicyWithPull(t *testing.T) {
	// Scenario: run 2 groups of peers that simulate 2 organizations:
	// {p0, p1, p2, p3, p4}
	// {p5, p6, p7, p8, p9}
	// Only peers that have an even id have external addresses,
	// and only these peers should be published to peers of the other group,
	// while the only ones that need to know about them are peers
	// that have an even id themselves.
	// Furthermore, peers in different groups should not know about the internal
	// addresses of other peers.

	// This is a bootstrap map that matches for each peer its own bootstrap peer.
	// In practice (production) peers should only use peers of their own orgs as
	// bootstrap peers, but the discovery layer is ignorant of organizations.
	bootPeerMap := map[int]int{
		8610: 8616,
		8611: 8610,
		8612: 8610,
		8613: 8610,
		8614: 8610,
		8615: 8616,
		8616: 8610,
		8617: 8616,
		8618: 8616,
		8619: 8616,
	}

	// This map matches each peer to the peers it should know about in the test scenario.
	peersThatShouldBeKnownToPeers := map[int][]int{
		8610: {8611, 8612, 8613, 8614, 8616, 8618},
		8611: {8610, 8612, 8613, 8614},
		8612: {8610, 8611, 8613, 8614, 8616, 8618},
		8613: {8610, 8611, 8612, 8614},
		8614: {8610, 8611, 8612, 8613, 8616, 8618},
		8615: {8616, 8617, 8618, 8619},
		8616: {8610, 8612, 8614, 8615, 8617, 8618, 8619},
		8617: {8615, 8616, 8618, 8619},
		8618: {8610, 8612, 8614, 8615, 8616, 8617, 8619},
		8619: {8615, 8616, 8617, 8618},
	}
	// Create the peers in the two groups
	instances1, instances2 := createDisjointPeerGroupsWithNoGossip(bootPeerMap)
	// Wait until the instances establish full membership. This shouldn't take long,
	// because they are configured to pull membership at a very high frequency from
	// up to 10 peers (which results in pulling from everyone).
	waitUntilOrFail(t, func() bool {
		for _, inst := range append(instances1, instances2...) {
			// Ensure the expected membership is equal in size to the actual membership
			// of each peer.
			portsOfKnownMembers := portsOfMembers(inst.GetMembership())
			if len(peersThatShouldBeKnownToPeers[inst.port]) != len(portsOfKnownMembers) {
				return false
			}
		}
		return true
	})
	for _, inst := range append(instances1, instances2...) {
		portsOfKnownMembers := portsOfMembers(inst.GetMembership())
		// Ensure the expected membership is equal to the actual membership
		// of each peer. portsOfMembers returns a sorted slice, so assert.Equal does the job.
		assert.Equal(t, peersThatShouldBeKnownToPeers[inst.port], portsOfKnownMembers)
		// Next, check that internal endpoints aren't leaked across groups.
		for _, knownPeer := range inst.GetMembership() {
			// If an internal endpoint is known, ensure the peers are in the same group,
			// unless the peer in question is a peer that has a public address.
			// We cannot control what we disclose about ourselves when we send a membership request.
			if len(knownPeer.InternalEndpoint) > 0 && inst.port%2 != 0 {
				bothInGroup1 := portOfEndpoint(knownPeer.Endpoint) < 8615 && inst.port < 8615
				bothInGroup2 := portOfEndpoint(knownPeer.Endpoint) >= 8615 && inst.port >= 8615
				assert.True(t, bothInGroup1 || bothInGroup2, "%v knows about %v's internal endpoint", inst.port, knownPeer.InternalEndpoint)
			}
		}
	}

	t.Log("Shutting down instance 0...")
	// Now, we shut down instance 0 and ensure that the peers that shouldn't know it
	// don't get to know it via membership requests.
	stopInstances(t, []*gossipInstance{instances1[0]})
	time.Sleep(time.Second * 6)
	for _, inst := range append(instances1[1:], instances2...) {
		if peersThatShouldBeKnownToPeers[inst.port][0] == 8610 {
			assert.Equal(t, 1, inst.Discovery.(*gossipDiscoveryImpl).deadMembership.Size())
		} else {
			assert.Equal(t, 0, inst.Discovery.(*gossipDiscoveryImpl).deadMembership.Size())
		}
	}
	stopInstances(t, instances1[1:])
	stopInstances(t, instances2)
}

func createDisjointPeerGroupsWithNoGossip(bootPeerMap map[int]int) ([]*gossipInstance, []*gossipInstance) {
	instances1 := []*gossipInstance{}
	instances2 := []*gossipInstance{}
	for group := 0; group < 2; group++ {
		for i := 0; i < 5; i++ {
			group := group
			id := fmt.Sprintf("id%d", group*5+i)
			port := 8610 + group*5 + i
			bootPeers := []string{bootPeer(bootPeerMap[port])}
			pol := discPolForPeer(port)
			inst := createDiscoveryInstanceWithNoGossipWithDisclosurePolicy(port, id, bootPeers, pol)
			inst.initiateSync(defaultTestConfig.AliveExpirationTimeout/3, 10)
			if group == 0 {
				instances1 = append(instances1, inst)
			} else {
				instances2 = append(instances2, inst)
			}
		}
	}
	return instances1, instances2
}

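// discPolForPeer implements the group boundary described in
// TestDisclosurePolicyWithPull: ports below 8615 form one group and the rest
// form the other. Alive messages cross the boundary only when both the
// message's subject and the target have even ports, and the secret (internal
// endpoint) envelope is stripped whenever a message leaves the sender's group.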
func discPolForPeer(selfPort int) DisclosurePolicy {
	return func(remotePeer *NetworkMember) (Sieve, EnvelopeFilter) {
		targetPortStr := strings.Split(remotePeer.Endpoint, ":")[1]
		targetPort, _ := strconv.ParseInt(targetPortStr, 10, 64)
		return func(msg *protoext.SignedGossipMessage) bool {
				portOfAliveMsgStr := strings.Split(msg.GetAliveMsg().Membership.Endpoint, ":")[1]
				portOfAliveMsg, _ := strconv.ParseInt(portOfAliveMsgStr, 10, 64)

				if portOfAliveMsg < 8615 && targetPort < 8615 {
					return true
				}
				if portOfAliveMsg >= 8615 && targetPort >= 8615 {
					return true
				}

				// Else, expose peers with even ids to other peers with even ids
				return portOfAliveMsg%2 == 0 && targetPort%2 == 0
			}, func(msg *protoext.SignedGossipMessage) *proto.Envelope {
				envelope := protoG.Clone(msg.Envelope).(*proto.Envelope)
				if selfPort < 8615 && targetPort >= 8615 {
					envelope.SecretEnvelope = nil
				}

				if selfPort >= 8615 && targetPort < 8615 {
					envelope.SecretEnvelope = nil
				}

				return envelope
			}
	}
}

func TestCertificateChange(t *testing.T) {
	bootPeers := []string{bootPeer(42611), bootPeer(42612), bootPeer(42613)}
	p1 := createDiscoveryInstance(42611, "d1", bootPeers)
	p2 := createDiscoveryInstance(42612, "d2", bootPeers)
	p3 := createDiscoveryInstance(42613, "d3", bootPeers)

	// Wait for membership establishment
	assertMembership(t, []*gossipInstance{p1, p2, p3}, 2)

	// Shutdown the second peer
	waitUntilOrFailBlocking(t, p2.Stop)

	var pingCountFrom1 uint32
	var pingCountFrom3 uint32
	// Program mocks to increment ping counters
	p1.comm.lock.Lock()
	p1.comm.mock = &mock.Mock{}
	p1.comm.mock.On("SendToPeer", mock.Anything, mock.Anything)
	p1.comm.mock.On("Ping").Run(func(arguments mock.Arguments) {
		atomic.AddUint32(&pingCountFrom1, 1)
	})
	p1.comm.lock.Unlock()

	p3.comm.lock.Lock()
	p3.comm.mock = &mock.Mock{}
	p3.comm.mock.On("SendToPeer", mock.Anything, mock.Anything)
	p3.comm.mock.On("Ping").Run(func(arguments mock.Arguments) {
		atomic.AddUint32(&pingCountFrom3, 1)
	})
	p3.comm.lock.Unlock()

	pingCount1 := func() uint32 {
		return atomic.LoadUint32(&pingCountFrom1)
	}

	pingCount3 := func() uint32 {
		return atomic.LoadUint32(&pingCountFrom3)
	}

	c1 := pingCount1()
	c3 := pingCount3()

	// Ensure the first and third peers try to reconnect to it
	waitUntilTimeoutOrFail(t, func() bool {
		return pingCount1() > c1 && pingCount3() > c3
	}, timeout)

	// Tell the first peer that the second peer's PKI-ID has changed,
	// so that it will purge it from the membership entirely
	p1.comm.identitySwitch <- common.PKIidType("localhost:42612")

	c1 = pingCount1()
	c3 = pingCount3()
	// Ensure the third peer still tries to reconnect to it
	waitUntilTimeoutOrFail(t, func() bool {
		return pingCount3() > c3
	}, timeout)

	// Ensure the first peer stops trying
	assert.Equal(t, c1, pingCount1())

	waitUntilOrFailBlocking(t, p1.Stop)
	waitUntilOrFailBlocking(t, p3.Stop)
}

func TestMsgStoreExpiration(t *testing.T) {
	// Start 4 instances, wait for membership to build, then stop 2 of them.
	// Check that the membership of the 2 running instances shrinks accordingly.
	// Wait for expiration and check that the alive messages and the related
	// entries in the maps are removed from the running instances.
	nodeNum := 4
	bootPeers := []string{bootPeer(12611), bootPeer(12612)}
	instances := []*gossipInstance{}

	inst := createDiscoveryInstance(12611, "d1", bootPeers)
	instances = append(instances, inst)

	inst = createDiscoveryInstance(12612, "d2", bootPeers)
	instances = append(instances, inst)

	for i := 3; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst = createDiscoveryInstance(12610+i, id, bootPeers)
		instances = append(instances, inst)
	}

	assertMembership(t, instances, nodeNum-1)

	waitUntilOrFailBlocking(t, instances[nodeNum-1].Stop)
	waitUntilOrFailBlocking(t, instances[nodeNum-2].Stop)

	assertMembership(t, instances[:len(instances)-2], nodeNum-3)

	checkMessages := func() bool {
		for _, inst := range instances[:len(instances)-2] {
			for _, downInst := range instances[len(instances)-2:] {
				downCastInst := inst.discoveryImpl()
				downCastInst.lock.RLock()
				if _, exist := downCastInst.aliveLastTS[string(downInst.discoveryImpl().self.PKIid)]; exist {
					downCastInst.lock.RUnlock()
					return false
				}
				if _, exist := downCastInst.deadLastTS[string(downInst.discoveryImpl().self.PKIid)]; exist {
					downCastInst.lock.RUnlock()
					return false
				}
				if _, exist := downCastInst.id2Member[string(downInst.discoveryImpl().self.PKIid)]; exist {
					downCastInst.lock.RUnlock()
					return false
				}
				if downCastInst.aliveMembership.MsgByID(downInst.discoveryImpl().self.PKIid) != nil {
					downCastInst.lock.RUnlock()
					return false
				}
				if downCastInst.deadMembership.MsgByID(downInst.discoveryImpl().self.PKIid) != nil {
					downCastInst.lock.RUnlock()
					return false
				}
				for _, am := range downCastInst.msgStore.Get() {
					m := am.(*protoext.SignedGossipMessage).GetAliveMsg()
					if bytes.Equal(m.Membership.PkiId, downInst.discoveryImpl().self.PKIid) {
						downCastInst.lock.RUnlock()
						return false
					}
				}
				downCastInst.lock.RUnlock()
			}
		}
		return true
	}

	waitUntilTimeoutOrFail(t, checkMessages, defaultTestConfig.AliveExpirationTimeout*(DefMsgExpirationFactor+5))

	assertMembership(t, instances[:len(instances)-2], nodeNum-3)

	stopInstances(t, instances[:len(instances)-2])
}

func TestExpirationNoSecretEnvelope(t *testing.T) {
	l, err := zap.NewDevelopment()
	assert.NoError(t, err)

	removed := make(chan struct{})
	logger := flogging.NewFabricLogger(l, zap.Hooks(func(entry zapcore.Entry) error {
		if strings.Contains(entry.Message, "Removing member: Endpoint: foo") {
			removed <- struct{}{}
		}
		return nil
	}))

	mockTracker := &mockAnchorPeerTracker{}
	msgStore := newAliveMsgStore(&gossipDiscoveryImpl{
		aliveExpirationTimeout: time.Millisecond,
		lock:                   &sync.RWMutex{},
		aliveMembership:        util.NewMembershipStore(),
		deadMembership:         util.NewMembershipStore(),
		logger:                 logger,
		anchorPeerTracker:      mockTracker,
	})

	msg := &proto.GossipMessage{
		Content: &proto.GossipMessage_AliveMsg{
			AliveMsg: &proto.AliveMessage{Membership: &proto.Member{
				Endpoint: "foo",
			}},
		},
	}

	sMsg, err := protoext.NoopSign(msg)
	assert.NoError(t, err)

	msgStore.Add(sMsg)
	select {
	case <-removed:
	case <-time.After(time.Second * 10):
		t.Fatalf("timed out")
	}
}

  1332  func TestMsgStoreExpirationWithMembershipMessages(t *testing.T) {
  1333  	// Creates 3 discovery instances without gossip communication
  1334  	// Generates MembershipRequest msg for each instance using createMembershipRequest
  1335  	// Generates Alive msg for each instance using createAliveMessage
  1336  	// Builds membership using Alive msgs
  1337  	// Checks msgStore and related maps
  1338  	// Generates MembershipResponse msgs for each instance using createMembershipResponse
  1339  	// Generates new set of Alive msgs and processes them
  1340  	// Checks msgStore and related maps
  1341  	// Waits for expiration and checks msgStore and related maps
  1342  	// Processes stored MembershipRequest msg and checks msgStore and related maps
  1343  	// Processes stored MembershipResponse msg and checks msgStore and related maps
  1344  	bootPeers := []string{}
  1345  	peersNum := 3
  1346  	instances := []*gossipInstance{}
  1347  	aliveMsgs := []*protoext.SignedGossipMessage{}
  1348  	newAliveMsgs := []*protoext.SignedGossipMessage{}
  1349  	memReqMsgs := []*protoext.SignedGossipMessage{}
  1350  	memRespMsgs := make(map[int][]*proto.MembershipResponse)
  1351  
  1352  	for i := 0; i < peersNum; i++ {
  1353  		id := fmt.Sprintf("d%d", i)
  1354  		inst := createDiscoveryInstanceWithNoGossip(22610+i, id, bootPeers)
  1355  		inst.comm.disableComm = true
  1356  		instances = append(instances, inst)
  1357  	}
  1358  
  1359  	// Creating MembershipRequest messages
  1360  	for i := 0; i < peersNum; i++ {
  1361  		memReqMsg, _ := instances[i].discoveryImpl().createMembershipRequest(true)
  1362  		sMsg, _ := protoext.NoopSign(memReqMsg)
  1363  		memReqMsgs = append(memReqMsgs, sMsg)
  1364  	}
  1365  	// Creating Alive messages
  1366  	for i := 0; i < peersNum; i++ {
  1367  		aliveMsg, _ := instances[i].discoveryImpl().createSignedAliveMessage(true)
  1368  		aliveMsgs = append(aliveMsgs, aliveMsg)
  1369  	}
  1370  
  1371  	repeatForFiltered := func(n int, filter func(i int) bool, action func(i int)) {
  1372  		for i := 0; i < n; i++ {
  1373  			if filter(i) {
  1374  				continue
  1375  			}
  1376  			action(i)
  1377  		}
  1378  	}
  1379  
  1380  	// Handling Alive
  1381  	for i := 0; i < peersNum; i++ {
  1382  		for k := 0; k < peersNum; k++ {
  1383  			instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1384  				msg: aliveMsgs[k],
  1385  				info: &protoext.ConnectionInfo{
  1386  					ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1387  				},
  1388  			})
  1389  		}
  1390  	}
  1391  
  1392  	checkExistence := func(instances []*gossipInstance, msgs []*protoext.SignedGossipMessage, index int, i int, step string) {
  1393  		_, exist := instances[index].discoveryImpl().aliveLastTS[string(instances[i].discoveryImpl().self.PKIid)]
  1394  		assert.True(t, exist, fmt.Sprint(step, " Data from alive msg ", i, " doesn't exist in aliveLastTS of discovery inst ", index))
  1395  
  1396  		_, exist = instances[index].discoveryImpl().id2Member[string(string(instances[i].discoveryImpl().self.PKIid))]
  1397  		assert.True(t, exist, fmt.Sprint(step, " id2Member mapping doesn't exist for alive msg ", i, " of discovery inst ", index))
  1398  
  1399  		assert.NotNil(t, instances[index].discoveryImpl().aliveMembership.MsgByID(instances[i].discoveryImpl().self.PKIid), fmt.Sprint(step, " Alive msg", i, " not exist in aliveMembership of discovery inst ", index))
  1400  
  1401  		assert.Contains(t, instances[index].discoveryImpl().msgStore.Get(), msgs[i], fmt.Sprint(step, " Alive msg ", i, "not stored in store of discovery inst ", index))
  1402  	}
  1403  
  1404  	checkAliveMsgExist := func(instances []*gossipInstance, msgs []*protoext.SignedGossipMessage, index int, step string) {
  1405  		instances[index].discoveryImpl().lock.RLock()
  1406  		defer instances[index].discoveryImpl().lock.RUnlock()
  1407  		repeatForFiltered(peersNum,
  1408  			func(k int) bool {
  1409  				return k == index
  1410  			},
  1411  			func(k int) {
  1412  				checkExistence(instances, msgs, index, k, step)
  1413  			})
  1414  	}
  1415  
  1416  	// Checking is Alive was processed
  1417  	for i := 0; i < peersNum; i++ {
  1418  		checkAliveMsgExist(instances, aliveMsgs, i, "[Step 1 - processing aliveMsg]")
  1419  	}
  1420  
  1421  	// Creating MembershipResponse while all instances have full membership
  1422  	for i := 0; i < peersNum; i++ {
  1423  		peerToResponse := &NetworkMember{
  1424  			Metadata:         []byte{},
  1425  			PKIid:            []byte(fmt.Sprintf("localhost:%d", 22610+i)),
  1426  			Endpoint:         fmt.Sprintf("localhost:%d", 22610+i),
  1427  			InternalEndpoint: fmt.Sprintf("localhost:%d", 22610+i),
  1428  		}
  1429  		memRespMsgs[i] = []*proto.MembershipResponse{}
  1430  		repeatForFiltered(peersNum,
  1431  			func(k int) bool {
  1432  				return k == i
  1433  			},
  1434  			func(k int) {
  1435  				aliveMsg, _ := instances[k].discoveryImpl().createSignedAliveMessage(true)
  1436  				memResp := instances[k].discoveryImpl().createMembershipResponse(aliveMsg, peerToResponse)
  1437  				memRespMsgs[i] = append(memRespMsgs[i], memResp)
  1438  			})
  1439  	}
  1440  
  1441  	// Re-creating Alive msgs with a higher seq_num, to make sure the Alive msgs in memReq and memResp are older
  1442  	for i := 0; i < peersNum; i++ {
  1443  		aliveMsg, _ := instances[i].discoveryImpl().createSignedAliveMessage(true)
  1444  		newAliveMsgs = append(newAliveMsgs, aliveMsg)
  1445  	}
  1446  
  1447  	// Handling new Alive set
  1448  	for i := 0; i < peersNum; i++ {
  1449  		for k := 0; k < peersNum; k++ {
  1450  			instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1451  				msg: newAliveMsgs[k],
  1452  				info: &protoext.ConnectionInfo{
  1453  					ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1454  				},
  1455  			})
  1456  		}
  1457  	}
  1458  
  1459  	// Checking that the new Alive messages were processed
  1460  	for i := 0; i < peersNum; i++ {
  1461  		checkAliveMsgExist(instances, newAliveMsgs, i, "[Step 2 - processing aliveMsg]")
  1462  	}
  1463  
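        	// checkAliveMsgNotExist asserts that every piece of alive/dead bookkeeping
        	// of discovery instance `index` is empty, i.e. expiration purged everything.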
  1464  	checkAliveMsgNotExist := func(instances []*gossipInstance, msgs []*protoext.SignedGossipMessage, index int, step string) {
  1465  		instances[index].discoveryImpl().lock.RLock()
  1466  		defer instances[index].discoveryImpl().lock.RUnlock()
  1467  		assert.Empty(t, instances[index].discoveryImpl().aliveLastTS, fmt.Sprint(step, " Data from alive msg still exists in aliveLastTS of discovery inst ", index))
  1468  		assert.Empty(t, instances[index].discoveryImpl().deadLastTS, fmt.Sprint(step, " Data from alive msg still exists in deadLastTS of discovery inst ", index))
  1469  		assert.Empty(t, instances[index].discoveryImpl().id2Member, fmt.Sprint(step, " id2Member mapping still contains data related to Alive msg: discovery inst ", index))
  1470  		assert.Empty(t, instances[index].discoveryImpl().msgStore.Get(), fmt.Sprint(step, " Expired Alive msg still stored in store of discovery inst ", index))
  1471  		assert.Zero(t, instances[index].discoveryImpl().aliveMembership.Size(), fmt.Sprint(step, " Alive membership list is not empty, discovery inst ", index))
  1472  		assert.Zero(t, instances[index].discoveryImpl().deadMembership.Size(), fmt.Sprint(step, " Dead membership list is not empty, discovery inst ", index))
  1473  	}
  1474  
  1475  	// Sleep until the messages expire
  1476  	time.Sleep(defaultTestConfig.AliveExpirationTimeout * (DefMsgExpirationFactor + 5))
  1477  
  1478  	// Checking Alive expired
  1479  	for i := 0; i < peersNum; i++ {
  1480  		checkAliveMsgNotExist(instances, newAliveMsgs, i, "[Step 3 - expiration in msg store]")
  1481  	}
  1482  
  1483  	// Processing old MembershipRequest messages
  1484  	for i := 0; i < peersNum; i++ {
  1485  		repeatForFiltered(peersNum,
  1486  			func(k int) bool {
  1487  				return k == i
  1488  			},
  1489  			func(k int) {
  1490  				instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1491  					msg: memReqMsgs[k],
  1492  					info: &protoext.ConnectionInfo{
  1493  						ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1494  					},
  1495  				})
  1496  			})
  1497  	}
  1498  
  1499  	// MembershipRequest processing didn't change anything
  1500  	for i := 0; i < peersNum; i++ {
  1501  		checkAliveMsgNotExist(instances, aliveMsgs, i, "[Step 4 - memReq processing after expiration]")
  1502  	}
  1503  
  1504  	// Processing the old Alive messages again, after they have expired
  1505  	for i := 0; i < peersNum; i++ {
  1506  		for k := 0; k < peersNum; k++ {
  1507  			instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1508  				msg: aliveMsgs[k],
  1509  				info: &protoext.ConnectionInfo{
  1510  					ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1511  				},
  1512  			})
  1513  		}
  1514  	}
  1515  
  1516  	// Alive msg processing didn't change anything
  1517  	for i := 0; i < peersNum; i++ {
  1518  		checkAliveMsgNotExist(instances, aliveMsgs, i, "[Step 5.1 - after processing expired old aliveMsg]")
  1519  		checkAliveMsgNotExist(instances, newAliveMsgs, i, "[Step 5.2 - after processing expired new aliveMsg]")
  1520  	}
  1521  
  1522  	// Handling old MembershipResponse messages
  1523  	for i := 0; i < peersNum; i++ {
  1524  		respForPeer := memRespMsgs[i]
  1525  		for _, msg := range respForPeer {
  1526  			sMsg, _ := protoext.NoopSign(&proto.GossipMessage{
  1527  				Tag:   proto.GossipMessage_EMPTY,
  1528  				Nonce: uint64(0),
  1529  				Content: &proto.GossipMessage_MemRes{
  1530  					MemRes: msg,
  1531  				},
  1532  			})
  1533  			instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1534  				msg: sMsg,
  1535  				info: &protoext.ConnectionInfo{
  1536  					ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1537  				},
  1538  			})
  1539  		}
  1540  	}
  1541  
  1542  	// MembershipResponse msg processing didn't change anything
  1543  	for i := 0; i < peersNum; i++ {
  1544  		checkAliveMsgNotExist(instances, aliveMsgs, i, "[Step 6 - after processing expired MembershipResp]")
  1545  	}
  1546  
  1547  	for i := 0; i < peersNum; i++ {
  1548  		instances[i].Stop()
  1549  	}
  1551  }
  1552  
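        // TestAliveMsgStore verifies the semantics of the alive message store:
        // CheckValid accepts an unseen Alive message, Add stores it, CheckValid then
        // rejects the duplicate, and both panic on non-Alive messages such as a
        // MembershipRequest.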
  1553  func TestAliveMsgStore(t *testing.T) {
  1554  	bootPeers := []string{}
  1555  	peersNum := 2
  1556  	instances := []*gossipInstance{}
  1557  	aliveMsgs := []*protoext.SignedGossipMessage{}
  1558  	memReqMsgs := []*protoext.SignedGossipMessage{}
  1559  
  1560  	for i := 0; i < peersNum; i++ {
  1561  		id := fmt.Sprintf("d%d", i)
  1562  		inst := createDiscoveryInstanceWithNoGossip(32610+i, id, bootPeers)
  1563  		instances = append(instances, inst)
  1564  	}
  1565  
  1566  	// Creating MembershipRequest messages
  1567  	for i := 0; i < peersNum; i++ {
  1568  		memReqMsg, _ := instances[i].discoveryImpl().createMembershipRequest(true)
  1569  		sMsg, _ := protoext.NoopSign(memReqMsg)
  1570  		memReqMsgs = append(memReqMsgs, sMsg)
  1571  	}
  1572  	// Creating Alive messages
  1573  	for i := 0; i < peersNum; i++ {
  1574  		aliveMsg, _ := instances[i].discoveryImpl().createSignedAliveMessage(true)
  1575  		aliveMsgs = append(aliveMsgs, aliveMsg)
  1576  	}
  1577  
  1578  	// Check new alive msgs
  1579  	for _, msg := range aliveMsgs {
  1580  		assert.True(t, instances[0].discoveryImpl().msgStore.CheckValid(msg), "aliveMsgStore CheckValid returns false on new AliveMsg")
  1581  	}
  1582  
  1583  	// Add new alive msgs
  1584  	for _, msg := range aliveMsgs {
  1585  		assert.True(t, instances[0].discoveryImpl().msgStore.Add(msg), "aliveMsgStore Add returns false on new AliveMsg")
  1586  	}
  1587  
  1588  	// Check exist alive msgs
  1589  	for _, msg := range aliveMsgs {
  1590  		assert.False(t, instances[0].discoveryImpl().msgStore.CheckValid(msg), "aliveMsgStore CheckValid returns true on existing AliveMsg")
  1591  	}
  1592  
  1593  	// Check non-alive msgs
  1594  	for _, msg := range memReqMsgs {
  1595  		assert.Panics(t, func() { instances[1].discoveryImpl().msgStore.CheckValid(msg) }, "aliveMsgStore CheckValid should panic on new MembershipRequest msg")
  1596  		assert.Panics(t, func() { instances[1].discoveryImpl().msgStore.Add(msg) }, "aliveMsgStore Add should panic on new MembershipRequest msg")
  1597  	}
  1598  }
  1599  
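        // TestMemRespDisclosurePol verifies that a disclosure policy can suppress
        // membership responses: d1 and d3 refuse to disclose to localhost:7879 (d2),
        // so d2 must reach full membership through alive messages alone, without ever
        // receiving a MembershipResponse.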
  1600  func TestMemRespDisclosurePol(t *testing.T) {
  1601  	pol := func(remotePeer *NetworkMember) (Sieve, EnvelopeFilter) {
  1602  		return func(_ *protoext.SignedGossipMessage) bool {
  1603  				return remotePeer.Endpoint != "localhost:7879"
  1604  			}, func(m *protoext.SignedGossipMessage) *proto.Envelope {
  1605  				return m.Envelope
  1606  			}
  1607  	}
  1608  
  1609  	wasMembershipResponseReceived := func(msg *protoext.SignedGossipMessage) {
  1610  		assert.Nil(t, msg.GetMemRes())
  1611  	}
  1612  
  1613  	d1 := createDiscoveryInstanceThatGossips(7878, "d1", []string{}, true, pol, defaultTestConfig)
  1614  	defer d1.Stop()
  1615  	d2 := createDiscoveryInstanceThatGossipsWithInterceptors(7879, "d2", []string{"localhost:7878"}, true, noopPolicy, wasMembershipResponseReceived, defaultTestConfig)
  1616  	defer d2.Stop()
  1617  	d3 := createDiscoveryInstanceThatGossips(7880, "d3", []string{"localhost:7878"}, true, pol, defaultTestConfig)
  1618  	defer d3.Stop()
  1619  
  1620  	// all peers know each other
  1621  	assertMembership(t, []*gossipInstance{d1, d2, d3}, 2)
  1622  	// d2 received some messages, but we asserted that none of them are membership responses.
  1623  	assert.NotZero(t, d2.receivedMsgCount())
  1624  	assert.NotZero(t, d2.sentMsgCount())
  1625  }
  1626  
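        // TestMembersByID verifies that Members.ByID indexes members by the string
        // form of their PKI-ID.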
  1627  func TestMembersByID(t *testing.T) {
  1628  	members := Members{
  1629  		{PKIid: common.PKIidType("p0"), Endpoint: "p0"},
  1630  		{PKIid: common.PKIidType("p1"), Endpoint: "p1"},
  1631  	}
  1632  	byID := members.ByID()
  1633  	assert.Len(t, byID, 2)
  1634  	assert.Equal(t, "p0", byID["p0"].Endpoint)
  1635  	assert.Equal(t, "p1", byID["p1"].Endpoint)
  1636  }
  1637  
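        // TestFilter verifies that Members.Filter keeps only the members that
        // satisfy the given predicate.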
  1638  func TestFilter(t *testing.T) {
  1639  	members := Members{
  1640  		{PKIid: common.PKIidType("p0"), Endpoint: "p0", Properties: &proto.Properties{
  1641  			Chaincodes: []*proto.Chaincode{{Name: "cc", Version: "1.0"}},
  1642  		}},
  1643  		{PKIid: common.PKIidType("p1"), Endpoint: "p1", Properties: &proto.Properties{
  1644  			Chaincodes: []*proto.Chaincode{{Name: "cc", Version: "2.0"}},
  1645  		}},
  1646  	}
  1647  	res := members.Filter(func(member NetworkMember) bool {
  1648  		cc := member.Properties.Chaincodes[0]
  1649  		return cc.Version == "2.0" && cc.Name == "cc"
  1650  	})
  1651  	assert.Equal(t, Members{members[1]}, res)
  1652  }
  1653  
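        // TestMap verifies that Members.Map applies the transformation to a copy of
        // each member, leaving the original slice untouched.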
  1654  func TestMap(t *testing.T) {
  1655  	members := Members{
  1656  		{PKIid: common.PKIidType("p0"), Endpoint: "p0"},
  1657  		{PKIid: common.PKIidType("p1"), Endpoint: "p1"},
  1658  	}
  1659  	expectedMembers := Members{
  1660  		{PKIid: common.PKIidType("p0"), Endpoint: "p0", Properties: &proto.Properties{LedgerHeight: 2}},
  1661  		{PKIid: common.PKIidType("p1"), Endpoint: "p1", Properties: &proto.Properties{LedgerHeight: 2}},
  1662  	}
  1663  
  1664  	addProperty := func(member NetworkMember) NetworkMember {
  1665  		member.Properties = &proto.Properties{
  1666  			LedgerHeight: 2,
  1667  		}
  1668  		return member
  1669  	}
  1670  
  1671  	assert.Equal(t, expectedMembers, members.Map(addProperty))
  1672  	// Ensure original members didn't change
  1673  	assert.Nil(t, members[0].Properties)
  1674  	assert.Nil(t, members[1].Properties)
  1675  }
  1676  
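        // TestMembersIntersect verifies that Members.Intersect returns only the
        // members present in both slices.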
  1677  func TestMembersIntersect(t *testing.T) {
  1678  	members1 := Members{
  1679  		{PKIid: common.PKIidType("p0"), Endpoint: "p0"},
  1680  		{PKIid: common.PKIidType("p1"), Endpoint: "p1"},
  1681  	}
  1682  	members2 := Members{
  1683  		{PKIid: common.PKIidType("p1"), Endpoint: "p1"},
  1684  		{PKIid: common.PKIidType("p2"), Endpoint: "p2"},
  1685  	}
  1686  	assert.Equal(t, Members{{PKIid: common.PKIidType("p1"), Endpoint: "p1"}}, members1.Intersect(members2))
  1687  }
  1688  
  1689  func TestPeerIsolation(t *testing.T) {
  1690  	// Scenario:
  1691  	// Start 3 peers (peer0, peer1, peer2). Set peer1 as the bootstrap peer for all.
  1692  	// Stop peer0 and peer1 for a while, start them again and test if peer2 still gets full membership
  1693  
  1694  	config := defaultTestConfig
  1695  	// Use a smaller AliveExpirationTimeout than the default to reduce the running time of the test.
  1696  	config.AliveExpirationTimeout = 2 * config.AliveTimeInterval
  1697  
  1698  	peersNum := 3
  1699  	bootPeers := []string{bootPeer(7121)}
  1700  	instances := []*gossipInstance{}
  1701  	var inst *gossipInstance
  1702  
  1703  	// Start all peers and wait for full membership
  1704  	for i := 0; i < peersNum; i++ {
  1705  		id := fmt.Sprintf("d%d", i)
  1706  		inst = createDiscoveryInstanceCustomConfig(7120+i, id, bootPeers, config)
  1707  		instances = append(instances, inst)
  1708  	}
  1709  	assertMembership(t, instances, peersNum-1)
  1710  
  1711  	// Stop the first 2 peers so the third peer would stay alone
  1712  	stopInstances(t, instances[:peersNum-1])
  1713  	assertMembership(t, instances[peersNum-1:], 0)
  1714  
  1715  	// Sleep the same amount of time as it takes to remove a message from the aliveMsgStore (aliveMsgTTL)
  1716  	// Add a second as a buffer
  1717  	time.Sleep(config.AliveExpirationTimeout*DefMsgExpirationFactor + time.Second)
  1718  
  1719  	// Restart the first 2 peers and wait for all the peers to get full membership.
  1720  	// In particular, we want to test that peer2 isn't isolated
  1721  	for i := 0; i < peersNum-1; i++ {
  1722  		id := fmt.Sprintf("d%d", i)
  1723  		inst = createDiscoveryInstanceCustomConfig(7120+i, id, bootPeers, config)
  1724  		instances[i] = inst
  1725  	}
  1726  	assertMembership(t, instances, peersNum-1)
  1727  }
  1728  
  1729  func TestMembershipAfterExpiration(t *testing.T) {
  1730  	// Scenario:
  1731  	// Start 3 peers (peer0, peer1, peer2). Set peer0 as the anchor peer.
  1732  	// Stop peer0 and peer1 for a while, start them again and test if peer2 still gets full membership
  1733  
  1734  	config := defaultTestConfig
  1735  	// Use a smaller AliveExpirationTimeout than the default to reduce the running time of the test.
  1736  	config.AliveExpirationTimeout = 2 * config.AliveTimeInterval
  1737  	config.ReconnectInterval = config.AliveExpirationTimeout
  1738  	config.MsgExpirationFactor = 5
  1739  
  1740  	peersNum := 3
  1741  	ports := []int{9120, 9121, 9122}
  1742  	anchorPeer := "localhost:9120"
  1743  	bootPeers := []string{}
  1744  	instances := []*gossipInstance{}
  1745  	var inst *gossipInstance
  1746  	mockTracker := &mockAnchorPeerTracker{[]string{anchorPeer}}
  1747  
  1748  	l, err := zap.NewDevelopment()
  1749  	assert.NoError(t, err)
  1750  	expired := make(chan struct{}, 1)
  1751  
  1752  	// use a custom logger to verify messages from the expiration callback
  1753  	loggerThatTracksCustomMessage := func() util.Logger {
  1754  		var lock sync.RWMutex
  1755  		expectedMsgs := map[string]struct{}{
  1756  			"Do not remove bootstrap or anchor peer endpoint localhost:9120 from membership":                                   {},
  1757  			"Removing member: Endpoint: localhost:9121, InternalEndpoint: localhost:9121, PKIID: 6c6f63616c686f73743a39313231": {},
  1758  		}
  1759  
  1760  		return flogging.NewFabricLogger(l, zap.Hooks(func(entry zapcore.Entry) error {
  1761  			// do nothing if we already found all the expectedMsgs
  1762  			lock.RLock()
  1763  			expectedMsgSize := len(expectedMsgs)
  1764  			lock.RUnlock()
  1765  
  1766  			if expectedMsgSize == 0 {
  1767  				select {
  1768  				case expired <- struct{}{}:
  1769  				default:
  1770  					// no room is fine, continue
  1771  				}
  1772  				return nil
  1773  			}
  1774  
  1775  			lock.Lock()
  1776  			defer lock.Unlock()
  1777  
  1778  			if _, matched := expectedMsgs[entry.Message]; matched {
  1779  				delete(expectedMsgs, entry.Message)
  1780  			}
  1781  			return nil
  1782  		}))
  1783  	}
  1784  
  1785  	// Start all peers, connect to the anchor peer and verify full membership
  1786  	for i := 0; i < peersNum; i++ {
  1787  		id := fmt.Sprintf("d%d", i)
  1788  		logger := loggerThatTracksCustomMessage()
  1789  		inst = createDiscoveryInstanceWithAnchorPeerTracker(ports[i], id, bootPeers, true, noopPolicy, func(_ *protoext.SignedGossipMessage) {}, config, mockTracker, logger)
  1790  		instances = append(instances, inst)
  1791  	}
  1792  	for i := 1; i < peersNum; i++ {
  1793  		connect(instances[i], anchorPeer)
  1794  	}
  1795  	assertMembership(t, instances, peersNum-1)
  1796  
  1797  	// Stop peer0 and peer1 so that peer2 would stay alone
  1798  	stopInstances(t, instances[0:peersNum-1])
  1799  
  1800  	// waitTime is the same amount of time as it takes to remove a message from the aliveMsgStore (aliveMsgTTL)
  1801  	// Add a second as a buffer
  1802  	waitTime := config.AliveExpirationTimeout*time.Duration(config.MsgExpirationFactor) + time.Second
  1803  	select {
  1804  	case <-expired:
  1805  	case <-time.After(waitTime):
  1806  		t.Fatalf("timed out")
  1807  	}
  1808  	// peer2's deadMembership should contain the anchor peer
  1809  	deadMembership := instances[peersNum-1].discoveryImpl().deadMembership
  1810  	assert.Equal(t, 1, deadMembership.Size())
  1811  	assertMembership(t, instances[peersNum-1:], 0)
  1812  
  1813  	// Restart peer0 and peer1 and wait for all the peers to get full membership.
  1814  	// In particular, we want to test that peer2 isn't isolated
  1815  	for i := 0; i < peersNum-1; i++ {
  1816  		id := fmt.Sprintf("d%d", i)
  1817  		inst = createDiscoveryInstanceWithAnchorPeerTracker(ports[i], id, bootPeers, true, noopPolicy, func(_ *protoext.SignedGossipMessage) {}, config, mockTracker, nil)
  1818  		instances[i] = inst
  1819  	}
  1820  	connect(instances[1], anchorPeer)
  1821  	assertMembership(t, instances, peersNum-1)
  1822  }
  1823  
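        // connect installs a temporary mock on the instance's comm module that
        // uninstalls itself as soon as the first MembershipRequest is sent, then
        // initiates a connection to the given endpoint, identifying the remote peer
        // as a member of the same organization.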
  1824  func connect(inst *gossipInstance, endpoint string) {
  1825  	inst.comm.lock.Lock()
  1826  	inst.comm.mock = &mock.Mock{}
  1827  	inst.comm.mock.On("SendToPeer", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
  1829  		msg := arguments.Get(1).(*protoext.SignedGossipMessage)
  1830  		if req := msg.GetMemReq(); req != nil {
  1831  			inst.comm.lock.Lock()
  1832  			inst.comm.mock = nil
  1833  			inst.comm.lock.Unlock()
  1834  		}
  1835  	})
  1836  	inst.comm.mock.On("Ping", mock.Anything)
  1837  	inst.comm.lock.Unlock()
  1838  	netMember2Connect2 := NetworkMember{Endpoint: endpoint, PKIid: []byte(endpoint)}
  1839  	inst.Connect(netMember2Connect2, func() (identification *PeerIdentification, err error) {
  1840  		return &PeerIdentification{SelfOrg: true, ID: nil}, nil
  1841  	})
  1842  }
  1843  
  1844  func waitUntilOrFail(t *testing.T, pred func() bool) {
  1845  	waitUntilTimeoutOrFail(t, pred, timeout)
  1846  }
  1847  
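        // waitUntilTimeoutOrFail polls pred every timeout/10 until it returns true,
        // and fails the test once the given timeout elapses.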
  1848  func waitUntilTimeoutOrFail(t *testing.T, pred func() bool, timeout time.Duration) {
  1849  	start := time.Now()
  1850  	limit := start.UnixNano() + timeout.Nanoseconds()
  1851  	for time.Now().UnixNano() < limit {
  1852  		if pred() {
  1853  			return
  1854  		}
  1855  		time.Sleep(timeout / 10)
  1856  	}
  1857  	assert.Fail(t, "Timeout expired!")
  1858  }
  1859  
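        // waitUntilOrFailBlocking runs f in a goroutine and fails the test if f does
        // not return within the global timeout.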
  1860  func waitUntilOrFailBlocking(t *testing.T, f func()) {
  1861  	successChan := make(chan struct{}, 1)
  1862  	go func() {
  1863  		f()
  1864  		successChan <- struct{}{}
  1865  	}()
  1866  	select {
  1867  	case <-time.NewTimer(timeout).C:
  1869  	case <-successChan:
  1870  		return
  1871  	}
  1872  	assert.Fail(t, "Timeout expired!")
  1873  }
  1874  
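        // stopInstances stops all the given instances concurrently and fails the
        // test if the combined shutdown does not finish within the global timeout.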
  1875  func stopInstances(t *testing.T, instances []*gossipInstance) {
  1876  	stopAction := &sync.WaitGroup{}
  1877  	for _, inst := range instances {
  1878  		stopAction.Add(1)
  1879  		go func(inst *gossipInstance) {
  1880  			defer stopAction.Done()
  1881  			inst.Stop()
  1882  		}(inst)
  1883  	}
  1884  
  1885  	waitUntilOrFailBlocking(t, stopAction.Wait)
  1886  }
  1887  
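        // assertMembership polls each instance concurrently until its membership
        // view reaches expectedNum peers, and fails the test if the shared context
        // times out first.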
  1888  func assertMembership(t *testing.T, instances []*gossipInstance, expectedNum int) {
  1889  	wg := sync.WaitGroup{}
  1890  	wg.Add(len(instances))
  1891  
  1892  	ctx, cancel := context.WithTimeout(context.Background(), timeout)
  1893  	defer cancel()
  1894  
  1895  	for _, inst := range instances {
  1896  		go func(ctx context.Context, i *gossipInstance) {
  1897  			defer wg.Done()
  1898  			for {
  1899  				select {
  1900  				case <-ctx.Done():
  1901  					return
  1902  				case <-time.After(timeout / 10):
  1903  					if len(i.GetMembership()) == expectedNum {
  1904  						return
  1905  					}
  1906  				}
  1907  			}
  1908  		}(ctx, inst)
  1909  	}
  1910  
  1911  	wg.Wait()
  1912  	assert.NoError(t, ctx.Err(), "Timeout expired!")
  1913  }
  1914  
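        // portsOfMembers extracts the port of each member's endpoint and returns
        // the ports sorted in ascending order.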
  1915  func portsOfMembers(members []NetworkMember) []int {
  1916  	ports := make([]int, len(members))
  1917  	for i := range members {
  1918  		ports[i] = portOfEndpoint(members[i].Endpoint)
  1919  	}
  1920  	sort.Ints(ports)
  1921  	return ports
  1922  }
  1923  
  1924  func portOfEndpoint(endpoint string) int {
  1925  	port, _ := strconv.ParseInt(strings.Split(endpoint, ":")[1], 10, 64)
  1926  	return int(port)
  1927  }