github.com/hechain20/hechain@v0.0.0-20220316014945-b544036ba106/gossip/discovery/discovery_test.go

/*
Copyright hechain 2022. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package discovery

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"math/rand"
	"net"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	protoG "github.com/golang/protobuf/proto"
	"github.com/hechain20/hechain/common/flogging"
	"github.com/hechain20/hechain/gossip/common"
	"github.com/hechain20/hechain/gossip/gossip/msgstore"
	"github.com/hechain20/hechain/gossip/protoext"
	"github.com/hechain20/hechain/gossip/util"
	proto "github.com/hyperledger/fabric-protos-go/gossip"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

var timeout = 15 * time.Second

var (
	aliveTimeInterval = 300 * time.Millisecond
	defaultTestConfig = DiscoveryConfig{
		AliveTimeInterval:            aliveTimeInterval,
		AliveExpirationTimeout:       10 * aliveTimeInterval,
		AliveExpirationCheckInterval: aliveTimeInterval,
		ReconnectInterval:            10 * aliveTimeInterval,
		MaxConnectionAttempts:        DefMaxConnectionAttempts,
		MsgExpirationFactor:          DefMsgExpirationFactor,
	}
)

func init() {
	util.SetupTestLogging()
	defaultTestConfig.MaxConnectionAttempts = 10000
}

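// dummyReceivedMessage is a minimal protoext.ReceivedMessage implementation for
// feeding messages into the discovery layer; only the methods that discovery
// actually calls are implemented, the rest panic.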
type dummyReceivedMessage struct {
	msg  *protoext.SignedGossipMessage
	info *protoext.ConnectionInfo
}

func (*dummyReceivedMessage) Respond(msg *proto.GossipMessage) {
	panic("implement me")
}

func (rm *dummyReceivedMessage) GetGossipMessage() *protoext.SignedGossipMessage {
	return rm.msg
}

func (*dummyReceivedMessage) GetSourceEnvelope() *proto.Envelope {
	panic("implement me")
}

func (rm *dummyReceivedMessage) GetConnectionInfo() *protoext.ConnectionInfo {
	return rm.info
}

func (*dummyReceivedMessage) Ack(err error) {
	panic("implement me")
}

// mockAnchorPeerTracker implements the AnchorPeerTracker interface
type mockAnchorPeerTracker struct {
	apEndpoints []string
}

func (m *mockAnchorPeerTracker) IsAnchorPeer(endpoint string) bool {
	return util.Contains(endpoint, m.apEndpoints)
}

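// dummyCommModule is a fake communication module. It is handed to
// NewDiscoveryService for both the comm and crypto dependencies, counts sent,
// received and signed messages, can record which messages were validated, and
// can be muted entirely via disableComm.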
type dummyCommModule struct {
	validatedMessages chan *protoext.SignedGossipMessage
	msgsReceived      uint32
	msgsSent          uint32
	id                string
	identitySwitch    chan common.PKIidType
	presumeDead       chan common.PKIidType
	detectedDead      chan string
	streams           map[string]proto.Gossip_GossipStreamClient
	conns             map[string]*grpc.ClientConn
	lock              *sync.RWMutex
	incMsgs           chan protoext.ReceivedMessage
	lastSeqs          map[string]uint64
	shouldGossip      bool
	disableComm       bool
	mock              *mock.Mock
	signCount         uint32
}

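// gossipInstance bundles a Discovery instance with its dummyCommModule and a
// real gRPC server, so tests exercise discovery over actual localhost
// connections.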
type gossipInstance struct {
	msgInterceptor func(*protoext.SignedGossipMessage)
	comm           *dummyCommModule
	Discovery
	gRPCserv      *grpc.Server
	lsnr          net.Listener
	shouldGossip  bool
	syncInitiator *time.Ticker
	stopChan      chan struct{}
	port          int
}

func (comm *dummyCommModule) ValidateAliveMsg(am *protoext.SignedGossipMessage) bool {
	comm.lock.RLock()
	c := comm.validatedMessages
	comm.lock.RUnlock()

	if c != nil {
		c <- am
	}
	return true
}

func (comm *dummyCommModule) IdentitySwitch() <-chan common.PKIidType {
	return comm.identitySwitch
}

func (comm *dummyCommModule) recordValidation(validatedMessages chan *protoext.SignedGossipMessage) {
	comm.lock.Lock()
	defer comm.lock.Unlock()
	comm.validatedMessages = validatedMessages
}

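// SignMessage counts signing operations and attaches the internal endpoint as
// a Secret to a (no-op signed) envelope.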
func (comm *dummyCommModule) SignMessage(am *proto.GossipMessage, internalEndpoint string) *proto.Envelope {
	atomic.AddUint32(&comm.signCount, 1)

	secret := &proto.Secret{
		Content: &proto.Secret_InternalEndpoint{
			InternalEndpoint: internalEndpoint,
		},
	}
	signer := func(msg []byte) ([]byte, error) {
		return nil, nil
	}
	s, _ := protoext.NoopSign(am)
	env := s.Envelope
	protoext.SignSecret(env, signer, secret)
	return env
}

func (comm *dummyCommModule) Gossip(msg *protoext.SignedGossipMessage) {
	if !comm.shouldGossip || comm.disableComm {
		return
	}
	comm.lock.Lock()
	defer comm.lock.Unlock()
	for _, conn := range comm.streams {
		conn.Send(msg.Envelope)
	}
}

func (comm *dummyCommModule) Forward(msg protoext.ReceivedMessage) {
	if !comm.shouldGossip || comm.disableComm {
		return
	}
	comm.lock.Lock()
	defer comm.lock.Unlock()
	for _, conn := range comm.streams {
		conn.Send(msg.GetGossipMessage().Envelope)
	}
}

func (comm *dummyCommModule) SendToPeer(peer *NetworkMember, msg *protoext.SignedGossipMessage) {
	if comm.disableComm {
		return
	}
	comm.lock.RLock()
	_, exists := comm.streams[peer.Endpoint]
	mock := comm.mock
	comm.lock.RUnlock()

	if mock != nil {
		mock.Called(peer, msg)
	}

	if !exists {
		if !comm.Ping(peer) {
			fmt.Printf("Ping to %v failed\n", peer.Endpoint)
			return
		}
	}
	comm.lock.Lock()
	s, _ := protoext.NoopSign(msg.GossipMessage)
	comm.streams[peer.Endpoint].Send(s.Envelope)
	comm.lock.Unlock()
	atomic.AddUint32(&comm.msgsSent, 1)
}

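// Ping dials the peer on demand: if no stream exists yet, or the previous
// connection was shut down, it opens a new gRPC connection and gossip stream;
// otherwise it pings over the existing connection.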
func (comm *dummyCommModule) Ping(peer *NetworkMember) bool {
	if comm.disableComm {
		return false
	}
	comm.lock.Lock()
	defer comm.lock.Unlock()

	if comm.mock != nil {
		comm.mock.Called()
	}

	_, alreadyExists := comm.streams[peer.Endpoint]
	conn := comm.conns[peer.Endpoint]
	if !alreadyExists || conn.GetState() == connectivity.Shutdown {
		newConn, err := grpc.Dial(peer.Endpoint, grpc.WithInsecure())
		if err != nil {
			return false
		}
		if stream, err := proto.NewGossipClient(newConn).GossipStream(context.Background()); err == nil {
			comm.conns[peer.Endpoint] = newConn
			comm.streams[peer.Endpoint] = stream
			return true
		}
		return false
	}
	if _, err := proto.NewGossipClient(conn).Ping(context.Background(), &proto.Empty{}); err != nil {
		return false
	}
	return true
}

func (comm *dummyCommModule) Accept() <-chan protoext.ReceivedMessage {
	return comm.incMsgs
}

func (comm *dummyCommModule) PresumedDead() <-chan common.PKIidType {
	return comm.presumeDead
}

func (comm *dummyCommModule) CloseConn(peer *NetworkMember) {
	comm.lock.Lock()
	defer comm.lock.Unlock()

	if _, exists := comm.streams[peer.Endpoint]; !exists {
		return
	}

	comm.streams[peer.Endpoint].CloseSend()
	comm.conns[peer.Endpoint].Close()
}

func (g *gossipInstance) receivedMsgCount() int {
	return int(atomic.LoadUint32(&g.comm.msgsReceived))
}

func (g *gossipInstance) sentMsgCount() int {
	return int(atomic.LoadUint32(&g.comm.msgsSent))
}

func (g *gossipInstance) discoveryImpl() *gossipDiscoveryImpl {
	return g.Discovery.(*gossipDiscoveryImpl)
}

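// initiateSync calls InitiateSync(peerNum) every `frequency` until the
// instance is stopped.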
func (g *gossipInstance) initiateSync(frequency time.Duration, peerNum int) {
	g.syncInitiator = time.NewTicker(frequency)
	g.stopChan = make(chan struct{})
	go func() {
		for {
			select {
			case <-g.syncInitiator.C:
				g.Discovery.InitiateSync(peerNum)
			case <-g.stopChan:
				g.syncInitiator.Stop()
				return
			}
		}
	}()
}

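// GossipStream implements the gossip service: each received envelope is
// deserialized, passed to the test's interceptor, delivered to discovery via
// incMsgs, and, if it is an alive message, forwarded onward.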
func (g *gossipInstance) GossipStream(stream proto.Gossip_GossipStreamServer) error {
	for {
		envelope, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		lgr := g.Discovery.(*gossipDiscoveryImpl).logger
		gMsg, err := protoext.EnvelopeToGossipMessage(envelope)
		if err != nil {
			lgr.Warning("Failed deserializing GossipMessage from envelope:", err)
			continue
		}
		g.msgInterceptor(gMsg)

		lgr.Debug(g.Discovery.Self().Endpoint, "Got message:", gMsg)
		g.comm.incMsgs <- &dummyReceivedMessage{
			msg: gMsg,
			info: &protoext.ConnectionInfo{
				ID: common.PKIidType("testID"),
			},
		}
		atomic.AddUint32(&g.comm.msgsReceived, 1)

		if aliveMsg := gMsg.GetAliveMsg(); aliveMsg != nil {
			g.tryForwardMessage(gMsg)
		}
	}
}

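// tryForwardMessage gossips an alive message onward only if its sequence
// number is newer than the last one seen from that peer, so the test network
// doesn't amplify stale messages.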
func (g *gossipInstance) tryForwardMessage(msg *protoext.SignedGossipMessage) {
	g.comm.lock.Lock()

	aliveMsg := msg.GetAliveMsg()

	forward := false
	id := string(aliveMsg.Membership.PkiId)
	seqNum := aliveMsg.Timestamp.SeqNum
	if last, exists := g.comm.lastSeqs[id]; exists {
		if last < seqNum {
			g.comm.lastSeqs[id] = seqNum
			forward = true
		}
	} else {
		g.comm.lastSeqs[id] = seqNum
		forward = true
	}

	g.comm.lock.Unlock()

	if forward {
		g.comm.Gossip(msg)
	}
}

func (g *gossipInstance) Stop() {
	if g.syncInitiator != nil {
		g.stopChan <- struct{}{}
	}
	g.gRPCserv.Stop()
	g.lsnr.Close()
	g.comm.lock.Lock()
	for _, stream := range g.comm.streams {
		stream.CloseSend()
	}
	g.comm.lock.Unlock()
	for _, conn := range g.comm.conns {
		conn.Close()
	}
	g.Discovery.Stop()
}

func (g *gossipInstance) Ping(context.Context, *proto.Empty) (*proto.Empty, error) {
	return &proto.Empty{}, nil
}

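// noopPolicy discloses everything: every message passes the sieve, and
// envelopes are forwarded unmodified.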
var noopPolicy = func(remotePeer *NetworkMember) (Sieve, EnvelopeFilter) {
	return func(msg *protoext.SignedGossipMessage) bool {
			return true
		}, func(message *protoext.SignedGossipMessage) *proto.Envelope {
			return message.Envelope
		}
}

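// The createDiscoveryInstance* helpers below are thin wrappers that fix some
// parameters of createDiscoveryInstanceWithAnchorPeerTracker, which does the
// actual setup.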
func createDiscoveryInstance(port int, id string, bootstrapPeers []string) *gossipInstance {
	return createDiscoveryInstanceCustomConfig(port, id, bootstrapPeers, defaultTestConfig)
}

func createDiscoveryInstanceCustomConfig(port int, id string, bootstrapPeers []string, config DiscoveryConfig) *gossipInstance {
	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, true, noopPolicy, config)
}

func createDiscoveryInstanceWithNoGossip(port int, id string, bootstrapPeers []string) *gossipInstance {
	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, false, noopPolicy, defaultTestConfig)
}

func createDiscoveryInstanceWithNoGossipWithDisclosurePolicy(port int, id string, bootstrapPeers []string, pol DisclosurePolicy) *gossipInstance {
	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, false, pol, defaultTestConfig)
}

func createDiscoveryInstanceThatGossips(port int, id string, bootstrapPeers []string, shouldGossip bool, pol DisclosurePolicy, config DiscoveryConfig) *gossipInstance {
	return createDiscoveryInstanceThatGossipsWithInterceptors(port, id, bootstrapPeers, shouldGossip, pol, func(_ *protoext.SignedGossipMessage) {}, config)
}

func createDiscoveryInstanceThatGossipsWithInterceptors(port int, id string, bootstrapPeers []string, shouldGossip bool, pol DisclosurePolicy, f func(*protoext.SignedGossipMessage), config DiscoveryConfig) *gossipInstance {
	mockTracker := &mockAnchorPeerTracker{}
	return createDiscoveryInstanceWithAnchorPeerTracker(port, id, bootstrapPeers, shouldGossip, pol, f, config, mockTracker, nil)
}

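// createDiscoveryInstanceWithAnchorPeerTracker builds a discovery service
// backed by a dummyCommModule, starts a real gRPC server for it on the given
// port, and connects it to the supplied bootstrap peers.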
func createDiscoveryInstanceWithAnchorPeerTracker(port int, id string, bootstrapPeers []string, shouldGossip bool, pol DisclosurePolicy,
	f func(*protoext.SignedGossipMessage), config DiscoveryConfig, anchorPeerTracker AnchorPeerTracker, logger util.Logger) *gossipInstance {
	comm := &dummyCommModule{
		conns:          make(map[string]*grpc.ClientConn),
		streams:        make(map[string]proto.Gossip_GossipStreamClient),
		incMsgs:        make(chan protoext.ReceivedMessage, 1000),
		presumeDead:    make(chan common.PKIidType, 10000),
		id:             id,
		detectedDead:   make(chan string, 10000),
		identitySwitch: make(chan common.PKIidType),
		lock:           &sync.RWMutex{},
		lastSeqs:       make(map[string]uint64),
		shouldGossip:   shouldGossip,
		disableComm:    false,
	}

	endpoint := fmt.Sprintf("localhost:%d", port)
	self := NetworkMember{
		Metadata:         []byte{},
		PKIid:            []byte(endpoint),
		Endpoint:         endpoint,
		InternalEndpoint: endpoint,
	}

	listenAddress := fmt.Sprintf(":%d", port)
	ll, err := net.Listen("tcp", listenAddress)
	if err != nil {
		errMsg := fmt.Sprintf("Failed creating listener on address %v for gossip instance: %v", listenAddress, err)
		panic(errMsg)
	}
	s := grpc.NewServer()

	config.BootstrapPeers = bootstrapPeers

	if logger == nil {
		logger = util.GetLogger(util.DiscoveryLogger, self.InternalEndpoint)
	}
	discSvc := NewDiscoveryService(self, comm, comm, pol, config, anchorPeerTracker, logger)
	for _, bootPeer := range bootstrapPeers {
		bp := bootPeer
		discSvc.Connect(NetworkMember{Endpoint: bp, InternalEndpoint: bootPeer}, func() (*PeerIdentification, error) {
			return &PeerIdentification{SelfOrg: true, ID: common.PKIidType(bp)}, nil
		})
	}

	gossInst := &gossipInstance{comm: comm, gRPCserv: s, Discovery: discSvc, lsnr: ll, shouldGossip: shouldGossip, port: port, msgInterceptor: f}

	proto.RegisterGossipServer(s, gossInst)
	go s.Serve(ll)

	return gossInst
}

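// bootPeer formats the localhost endpoint of a bootstrap peer on the given port.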
func bootPeer(port int) string {
	return fmt.Sprintf("localhost:%d", port)
}

func TestClone(t *testing.T) {
	nm := &NetworkMember{
		PKIid: common.PKIidType("abc"),
		Properties: &proto.Properties{
			LedgerHeight: 1,
			LeftChannel:  true,
		},
		Envelope: &proto.Envelope{
			Payload: []byte("payload"),
		},
		InternalEndpoint: "internal",
		Metadata:         []byte{1, 2, 3},
		Endpoint:         "endpoint",
	}

	nm2 := nm.Clone()
	require.Equal(t, *nm, nm2, "Clones are different")
	require.False(t, nm.Properties == nm2.Properties, "Cloning should be deep and not shallow")
	require.False(t, nm.Envelope == nm2.Envelope, "Cloning should be deep and not shallow")
}

func TestHasExternalEndpoints(t *testing.T) {
	memberWithEndpoint := NetworkMember{Endpoint: "foo"}
	memberWithoutEndpoint := NetworkMember{}

	require.True(t, HasExternalEndpoint(memberWithEndpoint))
	require.False(t, HasExternalEndpoint(memberWithoutEndpoint))
}

func TestToString(t *testing.T) {
	nm := NetworkMember{
		Endpoint:         "a",
		InternalEndpoint: "b",
	}
	require.Equal(t, "b", nm.PreferredEndpoint())
	nm = NetworkMember{
		Endpoint: "a",
	}
	require.Equal(t, "a", nm.PreferredEndpoint())

	now := time.Now()
	ts := &timestamp{
		incTime: now,
		seqNum:  uint64(42),
	}
	require.Equal(t, fmt.Sprintf("%d, %d", now.UnixNano(), 42), fmt.Sprint(ts))
}

func TestNetworkMemberString(t *testing.T) {
	tests := []struct {
		input    NetworkMember
		expected string
	}{
		{
			input:    NetworkMember{Endpoint: "endpoint", InternalEndpoint: "internal-endpoint", PKIid: common.PKIidType{0, 1, 2, 3}, Metadata: nil},
			expected: "Endpoint: endpoint, InternalEndpoint: internal-endpoint, PKI-ID: 00010203, Metadata: ",
		},
		{
			input:    NetworkMember{Endpoint: "endpoint", InternalEndpoint: "internal-endpoint", PKIid: common.PKIidType{0, 1, 2, 3}, Metadata: []byte{4, 5, 6, 7}},
			expected: "Endpoint: endpoint, InternalEndpoint: internal-endpoint, PKI-ID: 00010203, Metadata: 04050607",
		},
	}
	for _, tt := range tests {
		require.Equal(t, tt.expected, tt.input.String())
	}
}

func TestBadInput(t *testing.T) {
	inst := createDiscoveryInstance(2048, fmt.Sprintf("d%d", 0), []string{})
	inst.Discovery.(*gossipDiscoveryImpl).handleMsgFromComm(nil)
	s, _ := protoext.NoopSign(&proto.GossipMessage{
		Content: &proto.GossipMessage_DataMsg{
			DataMsg: &proto.DataMessage{},
		},
	})
	inst.Discovery.(*gossipDiscoveryImpl).handleMsgFromComm(&dummyReceivedMessage{
		msg: s,
		info: &protoext.ConnectionInfo{
			ID: common.PKIidType("testID"),
		},
	})
}

func TestConnect(t *testing.T) {
	nodeNum := 10
	instances := []*gossipInstance{}
	firstSentMemReqMsgs := make(chan *protoext.SignedGossipMessage, nodeNum)
	for i := 0; i < nodeNum; i++ {
		inst := createDiscoveryInstance(7611+i, fmt.Sprintf("d%d", i), []string{})

		inst.comm.lock.Lock()
		inst.comm.mock = &mock.Mock{}
		inst.comm.mock.On("SendToPeer", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
			msg := arguments.Get(1).(*protoext.SignedGossipMessage)
			if req := msg.GetMemReq(); req != nil {
				selfMsg, _ := protoext.EnvelopeToGossipMessage(req.SelfInformation)
				firstSentMemReqMsgs <- selfMsg
				inst.comm.lock.Lock()
				inst.comm.mock = nil
				inst.comm.lock.Unlock()
			}
		})
		inst.comm.mock.On("Ping", mock.Anything)
		inst.comm.lock.Unlock()
		instances = append(instances, inst)
		j := (i + 1) % 10
		endpoint := fmt.Sprintf("localhost:%d", 7611+j)
		netMember2Connect2 := NetworkMember{Endpoint: endpoint, PKIid: []byte(endpoint)}
		inst.Connect(netMember2Connect2, func() (identification *PeerIdentification, err error) {
			return &PeerIdentification{SelfOrg: false, ID: nil}, nil
		})
	}

	time.Sleep(time.Second * 3)
	fullMembership := func() bool {
		return nodeNum-1 == len(instances[nodeNum-1].GetMembership())
	}
	waitUntilOrFail(t, fullMembership)

	discInst := instances[rand.Intn(len(instances))].Discovery.(*gossipDiscoveryImpl)
	mr, _ := discInst.createMembershipRequest(true)
	am, _ := protoext.EnvelopeToGossipMessage(mr.GetMemReq().SelfInformation)
	require.NotNil(t, am.SecretEnvelope)
	mr2, _ := discInst.createMembershipRequest(false)
	am, _ = protoext.EnvelopeToGossipMessage(mr2.GetMemReq().SelfInformation)
	require.Nil(t, am.SecretEnvelope)
	stopInstances(t, instances)
	require.Len(t, firstSentMemReqMsgs, 10)
	close(firstSentMemReqMsgs)
	for firstSentSelfMsg := range firstSentMemReqMsgs {
		require.Nil(t, firstSentSelfMsg.Envelope.SecretEnvelope)
	}
}

func TestNoSigningIfNoMembership(t *testing.T) {
	t.Parallel()

	inst := createDiscoveryInstance(8931, "foreveralone", nil)
	defer inst.Stop()
	time.Sleep(defaultTestConfig.AliveTimeInterval * 10)
	assert.Zero(t, atomic.LoadUint32(&inst.comm.signCount))

	inst.InitiateSync(10000)
	assert.Zero(t, atomic.LoadUint32(&inst.comm.signCount))
}

func TestValidation(t *testing.T) {
	// Scenarios: This test contains the following sub-tests:
	// 1) alive message validation: a message is validated <==> it entered the message store
	// 2) request/response message validation:
	//   2.1) alive messages from membership requests/responses are validated.
	//   2.2) once alive messages enter the message store, receiving them again via membership
	//        responses doesn't trigger validation, but receiving them via membership requests does.

	wrapReceivedMessage := func(msg *protoext.SignedGossipMessage) protoext.ReceivedMessage {
		return &dummyReceivedMessage{
			msg: msg,
			info: &protoext.ConnectionInfo{
				ID: common.PKIidType("testID"),
			},
		}
	}

	requestMessagesReceived := make(chan *protoext.SignedGossipMessage, 100)
	responseMessagesReceived := make(chan *protoext.SignedGossipMessage, 100)
	aliveMessagesReceived := make(chan *protoext.SignedGossipMessage, 5000)

	var membershipRequest atomic.Value
	var membershipResponseWithAlivePeers atomic.Value
	var membershipResponseWithDeadPeers atomic.Value

	recordMembershipRequest := func(req *protoext.SignedGossipMessage) {
		msg, _ := protoext.EnvelopeToGossipMessage(req.GetMemReq().SelfInformation)
		membershipRequest.Store(req)
		requestMessagesReceived <- msg
	}

	recordMembershipResponse := func(res *protoext.SignedGossipMessage) {
		memRes := res.GetMemRes()
		if len(memRes.GetAlive()) > 0 {
			membershipResponseWithAlivePeers.Store(res)
		}
		if len(memRes.GetDead()) > 0 {
			membershipResponseWithDeadPeers.Store(res)
		}
		responseMessagesReceived <- res
	}

	interceptor := func(msg *protoext.SignedGossipMessage) {
		if memReq := msg.GetMemReq(); memReq != nil {
			recordMembershipRequest(msg)
			return
		}

		if memRes := msg.GetMemRes(); memRes != nil {
			recordMembershipResponse(msg)
			return
		}
		// Else, it's an alive message
		aliveMessagesReceived <- msg
	}

	// p3 is the boot peer of p1, and p1 is the boot peer of p2.
	// p1 sends a (membership) request to p3, and receives a (membership) response back.
	// p2 sends a (membership) request to p1.
	// Therefore, p1 receives both a membership request and a response.
	p1 := createDiscoveryInstanceThatGossipsWithInterceptors(4675, "p1", []string{bootPeer(4677)}, true, noopPolicy, interceptor, defaultTestConfig)
	p2 := createDiscoveryInstance(4676, "p2", []string{bootPeer(4675)})
	p3 := createDiscoveryInstance(4677, "p3", nil)
	instances := []*gossipInstance{p1, p2, p3}

	assertMembership(t, instances, 2)

	instances = []*gossipInstance{p1, p2}
	// Stop p3 and wait until its death is detected
	p3.Stop()
	assertMembership(t, instances, 1)
	// Force p1 to send a membership request so it can receive back a response
	// with dead peers.
	p1.InitiateSync(1)

	// Wait until a response with a dead peer is received
	waitUntilOrFail(t, func() bool {
		return membershipResponseWithDeadPeers.Load() != nil
	})

	p1.Stop()
	p2.Stop()

	close(aliveMessagesReceived)
	t.Log("Recorded", len(aliveMessagesReceived), "alive messages")
	t.Log("Recorded", len(requestMessagesReceived), "request messages")
	t.Log("Recorded", len(responseMessagesReceived), "response messages")

	// Ensure we got alive messages from membership requests and from membership responses
	require.NotNil(t, membershipResponseWithAlivePeers.Load())
	require.NotNil(t, membershipRequest.Load())

	t.Run("alive message", func(t *testing.T) {
		// Spawn a new peer - p4
		p4 := createDiscoveryInstance(4678, "p1", nil)
		defer p4.Stop()
		// Record messages validated
		validatedMessages := make(chan *protoext.SignedGossipMessage, 5000)
		p4.comm.recordValidation(validatedMessages)
		tmpMsgs := make(chan *protoext.SignedGossipMessage, 5000)
		// Replay the messages sent to p1 into p4, and also save them into a temporary channel
		for msg := range aliveMessagesReceived {
			p4.comm.incMsgs <- wrapReceivedMessage(msg)
			tmpMsgs <- msg
		}

		// Simulate the messages received by p4 into the message store
		policy := protoext.NewGossipMessageComparator(0)
		msgStore := msgstore.NewMessageStore(policy, func(_ interface{}) {})
		close(tmpMsgs)
		for msg := range tmpMsgs {
			if msgStore.Add(msg) {
				// Ensure the message was verified if it can be added into the message store
				expectedMessage := <-validatedMessages
				require.Equal(t, expectedMessage, msg)
			}
		}
		// Ensure we didn't validate any other messages.
		require.Empty(t, validatedMessages)
	})

	req := membershipRequest.Load().(*protoext.SignedGossipMessage)
	res := membershipResponseWithDeadPeers.Load().(*protoext.SignedGossipMessage)
	// Ensure the membership response contains both alive and dead peers
	require.Len(t, res.GetMemRes().GetAlive(), 2)
	require.Len(t, res.GetMemRes().GetDead(), 1)

	for _, testCase := range []struct {
		name                  string
		expectedAliveMessages int
		port                  int
		message               *protoext.SignedGossipMessage
		shouldBeReValidated   bool
	}{
		{
			name:                  "membership request",
			expectedAliveMessages: 1,
			message:               req,
			port:                  4679,
			shouldBeReValidated:   true,
		},
		{
			name:                  "membership response",
			expectedAliveMessages: 3,
			message:               res,
			port:                  4680,
		},
	} {
		testCase := testCase
		t.Run(testCase.name, func(t *testing.T) {
			p := createDiscoveryInstance(testCase.port, "p", nil)
			defer p.Stop()
			// Record messages validated
			validatedMessages := make(chan *protoext.SignedGossipMessage, testCase.expectedAliveMessages)
			p.comm.recordValidation(validatedMessages)

			p.comm.incMsgs <- wrapReceivedMessage(testCase.message)
			// Ensure all messages were validated
			for i := 0; i < testCase.expectedAliveMessages; i++ {
				validatedMsg := <-validatedMessages
				// Send the message directly to be included in the message store
				p.comm.incMsgs <- wrapReceivedMessage(validatedMsg)
			}
			// Wait for the messages to be validated
			for i := 0; i < testCase.expectedAliveMessages; i++ {
				<-validatedMessages
			}
			// No more than testCase.expectedAliveMessages should have been validated
			require.Empty(t, validatedMessages)

			if !testCase.shouldBeReValidated {
				// Re-submit the message twice and ensure it isn't validated.
				// If it were validated, a panic would occur, because a send on the
				// closed validatedMessages channel would be attempted.
				close(validatedMessages)
			}
			p.comm.incMsgs <- wrapReceivedMessage(testCase.message)
			p.comm.incMsgs <- wrapReceivedMessage(testCase.message)
			// Wait until the size of the channel is zero. It means at least one message was processed.
			waitUntilOrFail(t, func() bool {
				return len(p.comm.incMsgs) == 0
			})
		})
	}
}

func TestUpdate(t *testing.T) {
	nodeNum := 5
	bootPeers := []string{bootPeer(6611), bootPeer(6612)}
	instances := []*gossipInstance{}

	inst := createDiscoveryInstance(6611, "d1", bootPeers)
	instances = append(instances, inst)

	inst = createDiscoveryInstance(6612, "d2", bootPeers)
	instances = append(instances, inst)

	for i := 3; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst = createDiscoveryInstance(6610+i, id, bootPeers)
		instances = append(instances, inst)
	}

	fullMembership := func() bool {
		return nodeNum-1 == len(instances[nodeNum-1].GetMembership())
	}

	waitUntilOrFail(t, fullMembership)

	instances[0].UpdateMetadata([]byte("bla bla"))
	instances[nodeNum-1].UpdateEndpoint("localhost:5511")

	checkMembership := func() bool {
		for _, member := range instances[nodeNum-1].GetMembership() {
			if string(member.PKIid) == instances[0].comm.id {
				if string(member.Metadata) != "bla bla" {
					return false
				}
			}
		}

		for _, member := range instances[0].GetMembership() {
			if string(member.PKIid) == instances[nodeNum-1].comm.id {
				if member.Endpoint != "localhost:5511" {
					return false
				}
			}
		}
		return true
	}

	waitUntilOrFail(t, checkMembership)
	stopInstances(t, instances)
}

func TestInitiateSync(t *testing.T) {
	nodeNum := 10
	bootPeers := []string{bootPeer(3611), bootPeer(3612)}
	instances := []*gossipInstance{}

	toDie := int32(0)
	for i := 1; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst := createDiscoveryInstanceWithNoGossip(3610+i, id, bootPeers)
		instances = append(instances, inst)
		go func() {
			for {
				if atomic.LoadInt32(&toDie) == int32(1) {
					return
				}
				time.Sleep(defaultTestConfig.AliveExpirationTimeout / 3)
				inst.InitiateSync(9)
			}
		}()
	}
	time.Sleep(defaultTestConfig.AliveExpirationTimeout * 4)
	assertMembership(t, instances, nodeNum-1)
	atomic.StoreInt32(&toDie, int32(1))
	stopInstances(t, instances)
}

func TestSelf(t *testing.T) {
	inst := createDiscoveryInstance(13463, "d1", []string{})
	defer inst.Stop()
	env := inst.Self().Envelope
	sMsg, err := protoext.EnvelopeToGossipMessage(env)
	require.NoError(t, err)
	member := sMsg.GetAliveMsg().Membership
	require.Equal(t, "localhost:13463", member.Endpoint)
	require.Equal(t, []byte("localhost:13463"), member.PkiId)

	require.Equal(t, "localhost:13463", inst.Self().Endpoint)
	require.Equal(t, common.PKIidType("localhost:13463"), inst.Self().PKIid)
}

func TestExpiration(t *testing.T) {
	nodeNum := 5
	bootPeers := []string{bootPeer(2611), bootPeer(2612)}
	instances := []*gossipInstance{}

	inst := createDiscoveryInstance(2611, "d1", bootPeers)
	instances = append(instances, inst)

	inst = createDiscoveryInstance(2612, "d2", bootPeers)
	instances = append(instances, inst)

	for i := 3; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst = createDiscoveryInstance(2610+i, id, bootPeers)
		instances = append(instances, inst)
	}

	assertMembership(t, instances, nodeNum-1)

	waitUntilOrFailBlocking(t, instances[nodeNum-1].Stop)
	waitUntilOrFailBlocking(t, instances[nodeNum-2].Stop)

	assertMembership(t, instances[:len(instances)-2], nodeNum-3)

	stopAction := &sync.WaitGroup{}
	for i, inst := range instances {
		if i+2 == nodeNum {
			break
		}
		stopAction.Add(1)
		go func(inst *gossipInstance) {
			defer stopAction.Done()
			inst.Stop()
		}(inst)
	}

	waitUntilOrFailBlocking(t, stopAction.Wait)
}

func TestGetFullMembership(t *testing.T) {
	nodeNum := 15
	bootPeers := []string{bootPeer(5511), bootPeer(5512)}
	instances := []*gossipInstance{}
	var inst *gossipInstance

	for i := 3; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst = createDiscoveryInstance(5510+i, id, bootPeers)
		instances = append(instances, inst)
	}

	time.Sleep(time.Second)

	inst = createDiscoveryInstance(5511, "d1", bootPeers)
	instances = append(instances, inst)

	inst = createDiscoveryInstance(5512, "d2", bootPeers)
	instances = append(instances, inst)

	assertMembership(t, instances, nodeNum-1)

	// Ensure that internal endpoint was propagated to everyone
	for _, inst := range instances {
		for _, member := range inst.GetMembership() {
			require.NotEmpty(t, member.InternalEndpoint)
			require.NotEmpty(t, member.Endpoint)
		}
	}

	// Check that Lookup() is valid
	for _, inst := range instances {
		for _, member := range inst.GetMembership() {
			require.Equal(t, string(member.PKIid), inst.Lookup(member.PKIid).Endpoint)
			require.Equal(t, member.PKIid, inst.Lookup(member.PKIid).PKIid)
		}
	}

	stopInstances(t, instances)
}

func TestGossipDiscoveryStopping(t *testing.T) {
	inst := createDiscoveryInstance(9611, "d1", []string{bootPeer(9611)})
	time.Sleep(time.Second)
	waitUntilOrFailBlocking(t, inst.Stop)
}

func TestGossipDiscoverySkipConnectingToLocalhostBootstrap(t *testing.T) {
	inst := createDiscoveryInstance(11611, "d1", []string{"localhost:11611", "127.0.0.1:11611"})
	inst.comm.lock.Lock()
	inst.comm.mock = &mock.Mock{}
	inst.comm.mock.On("SendToPeer", mock.Anything, mock.Anything).Run(func(mock.Arguments) {
		t.Fatal("Should not have connected to any peer")
	})
	inst.comm.mock.On("Ping", mock.Anything).Run(func(mock.Arguments) {
		t.Fatal("Should not have connected to any peer")
	})
	inst.comm.lock.Unlock()
	time.Sleep(time.Second * 3)
	waitUntilOrFailBlocking(t, inst.Stop)
}

func TestConvergence(t *testing.T) {
	// scenario:
	// {boot peer: [peer list]}
	// {d1: d2, d3, d4}
	// {d5: d6, d7, d8}
	// {d9: d10, d11, d12}
	// connect all boot peers with d13
	// take down d13
	// ensure still full membership
	instances := []*gossipInstance{}
	for _, i := range []int{1, 5, 9} {
		bootPort := 4610 + i
		id := fmt.Sprintf("d%d", i)
		leader := createDiscoveryInstance(bootPort, id, []string{})
		instances = append(instances, leader)
		for minionIndex := 1; minionIndex <= 3; minionIndex++ {
			id := fmt.Sprintf("d%d", i+minionIndex)
			minion := createDiscoveryInstance(4610+minionIndex+i, id, []string{bootPeer(bootPort)})
			instances = append(instances, minion)
		}
	}

	assertMembership(t, instances, 3)
	connector := createDiscoveryInstance(4623, "d13", []string{bootPeer(4611), bootPeer(4615), bootPeer(4619)})
	instances = append(instances, connector)
	assertMembership(t, instances, 12)
	connector.Stop()
	instances = instances[:len(instances)-1]
	assertMembership(t, instances, 11)
	stopInstances(t, instances)
}

func TestDisclosurePolicyWithPull(t *testing.T) {
	// Scenario: run 2 groups of peers that simulate 2 organizations:
	// {p0, p1, p2, p3, p4}
	// {p5, p6, p7, p8, p9}
	// Only peers that have an even id have external addresses,
	// and only these peers should be published to peers of the other group,
	// while the only ones that need to know about them are peers
	// that have an even id themselves.
	// Furthermore, peers in different groups should not know about the internal
	// addresses of other peers.

	// This bootstrap map assigns each peer its own bootstrap peer.
	// In practice (production) peers should only use peers of their own orgs as
	// bootstrap peers, but the discovery layer is ignorant of organizations.
	bootPeerMap := map[int]int{
		8610: 8616,
		8611: 8610,
		8612: 8610,
		8613: 8610,
		8614: 8610,
		8615: 8616,
		8616: 8610,
		8617: 8616,
		8618: 8616,
		8619: 8616,
	}

	// This map lists, for each peer, the peers it should know about in the test scenario.
	peersThatShouldBeKnownToPeers := map[int][]int{
		8610: {8611, 8612, 8613, 8614, 8616, 8618},
		8611: {8610, 8612, 8613, 8614},
		8612: {8610, 8611, 8613, 8614, 8616, 8618},
		8613: {8610, 8611, 8612, 8614},
		8614: {8610, 8611, 8612, 8613, 8616, 8618},
		8615: {8616, 8617, 8618, 8619},
		8616: {8610, 8612, 8614, 8615, 8617, 8618, 8619},
		8617: {8615, 8616, 8618, 8619},
		8618: {8610, 8612, 8614, 8615, 8616, 8617, 8619},
		8619: {8615, 8616, 8617, 8618},
	}
	// Create the peers in the two groups
	instances1, instances2 := createDisjointPeerGroupsWithNoGossip(bootPeerMap)
	// Wait until membership is established. This should happen quickly, because
	// the instances are configured to pull membership at a very high frequency
	// from up to 10 peers (which results in pulling from everyone).
	waitUntilOrFail(t, func() bool {
		for _, inst := range append(instances1, instances2...) {
			// Ensure the expected membership is equal in size to the actual membership
			// of each peer.
			portsOfKnownMembers := portsOfMembers(inst.GetMembership())
			if len(peersThatShouldBeKnownToPeers[inst.port]) != len(portsOfKnownMembers) {
				return false
			}
		}
		return true
	})
	for _, inst := range append(instances1, instances2...) {
		portsOfKnownMembers := portsOfMembers(inst.GetMembership())
		// Ensure the expected membership is equal to the actual membership
		// of each peer. portsOfMembers returns a sorted slice, so require.Equal does the job.
		require.Equal(t, peersThatShouldBeKnownToPeers[inst.port], portsOfKnownMembers)
		// Next, check that internal endpoints aren't leaked across groups.
		for _, knownPeer := range inst.GetMembership() {
			// If the internal endpoint is known, ensure the peers are in the same group,
			// unless the peer in question is a peer that has a public address.
			// We cannot control what we disclose about ourselves when we send a membership request.
			if len(knownPeer.InternalEndpoint) > 0 && inst.port%2 != 0 {
				bothInGroup1 := portOfEndpoint(knownPeer.Endpoint) < 8615 && inst.port < 8615
				bothInGroup2 := portOfEndpoint(knownPeer.Endpoint) >= 8615 && inst.port >= 8615
				require.True(t, bothInGroup1 || bothInGroup2, "%v knows about %v's internal endpoint", inst.port, knownPeer.InternalEndpoint)
			}
		}
	}

	t.Log("Shutting down instance 0...")
	// Now, we shut down instance 0 and ensure that peers that shouldn't know it
	// do not learn about it via membership requests
	stopInstances(t, []*gossipInstance{instances1[0]})
	time.Sleep(time.Second * 6)
	for _, inst := range append(instances1[1:], instances2...) {
		if peersThatShouldBeKnownToPeers[inst.port][0] == 8610 {
			require.Equal(t, 1, inst.Discovery.(*gossipDiscoveryImpl).deadMembership.Size())
		} else {
			require.Equal(t, 0, inst.Discovery.(*gossipDiscoveryImpl).deadMembership.Size())
		}
	}
	stopInstances(t, instances1[1:])
	stopInstances(t, instances2)
}

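// createDisjointPeerGroupsWithNoGossip starts two groups of five peers each
// (ports 8610-8614 and 8615-8619), wires every peer to its bootstrap peer from
// bootPeerMap, and gives each one the port-based disclosure policy below.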
func createDisjointPeerGroupsWithNoGossip(bootPeerMap map[int]int) ([]*gossipInstance, []*gossipInstance) {
	instances1 := []*gossipInstance{}
	instances2 := []*gossipInstance{}
	for group := 0; group < 2; group++ {
		for i := 0; i < 5; i++ {
			id := fmt.Sprintf("id%d", group*5+i)
			port := 8610 + group*5 + i
			bootPeers := []string{bootPeer(bootPeerMap[port])}
			pol := discPolForPeer(port)
			inst := createDiscoveryInstanceWithNoGossipWithDisclosurePolicy(port, id, bootPeers, pol)
			inst.initiateSync(defaultTestConfig.AliveExpirationTimeout/3, 10)
			if group == 0 {
				instances1 = append(instances1, inst)
			} else {
				instances2 = append(instances2, inst)
			}
		}
	}
	return instances1, instances2
}

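// discPolForPeer returns the disclosure policy of a peer listening on selfPort:
// peers with ports below 8615 form one group and the rest form the other.
// Alive messages are disclosed within a group, and across groups only when both
// the subject and the target ports are even; the envelope filter strips the
// SecretEnvelope (carrying the internal endpoint) whenever a message crosses
// groups.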
func discPolForPeer(selfPort int) DisclosurePolicy {
	return func(remotePeer *NetworkMember) (Sieve, EnvelopeFilter) {
		targetPortStr := strings.Split(remotePeer.Endpoint, ":")[1]
		targetPort, _ := strconv.ParseInt(targetPortStr, 10, 64)
		return func(msg *protoext.SignedGossipMessage) bool {
				portOfAliveMsgStr := strings.Split(msg.GetAliveMsg().Membership.Endpoint, ":")[1]
				portOfAliveMsg, _ := strconv.ParseInt(portOfAliveMsgStr, 10, 64)

				if portOfAliveMsg < 8615 && targetPort < 8615 {
					return true
				}
				if portOfAliveMsg >= 8615 && targetPort >= 8615 {
					return true
				}

				// Else, expose peers with even ids to other peers with even ids
				return portOfAliveMsg%2 == 0 && targetPort%2 == 0
			}, func(msg *protoext.SignedGossipMessage) *proto.Envelope {
				envelope := protoG.Clone(msg.Envelope).(*proto.Envelope)
				if selfPort < 8615 && targetPort >= 8615 {
					envelope.SecretEnvelope = nil
				}

				if selfPort >= 8615 && targetPort < 8615 {
					envelope.SecretEnvelope = nil
				}

				return envelope
			}
	}
}

func TestCertificateChange(t *testing.T) {
	bootPeers := []string{bootPeer(42611), bootPeer(42612), bootPeer(42613)}
	p1 := createDiscoveryInstance(42611, "d1", bootPeers)
	p2 := createDiscoveryInstance(42612, "d2", bootPeers)
	p3 := createDiscoveryInstance(42613, "d3", bootPeers)

	// Wait for membership establishment
	assertMembership(t, []*gossipInstance{p1, p2, p3}, 2)

	// Shutdown the second peer
	waitUntilOrFailBlocking(t, p2.Stop)

	var pingCountFrom1 uint32
	var pingCountFrom3 uint32
	// Program mocks to increment ping counters
	p1.comm.lock.Lock()
	p1.comm.mock = &mock.Mock{}
	p1.comm.mock.On("SendToPeer", mock.Anything, mock.Anything)
	p1.comm.mock.On("Ping").Run(func(arguments mock.Arguments) {
		atomic.AddUint32(&pingCountFrom1, 1)
	})
	p1.comm.lock.Unlock()

	p3.comm.lock.Lock()
	p3.comm.mock = &mock.Mock{}
	p3.comm.mock.On("SendToPeer", mock.Anything, mock.Anything)
	p3.comm.mock.On("Ping").Run(func(arguments mock.Arguments) {
		atomic.AddUint32(&pingCountFrom3, 1)
	})
	p3.comm.lock.Unlock()

	pingCount1 := func() uint32 {
		return atomic.LoadUint32(&pingCountFrom1)
	}

	pingCount3 := func() uint32 {
		return atomic.LoadUint32(&pingCountFrom3)
	}

	c1 := pingCount1()
	c3 := pingCount3()

	// Ensure the first and third peers try to reconnect to it
	waitUntilTimeoutOrFail(t, func() bool {
		return pingCount1() > c1 && pingCount3() > c3
	}, timeout)

	// Tell the first peer that the second peer's PKI-ID has changed,
	// so that it will purge it from the membership entirely
	p1.comm.identitySwitch <- common.PKIidType("localhost:42612")

	c1 = pingCount1()
	c3 = pingCount3()
	// Ensure the third peer still tries to reconnect to it
	waitUntilTimeoutOrFail(t, func() bool {
		return pingCount3() > c3
	}, timeout)

	// Ensure the first peer has ceased trying
	require.Equal(t, c1, pingCount1())

	waitUntilOrFailBlocking(t, p1.Stop)
	waitUntilOrFailBlocking(t, p3.Stop)
}

func TestMsgStoreExpiration(t *testing.T) {
	// Starts 4 instances, waits for membership to be built, then stops 2 instances.
	// Checks that the 2 running instances only see each other.
	// Waits for expiration and checks that the alive messages and the related
	// entries in the maps of the running instances are removed.
	nodeNum := 4
	bootPeers := []string{bootPeer(12611), bootPeer(12612)}
	instances := []*gossipInstance{}

	inst := createDiscoveryInstance(12611, "d1", bootPeers)
	instances = append(instances, inst)

	inst = createDiscoveryInstance(12612, "d2", bootPeers)
	instances = append(instances, inst)

	for i := 3; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst = createDiscoveryInstance(12610+i, id, bootPeers)
		instances = append(instances, inst)
	}

	assertMembership(t, instances, nodeNum-1)

	waitUntilOrFailBlocking(t, instances[nodeNum-1].Stop)
	waitUntilOrFailBlocking(t, instances[nodeNum-2].Stop)

	assertMembership(t, instances[:len(instances)-2], nodeNum-3)

	checkMessages := func() bool {
		for _, inst := range instances[:len(instances)-2] {
			for _, downInst := range instances[len(instances)-2:] {
				downCastInst := inst.discoveryImpl()
				downCastInst.lock.RLock()
				if _, exist := downCastInst.aliveLastTS[string(downInst.discoveryImpl().self.PKIid)]; exist {
					downCastInst.lock.RUnlock()
					return false
				}
				if _, exist := downCastInst.deadLastTS[string(downInst.discoveryImpl().self.PKIid)]; exist {
					downCastInst.lock.RUnlock()
					return false
				}
				if _, exist := downCastInst.id2Member[string(downInst.discoveryImpl().self.PKIid)]; exist {
					downCastInst.lock.RUnlock()
					return false
				}
				if downCastInst.aliveMembership.MsgByID(downInst.discoveryImpl().self.PKIid) != nil {
					downCastInst.lock.RUnlock()
					return false
				}
				if downCastInst.deadMembership.MsgByID(downInst.discoveryImpl().self.PKIid) != nil {
					downCastInst.lock.RUnlock()
					return false
				}
				for _, am := range downCastInst.msgStore.Get() {
					m := am.(*protoext.SignedGossipMessage).GetAliveMsg()
					if bytes.Equal(m.Membership.PkiId, downInst.discoveryImpl().self.PKIid) {
						downCastInst.lock.RUnlock()
						return false
					}
				}
				downCastInst.lock.RUnlock()
			}
		}
		return true
	}

	waitUntilTimeoutOrFail(t, checkMessages, defaultTestConfig.AliveExpirationTimeout*(DefMsgExpirationFactor+5))

	assertMembership(t, instances[:len(instances)-2], nodeNum-3)

	stopInstances(t, instances[:len(instances)-2])
}

func TestExpirationNoSecretEnvelope(t *testing.T) {
	l, err := zap.NewDevelopment()
	require.NoError(t, err)

	removed := make(chan struct{})
	logger := flogging.NewFabricLogger(l, zap.Hooks(func(entry zapcore.Entry) error {
		if strings.Contains(entry.Message, "Removing member: Endpoint: foo") {
			removed <- struct{}{}
		}
		return nil
	}))

	mockTracker := &mockAnchorPeerTracker{}
	msgStore := newAliveMsgStore(&gossipDiscoveryImpl{
		aliveExpirationTimeout: time.Millisecond,
		lock:                   &sync.RWMutex{},
		aliveMembership:        util.NewMembershipStore(),
		deadMembership:         util.NewMembershipStore(),
		logger:                 logger,
		anchorPeerTracker:      mockTracker,
	})

	msg := &proto.GossipMessage{
		Content: &proto.GossipMessage_AliveMsg{
			AliveMsg: &proto.AliveMessage{Membership: &proto.Member{
				Endpoint: "foo",
			}},
		},
	}

	sMsg, err := protoext.NoopSign(msg)
	require.NoError(t, err)

	msgStore.Add(sMsg)
	select {
	case <-removed:
	case <-time.After(time.Second * 10):
		t.Fatalf("timed out")
	}
}

  1335  func TestMsgStoreExpirationWithMembershipMessages(t *testing.T) {
  1336  	// Creates 3 discovery instances without gossip communication
  1337  	// Generates MembershipRequest msg for each instance using createMembershipRequest
  1338  	// Generates Alive msg for each instance using createAliveMessage
  1339  	// Builds membership using Alive msgs
  1340  	// Checks msgStore and related maps
  1341  	// Generates MembershipResponse msgs for each instance using createMembershipResponse
  1342  	// Generates new set of Alive msgs and processes them
  1343  	// Checks msgStore and related maps
  1344  	// Waits for expiration and checks msgStore and related maps
  1345  	// Processes stored MembershipRequest msg and checks msgStore and related maps
  1346  	// Processes stored MembershipResponse msg and checks msgStore and related maps
  1347  	bootPeers := []string{}
  1348  	peersNum := 3
  1349  	instances := []*gossipInstance{}
  1350  	aliveMsgs := []*protoext.SignedGossipMessage{}
  1351  	newAliveMsgs := []*protoext.SignedGossipMessage{}
  1352  	memReqMsgs := []*protoext.SignedGossipMessage{}
  1353  	memRespMsgs := make(map[int][]*proto.MembershipResponse)
  1354  
  1355  	for i := 0; i < peersNum; i++ {
  1356  		id := fmt.Sprintf("d%d", i)
  1357  		inst := createDiscoveryInstanceWithNoGossip(22610+i, id, bootPeers)
  1358  		inst.comm.disableComm = true
  1359  		instances = append(instances, inst)
  1360  	}
  1361  
  1362  	// Creating MembershipRequest messages
  1363  	for i := 0; i < peersNum; i++ {
  1364  		memReqMsg, _ := instances[i].discoveryImpl().createMembershipRequest(true)
  1365  		sMsg, _ := protoext.NoopSign(memReqMsg)
  1366  		memReqMsgs = append(memReqMsgs, sMsg)
  1367  	}
  1368  	// Creating Alive messages
  1369  	for i := 0; i < peersNum; i++ {
  1370  		aliveMsg, _ := instances[i].discoveryImpl().createSignedAliveMessage(true)
  1371  		aliveMsgs = append(aliveMsgs, aliveMsg)
  1372  	}
  1373  
  1374  	repeatForFiltered := func(n int, filter func(i int) bool, action func(i int)) {
  1375  		for i := 0; i < n; i++ {
  1376  			if filter(i) {
  1377  				continue
  1378  			}
  1379  			action(i)
  1380  		}
  1381  	}
  1382  
  1383  	// Handling Alive
  1384  	for i := 0; i < peersNum; i++ {
  1385  		for k := 0; k < peersNum; k++ {
  1386  			instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1387  				msg: aliveMsgs[k],
  1388  				info: &protoext.ConnectionInfo{
  1389  					ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1390  				},
  1391  			})
  1392  		}
  1393  	}
  1394  
  1395  	checkExistence := func(instances []*gossipInstance, msgs []*protoext.SignedGossipMessage, index int, i int, step string) {
  1396  		_, exist := instances[index].discoveryImpl().aliveLastTS[string(instances[i].discoveryImpl().self.PKIid)]
  1397  		require.True(t, exist, fmt.Sprint(step, " Data from alive msg ", i, " doesn't exist in aliveLastTS of discovery inst ", index))
  1398  
  1399  		_, exist = instances[index].discoveryImpl().id2Member[string(string(instances[i].discoveryImpl().self.PKIid))]
  1400  		require.True(t, exist, fmt.Sprint(step, " id2Member mapping doesn't exist for alive msg ", i, " of discovery inst ", index))
  1401  
  1402  		require.NotNil(t, instances[index].discoveryImpl().aliveMembership.MsgByID(instances[i].discoveryImpl().self.PKIid), fmt.Sprint(step, " Alive msg", i, " not exist in aliveMembership of discovery inst ", index))
  1403  
  1404  		require.Contains(t, instances[index].discoveryImpl().msgStore.Get(), msgs[i], fmt.Sprint(step, " Alive msg ", i, "not stored in store of discovery inst ", index))
  1405  	}
  1406  
  1407  	checkAliveMsgExist := func(instances []*gossipInstance, msgs []*protoext.SignedGossipMessage, index int, step string) {
  1408  		instances[index].discoveryImpl().lock.RLock()
  1409  		defer instances[index].discoveryImpl().lock.RUnlock()
  1410  		repeatForFiltered(peersNum,
  1411  			func(k int) bool {
  1412  				return k == index
  1413  			},
  1414  			func(k int) {
  1415  				checkExistence(instances, msgs, index, k, step)
  1416  			})
  1417  	}
  1418  
  1419  	// Checking is Alive was processed
  1420  	for i := 0; i < peersNum; i++ {
  1421  		checkAliveMsgExist(instances, aliveMsgs, i, "[Step 1 - processing aliveMsg]")
  1422  	}
  1423  
  1424  	// Creating MembershipResponse while all instances have full membership
  1425  	for i := 0; i < peersNum; i++ {
  1426  		peerToResponse := &NetworkMember{
  1427  			Metadata:         []byte{},
  1428  			PKIid:            []byte(fmt.Sprintf("localhost:%d", 22610+i)),
  1429  			Endpoint:         fmt.Sprintf("localhost:%d", 22610+i),
  1430  			InternalEndpoint: fmt.Sprintf("localhost:%d", 22610+i),
  1431  		}
  1432  		memRespMsgs[i] = []*proto.MembershipResponse{}
  1433  		repeatForFiltered(peersNum,
  1434  			func(k int) bool {
  1435  				return k == i
  1436  			},
  1437  			func(k int) {
  1438  				aliveMsg, _ := instances[k].discoveryImpl().createSignedAliveMessage(true)
  1439  				memResp := instances[k].discoveryImpl().createMembershipResponse(aliveMsg, peerToResponse)
  1440  				memRespMsgs[i] = append(memRespMsgs[i], memResp)
  1441  			})
  1442  	}
  1443  
1444  	// Re-creating Alive msgs with higher seq_nums, to make sure the Alive msgs inside memReq and memResp are older
  1445  	for i := 0; i < peersNum; i++ {
  1446  		aliveMsg, _ := instances[i].discoveryImpl().createSignedAliveMessage(true)
  1447  		newAliveMsgs = append(newAliveMsgs, aliveMsg)
  1448  	}
  1449  
  1450  	// Handling new Alive set
  1451  	for i := 0; i < peersNum; i++ {
  1452  		for k := 0; k < peersNum; k++ {
  1453  			instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1454  				msg: newAliveMsgs[k],
  1455  				info: &protoext.ConnectionInfo{
  1456  					ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1457  				},
  1458  			})
  1459  		}
  1460  	}
  1461  
1462  	// Checking whether the new Alive messages were processed
  1463  	for i := 0; i < peersNum; i++ {
1464  		checkAliveMsgExist(instances, newAliveMsgs, i, "[Step 2 - processing aliveMsg]")
  1465  	}
  1466  
  1467  	checkAliveMsgNotExist := func(instances []*gossipInstance, msgs []*protoext.SignedGossipMessage, index int, step string) {
  1468  		instances[index].discoveryImpl().lock.RLock()
  1469  		defer instances[index].discoveryImpl().lock.RUnlock()
  1470  		require.Empty(t, instances[index].discoveryImpl().aliveLastTS, fmt.Sprint(step, " Data from alive msg still exists in aliveLastTS of discovery inst ", index))
  1471  		require.Empty(t, instances[index].discoveryImpl().deadLastTS, fmt.Sprint(step, " Data from alive msg still exists in deadLastTS of discovery inst ", index))
1472  		require.Empty(t, instances[index].discoveryImpl().id2Member, fmt.Sprint(step, " id2Member mapping still contains data related to Alive msg: discovery inst ", index))
  1473  		require.Empty(t, instances[index].discoveryImpl().msgStore.Get(), fmt.Sprint(step, " Expired Alive msg still stored in store of discovery inst ", index))
1474  		require.Zero(t, instances[index].discoveryImpl().aliveMembership.Size(), fmt.Sprint(step, " Alive membership list is not empty, discovery instance ", index))
1475  		require.Zero(t, instances[index].discoveryImpl().deadMembership.Size(), fmt.Sprint(step, " Dead membership list is not empty, discovery instance ", index))
  1476  	}
  1477  
1478  	// Sleep until the alive messages expire
  1479  	time.Sleep(defaultTestConfig.AliveExpirationTimeout * (DefMsgExpirationFactor + 5))
  1480  
  1481  	// Checking Alive expired
  1482  	for i := 0; i < peersNum; i++ {
  1483  		checkAliveMsgNotExist(instances, newAliveMsgs, i, "[Step 3 - expiration in msg store]")
  1484  	}
  1485  
  1486  	// Processing old MembershipRequest
  1487  	for i := 0; i < peersNum; i++ {
  1488  		repeatForFiltered(peersNum,
  1489  			func(k int) bool {
  1490  				return k == i
  1491  			},
  1492  			func(k int) {
  1493  				instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1494  					msg: memReqMsgs[k],
  1495  					info: &protoext.ConnectionInfo{
  1496  						ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1497  					},
  1498  				})
  1499  			})
  1500  	}
  1501  
  1502  	// MembershipRequest processing didn't change anything
  1503  	for i := 0; i < peersNum; i++ {
  1504  		checkAliveMsgNotExist(instances, aliveMsgs, i, "[Step 4 - memReq processing after expiration]")
  1505  	}
  1506  
1507  	// Processing the old (earlier) Alive messages, which are stale by now
  1508  	for i := 0; i < peersNum; i++ {
  1509  		for k := 0; k < peersNum; k++ {
  1510  			instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1511  				msg: aliveMsgs[k],
  1512  				info: &protoext.ConnectionInfo{
  1513  					ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1514  				},
  1515  			})
  1516  		}
  1517  	}
  1518  
  1519  	// Alive msg processing didn't change anything
  1520  	for i := 0; i < peersNum; i++ {
1521  		checkAliveMsgNotExist(instances, aliveMsgs, i, "[Step 5.1 - after processing stale old aliveMsg]")
1522  		checkAliveMsgNotExist(instances, newAliveMsgs, i, "[Step 5.2 - after processing stale new aliveMsg]")
  1523  	}
  1524  
  1525  	// Handling old MembershipResponse messages
  1526  	for i := 0; i < peersNum; i++ {
  1527  		respForPeer := memRespMsgs[i]
  1528  		for _, msg := range respForPeer {
  1529  			sMsg, _ := protoext.NoopSign(&proto.GossipMessage{
  1530  				Tag:   proto.GossipMessage_EMPTY,
  1531  				Nonce: uint64(0),
  1532  				Content: &proto.GossipMessage_MemRes{
  1533  					MemRes: msg,
  1534  				},
  1535  			})
  1536  			instances[i].discoveryImpl().handleMsgFromComm(&dummyReceivedMessage{
  1537  				msg: sMsg,
  1538  				info: &protoext.ConnectionInfo{
  1539  					ID: common.PKIidType(fmt.Sprintf("d%d", i)),
  1540  				},
  1541  			})
  1542  		}
  1543  	}
  1544  
  1545  	// MembershipResponse msg processing didn't change anything
  1546  	for i := 0; i < peersNum; i++ {
1547  		checkAliveMsgNotExist(instances, aliveMsgs, i, "[Step 6 - after processing stale MembershipResp]")
  1548  	}
  1549  
  1550  	for i := 0; i < peersNum; i++ {
  1551  		instances[i].Stop()
  1552  	}
  1553  }
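
// The sleeps in the expiration test above are sized from the store's TTL.
// The helper below is an illustrative sketch, not used by the suite: it
// assumes (as the waits in this file do) that an alive message's TTL is
// AliveExpirationTimeout multiplied by MsgExpirationFactor.
func exampleAliveMsgTTL(cfg DiscoveryConfig) time.Duration {
	// A peer's alive message is purged from the msgStore once it has gone
	// unrefreshed for the expiration timeout times the expiration factor.
	return cfg.AliveExpirationTimeout * time.Duration(cfg.MsgExpirationFactor)
}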
  1554  
  1555  func TestAliveMsgStore(t *testing.T) {
  1556  	bootPeers := []string{}
  1557  	peersNum := 2
  1558  	instances := []*gossipInstance{}
  1559  	aliveMsgs := []*protoext.SignedGossipMessage{}
  1560  	memReqMsgs := []*protoext.SignedGossipMessage{}
  1561  
  1562  	for i := 0; i < peersNum; i++ {
  1563  		id := fmt.Sprintf("d%d", i)
  1564  		inst := createDiscoveryInstanceWithNoGossip(32610+i, id, bootPeers)
  1565  		instances = append(instances, inst)
  1566  	}
  1567  
  1568  	// Creating MembershipRequest messages
  1569  	for i := 0; i < peersNum; i++ {
  1570  		memReqMsg, _ := instances[i].discoveryImpl().createMembershipRequest(true)
  1571  		sMsg, _ := protoext.NoopSign(memReqMsg)
  1572  		memReqMsgs = append(memReqMsgs, sMsg)
  1573  	}
  1574  	// Creating Alive messages
  1575  	for i := 0; i < peersNum; i++ {
  1576  		aliveMsg, _ := instances[i].discoveryImpl().createSignedAliveMessage(true)
  1577  		aliveMsgs = append(aliveMsgs, aliveMsg)
  1578  	}
  1579  
  1580  	// Check new alive msgs
  1581  	for _, msg := range aliveMsgs {
  1582  		require.True(t, instances[0].discoveryImpl().msgStore.CheckValid(msg), "aliveMsgStore CheckValid returns false on new AliveMsg")
  1583  	}
  1584  
  1585  	// Add new alive msgs
  1586  	for _, msg := range aliveMsgs {
  1587  		require.True(t, instances[0].discoveryImpl().msgStore.Add(msg), "aliveMsgStore Add returns false on new AliveMsg")
  1588  	}
  1589  
1590  	// Check already-stored alive msgs
  1591  	for _, msg := range aliveMsgs {
  1592  		require.False(t, instances[0].discoveryImpl().msgStore.CheckValid(msg), "aliveMsgStore CheckValid returns true on existing AliveMsg")
  1593  	}
  1594  
  1595  	// Check non-alive msgs
  1596  	for _, msg := range memReqMsgs {
  1597  		require.Panics(t, func() { instances[1].discoveryImpl().msgStore.CheckValid(msg) }, "aliveMsgStore CheckValid should panic on new MembershipRequest msg")
  1598  		require.Panics(t, func() { instances[1].discoveryImpl().msgStore.Add(msg) }, "aliveMsgStore Add should panic on new MembershipRequest msg")
  1599  	}
  1600  }
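
// sketchAliveMsgOrdering is an illustrative sketch, not invoked by the
// suite, of the ordering rule the assertions above rely on: the alive
// msgStore judges validity by sequence number, so once a newer alive
// message from the same peer is stored, an older one is no longer valid.
// Port 32710 is an arbitrary choice for the sketch.
func sketchAliveMsgOrdering(t *testing.T) {
	inst := createDiscoveryInstanceWithNoGossip(32710, "sketch", []string{})
	defer inst.Stop()

	older, _ := inst.discoveryImpl().createSignedAliveMessage(true)
	newer, _ := inst.discoveryImpl().createSignedAliveMessage(true)

	// Storing the newer message makes the older one invalid on arrival.
	require.True(t, inst.discoveryImpl().msgStore.Add(newer))
	require.False(t, inst.discoveryImpl().msgStore.CheckValid(older))
}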
  1601  
  1602  func TestMemRespDisclosurePol(t *testing.T) {
  1603  	pol := func(remotePeer *NetworkMember) (Sieve, EnvelopeFilter) {
  1604  		assert.Equal(t, remotePeer.InternalEndpoint, remotePeer.Endpoint)
  1605  		return func(_ *protoext.SignedGossipMessage) bool {
  1606  				return remotePeer.Endpoint != "localhost:7879"
  1607  			}, func(m *protoext.SignedGossipMessage) *proto.Envelope {
  1608  				return m.Envelope
  1609  			}
  1610  	}
  1611  
  1612  	wasMembershipResponseReceived := func(msg *protoext.SignedGossipMessage) {
  1613  		assert.Nil(t, msg.GetMemRes())
  1614  	}
  1615  
  1616  	d1 := createDiscoveryInstanceThatGossips(7878, "d1", []string{}, true, pol, defaultTestConfig)
  1617  	defer d1.Stop()
  1618  	d2 := createDiscoveryInstanceThatGossipsWithInterceptors(7879, "d2", []string{"localhost:7878"}, true, noopPolicy, wasMembershipResponseReceived, defaultTestConfig)
  1619  	defer d2.Stop()
  1620  	d3 := createDiscoveryInstanceThatGossips(7880, "d3", []string{"localhost:7878"}, true, pol, defaultTestConfig)
  1621  	defer d3.Stop()
  1622  
  1623  	// all peers know each other
  1624  	assertMembership(t, []*gossipInstance{d1, d2, d3}, 2)
  1625  	// d2 received some messages, but we asserted that none of them are membership responses.
  1626  	assert.NotZero(t, d2.receivedMsgCount())
  1627  	assert.NotZero(t, d2.sentMsgCount())
  1628  }
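
// A disclosure policy, as exercised above, is a pair of closures: a Sieve
// that decides whether a given message may be sent to the remote peer at
// all, and an EnvelopeFilter that can trim the envelope before it leaves.
// The sketch below is illustrative and unused: it discloses every message
// but strips the SecretEnvelope, the usual way internal endpoints are
// withheld from peers outside the organization.
func sketchRedactingPolicy(_ *NetworkMember) (Sieve, EnvelopeFilter) {
	return func(_ *protoext.SignedGossipMessage) bool {
			return true // disclose all messages to this peer
		}, func(m *protoext.SignedGossipMessage) *proto.Envelope {
			// Rebuild the envelope without the secret part instead of
			// mutating the cached original.
			return &proto.Envelope{
				Payload:   m.Envelope.Payload,
				Signature: m.Envelope.Signature,
			}
		}
}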
  1629  
  1630  func TestMembersByID(t *testing.T) {
  1631  	members := Members{
  1632  		{PKIid: common.PKIidType("p0"), Endpoint: "p0"},
  1633  		{PKIid: common.PKIidType("p1"), Endpoint: "p1"},
  1634  	}
  1635  	byID := members.ByID()
  1636  	require.Len(t, byID, 2)
  1637  	require.Equal(t, "p0", byID["p0"].Endpoint)
  1638  	require.Equal(t, "p1", byID["p1"].Endpoint)
  1639  }
  1640  
  1641  func TestFilter(t *testing.T) {
  1642  	members := Members{
  1643  		{PKIid: common.PKIidType("p0"), Endpoint: "p0", Properties: &proto.Properties{
  1644  			Chaincodes: []*proto.Chaincode{{Name: "cc", Version: "1.0"}},
  1645  		}},
  1646  		{PKIid: common.PKIidType("p1"), Endpoint: "p1", Properties: &proto.Properties{
  1647  			Chaincodes: []*proto.Chaincode{{Name: "cc", Version: "2.0"}},
  1648  		}},
  1649  	}
  1650  	res := members.Filter(func(member NetworkMember) bool {
  1651  		cc := member.Properties.Chaincodes[0]
  1652  		return cc.Version == "2.0" && cc.Name == "cc"
  1653  	})
  1654  	require.Equal(t, Members{members[1]}, res)
  1655  }
  1656  
  1657  func TestMap(t *testing.T) {
  1658  	members := Members{
  1659  		{PKIid: common.PKIidType("p0"), Endpoint: "p0"},
  1660  		{PKIid: common.PKIidType("p1"), Endpoint: "p1"},
  1661  	}
  1662  	expectedMembers := Members{
  1663  		{PKIid: common.PKIidType("p0"), Endpoint: "p0", Properties: &proto.Properties{LedgerHeight: 2}},
  1664  		{PKIid: common.PKIidType("p1"), Endpoint: "p1", Properties: &proto.Properties{LedgerHeight: 2}},
  1665  	}
  1666  
  1667  	addProperty := func(member NetworkMember) NetworkMember {
  1668  		member.Properties = &proto.Properties{
  1669  			LedgerHeight: 2,
  1670  		}
  1671  		return member
  1672  	}
  1673  
  1674  	require.Equal(t, expectedMembers, members.Map(addProperty))
  1675  	// Ensure original members didn't change
  1676  	require.Nil(t, members[0].Properties)
  1677  	require.Nil(t, members[1].Properties)
  1678  }
  1679  
  1680  func TestMembersIntersect(t *testing.T) {
  1681  	members1 := Members{
  1682  		{PKIid: common.PKIidType("p0"), Endpoint: "p0"},
  1683  		{PKIid: common.PKIidType("p1"), Endpoint: "p1"},
  1684  	}
  1685  	members2 := Members{
  1686  		{PKIid: common.PKIidType("p1"), Endpoint: "p1"},
  1687  		{PKIid: common.PKIidType("p2"), Endpoint: "p2"},
  1688  	}
  1689  	require.Equal(t, Members{{PKIid: common.PKIidType("p1"), Endpoint: "p1"}}, members1.Intersect(members2))
  1690  }
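
// The Members helpers above compose. This illustrative, unused sketch
// chains Filter and Map the way calling code typically does: keep members
// that published Properties, then project away their metadata. Map copies
// each member, so the input slice is left untouched.
func sketchMembersPipeline(members Members) Members {
	return members.Filter(func(m NetworkMember) bool {
		return m.Properties != nil // keep only members with known properties
	}).Map(func(m NetworkMember) NetworkMember {
		m.Metadata = nil // modifies the copy, not the original member
		return m
	})
}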
  1691  
  1692  func TestPeerIsolation(t *testing.T) {
  1693  	// Scenario:
  1694  	// Start 3 peers (peer0, peer1, peer2). Set peer1 as the bootstrap peer for all.
  1695  	// Stop peer0 and peer1 for a while, start them again and test if peer2 still gets full membership
  1696  
  1697  	config := defaultTestConfig
  1698  	// Use a smaller AliveExpirationTimeout than the default to reduce the running time of the test.
  1699  	config.AliveExpirationTimeout = 2 * config.AliveTimeInterval
  1700  
  1701  	peersNum := 3
  1702  	bootPeers := []string{bootPeer(7121)}
  1703  	instances := []*gossipInstance{}
  1704  	var inst *gossipInstance
  1705  
  1706  	// Start all peers and wait for full membership
  1707  	for i := 0; i < peersNum; i++ {
  1708  		id := fmt.Sprintf("d%d", i)
  1709  		inst = createDiscoveryInstanceCustomConfig(7120+i, id, bootPeers, config)
  1710  		instances = append(instances, inst)
  1711  	}
  1712  	assertMembership(t, instances, peersNum-1)
  1713  
  1714  	// Stop the first 2 peers so the third peer would stay alone
  1715  	stopInstances(t, instances[:peersNum-1])
  1716  	assertMembership(t, instances[peersNum-1:], 0)
  1717  
  1718  	// Sleep the same amount of time as it takes to remove a message from the aliveMsgStore (aliveMsgTTL)
1719  	// Add a second as a buffer
  1720  	time.Sleep(config.AliveExpirationTimeout*DefMsgExpirationFactor + time.Second)
  1721  
  1722  	// Start again the first 2 peers and wait for all the peers to get full membership.
  1723  	// Especially, we want to test that peer2 won't be isolated
  1724  	for i := 0; i < peersNum-1; i++ {
  1725  		id := fmt.Sprintf("d%d", i)
  1726  		inst = createDiscoveryInstanceCustomConfig(7120+i, id, bootPeers, config)
  1727  		instances[i] = inst
  1728  	}
  1729  	assertMembership(t, instances, peersNum-1)
  1730  }
  1731  
  1732  func TestMembershipAfterExpiration(t *testing.T) {
  1733  	// Scenario:
  1734  	// Start 3 peers (peer0, peer1, peer2). Set peer0 as the anchor peer.
  1735  	// Stop peer0 and peer1 for a while, start them again and test if peer2 still gets full membership
  1736  
  1737  	config := defaultTestConfig
  1738  	// Use a smaller AliveExpirationTimeout than the default to reduce the running time of the test.
  1739  	config.AliveExpirationTimeout = 2 * config.AliveTimeInterval
  1740  	config.ReconnectInterval = config.AliveExpirationTimeout
  1741  	config.MsgExpirationFactor = 5
  1742  
  1743  	peersNum := 3
  1744  	ports := []int{9120, 9121, 9122}
  1745  	anchorPeer := "localhost:9120"
  1746  	bootPeers := []string{}
  1747  	instances := []*gossipInstance{}
  1748  	var inst *gossipInstance
  1749  	mockTracker := &mockAnchorPeerTracker{[]string{anchorPeer}}
  1750  
  1751  	l, err := zap.NewDevelopment()
  1752  	assert.NoError(t, err)
  1753  	expired := make(chan struct{}, 1)
  1754  
  1755  	// use a custom logger to verify messages from expiration callback
  1756  	loggerThatTracksCustomMessage := func() util.Logger {
  1757  		var lock sync.RWMutex
  1758  		expectedMsgs := map[string]struct{}{
  1759  			"Do not remove bootstrap or anchor peer endpoint localhost:9120 from membership":                                   {},
  1760  			"Removing member: Endpoint: localhost:9121, InternalEndpoint: localhost:9121, PKIID: 6c6f63616c686f73743a39313231": {},
  1761  		}
  1762  
  1763  		return flogging.NewFabricLogger(l, zap.Hooks(func(entry zapcore.Entry) error {
  1764  			// do nothing if we already found all the expectedMsgs
  1765  			lock.RLock()
  1766  			expectedMsgSize := len(expectedMsgs)
  1767  			lock.RUnlock()
  1768  
  1769  			if expectedMsgSize == 0 {
  1770  				select {
  1771  				case expired <- struct{}{}:
  1772  				default:
  1773  					// no room is fine, continue
  1774  				}
  1775  				return nil
  1776  			}
  1777  
  1778  			lock.Lock()
  1779  			defer lock.Unlock()
  1780  
  1781  			delete(expectedMsgs, entry.Message)
  1782  			return nil
  1783  		}))
  1784  	}
  1785  
  1786  	// Start all peers, connect to the anchor peer and verify full membership
  1787  	for i := 0; i < peersNum; i++ {
  1788  		id := fmt.Sprintf("d%d", i)
  1789  		logger := loggerThatTracksCustomMessage()
  1790  		inst = createDiscoveryInstanceWithAnchorPeerTracker(ports[i], id, bootPeers, true, noopPolicy, func(_ *protoext.SignedGossipMessage) {}, config, mockTracker, logger)
  1791  		instances = append(instances, inst)
  1792  	}
  1793  	for i := 1; i < peersNum; i++ {
  1794  		connect(instances[i], anchorPeer)
  1795  	}
  1796  	assertMembership(t, instances, peersNum-1)
  1797  
  1798  	// Stop peer0 and peer1 so that peer2 would stay alone
  1799  	stopInstances(t, instances[0:peersNum-1])
  1800  
  1801  	// waitTime is the same amount of time as it takes to remove a message from the aliveMsgStore (aliveMsgTTL)
1802  	// Add a second as a buffer
  1803  	waitTime := config.AliveExpirationTimeout*time.Duration(config.MsgExpirationFactor) + time.Second
  1804  	select {
  1805  	case <-expired:
  1806  	case <-time.After(waitTime):
  1807  		t.Fatalf("timed out")
  1808  	}
  1809  	// peer2's deadMembership should contain the anchor peer
1810  	deadMembership := instances[peersNum-1].discoveryImpl().deadMembership
1811  	require.Equal(t, 1, deadMembership.Size())
  1812  	assertMembership(t, instances[peersNum-1:], 0)
  1813  
  1814  	// Start again peer0 and peer1 and wait for all the peers to get full membership.
  1815  	// Especially, we want to test that peer2 won't be isolated
  1816  	for i := 0; i < peersNum-1; i++ {
  1817  		id := fmt.Sprintf("d%d", i)
  1818  		inst = createDiscoveryInstanceWithAnchorPeerTracker(ports[i], id, bootPeers, true, noopPolicy, func(_ *protoext.SignedGossipMessage) {}, config, mockTracker, nil)
  1819  		instances[i] = inst
  1820  	}
  1821  	connect(instances[1], anchorPeer)
  1822  	assertMembership(t, instances, peersNum-1)
  1823  }
  1824  
  1825  func connect(inst *gossipInstance, endpoint string) {
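	// Install a one-shot mock on the comm layer: it swallows outgoing
	// messages until this instance sends its first MembershipRequest, then
	// uninstalls itself so real communication can resume.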
  1826  	inst.comm.lock.Lock()
  1827  	inst.comm.mock = &mock.Mock{}
  1828  	inst.comm.mock.On("SendToPeer", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
  1830  		msg := arguments.Get(1).(*protoext.SignedGossipMessage)
  1831  		if req := msg.GetMemReq(); req != nil {
  1832  			inst.comm.lock.Lock()
  1833  			inst.comm.mock = nil
  1834  			inst.comm.lock.Unlock()
  1835  		}
  1836  	})
  1837  	inst.comm.mock.On("Ping", mock.Anything)
  1838  	inst.comm.lock.Unlock()
1839  	memberToConnectTo := NetworkMember{Endpoint: endpoint, PKIid: []byte(endpoint)}
1840  	inst.Connect(memberToConnectTo, func() (identification *PeerIdentification, err error) {
  1841  		return &PeerIdentification{SelfOrg: true, ID: nil}, nil
  1842  	})
  1843  }
  1844  
  1845  func waitUntilOrFail(t *testing.T, pred func() bool) {
  1846  	waitUntilTimeoutOrFail(t, pred, timeout)
  1847  }
  1848  
  1849  func waitUntilTimeoutOrFail(t *testing.T, pred func() bool, timeout time.Duration) {
  1850  	start := time.Now()
  1851  	limit := start.UnixNano() + timeout.Nanoseconds()
  1852  	for time.Now().UnixNano() < limit {
  1853  		if pred() {
  1854  			return
  1855  		}
  1856  		time.Sleep(timeout / 10)
  1857  	}
  1858  	require.Fail(t, "Timeout expired!")
  1859  }
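
// The polling helpers above are typically used with a membership
// predicate; an illustrative (unused) wrapper:
func sketchWaitForMembership(t *testing.T, inst *gossipInstance, expected int) {
	waitUntilOrFail(t, func() bool {
		return len(inst.GetMembership()) == expected
	})
}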
  1860  
  1861  func waitUntilOrFailBlocking(t *testing.T, f func()) {
  1862  	successChan := make(chan struct{}, 1)
  1863  	go func() {
  1864  		f()
  1865  		successChan <- struct{}{}
  1866  	}()
  1867  	select {
1868  	case <-time.After(timeout):
  1870  	case <-successChan:
  1871  		return
  1872  	}
  1873  	require.Fail(t, "Timeout expired!")
  1874  }
  1875  
  1876  func stopInstances(t *testing.T, instances []*gossipInstance) {
  1877  	stopAction := &sync.WaitGroup{}
  1878  	for _, inst := range instances {
  1879  		stopAction.Add(1)
  1880  		go func(inst *gossipInstance) {
  1881  			defer stopAction.Done()
  1882  			inst.Stop()
  1883  		}(inst)
  1884  	}
  1885  
  1886  	waitUntilOrFailBlocking(t, stopAction.Wait)
  1887  }
  1888  
  1889  func assertMembership(t *testing.T, instances []*gossipInstance, expectedNum int) {
  1890  	wg := sync.WaitGroup{}
  1891  	wg.Add(len(instances))
  1892  
1893  	ctx, cancellation := context.WithTimeout(context.Background(), timeout)
1894  	defer cancellation()
  1895  
  1896  	for _, inst := range instances {
  1897  		go func(ctx context.Context, i *gossipInstance) {
  1898  			defer wg.Done()
  1899  			for {
  1900  				select {
  1901  				case <-ctx.Done():
  1902  					return
  1903  				case <-time.After(timeout / 10):
  1904  					if len(i.GetMembership()) == expectedNum {
  1905  						return
  1906  					}
  1907  				}
  1908  			}
  1909  		}(ctx, inst)
  1910  	}
  1911  
  1912  	wg.Wait()
  1913  	require.NoError(t, ctx.Err(), "Timeout expired!")
  1914  }
  1915  
  1916  func portsOfMembers(members []NetworkMember) []int {
  1917  	ports := make([]int, len(members))
  1918  	for i := range members {
  1919  		ports[i] = portOfEndpoint(members[i].Endpoint)
  1920  	}
  1921  	sort.Ints(ports)
  1922  	return ports
  1923  }
  1924  
  1925  func portOfEndpoint(endpoint string) int {
  1926  	port, _ := strconv.ParseInt(strings.Split(endpoint, ":")[1], 10, 64)
  1927  	return int(port)
  1928  }
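
// portOfEndpoint deliberately discards parse errors because endpoints in
// these tests are always well-formed "host:port" strings. A stricter
// variant, shown only for illustration, would surface malformed input:
func portOfEndpointStrict(endpoint string) (int, error) {
	_, portStr, err := net.SplitHostPort(endpoint)
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(portStr)
}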