github.com/darrenli6/fabric-sdk-example@v0.0.0-20220109053535-94b13b56df8c/gossip/discovery/discovery_test.go

/*
Copyright IBM Corp. 2016 All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package discovery

import (
	"bytes"
	"fmt"
	"io"
	"math/rand"
	"net"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/hyperledger/fabric/core/config"
	"github.com/hyperledger/fabric/gossip/common"
	"github.com/hyperledger/fabric/gossip/util"
	proto "github.com/hyperledger/fabric/protos/gossip"
	"github.com/spf13/viper"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)
var timeout = 15 * time.Second

func init() {
	util.SetupTestLogging()
	aliveTimeInterval := 100 * time.Millisecond
	SetAliveTimeInterval(aliveTimeInterval)
	SetAliveExpirationTimeout(10 * aliveTimeInterval)
	SetAliveExpirationCheckInterval(aliveTimeInterval)
	SetReconnectInterval(10 * aliveTimeInterval)
	maxConnectionAttempts = 10000
}
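
// Illustrative sketch, not part of the original test file: with the values set
// in init() above, the timing knobs used throughout these tests work out to an
// alive interval of 100ms, an expiration timeout and reconnect interval of 1s
// each, and an expiration check every 100ms. exampleDerivedTimings recomputes
// the derived values so the 10x relationship stays explicit.
func exampleDerivedTimings() (expirationTimeout, reconnectInterval time.Duration) {
	aliveTimeInterval := 100 * time.Millisecond
	return 10 * aliveTimeInterval, 10 * aliveTimeInterval
}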

type dummyCommModule struct {
	msgsReceived uint32
	msgsSent     uint32
	id           string
	presumeDead  chan common.PKIidType
	detectedDead chan string
	streams      map[string]proto.Gossip_GossipStreamClient
	conns        map[string]*grpc.ClientConn
	lock         *sync.RWMutex
	incMsgs      chan *proto.SignedGossipMessage
	lastSeqs     map[string]uint64
	shouldGossip bool
	mock         *mock.Mock
}
type gossipInstance struct {
	comm *dummyCommModule
	Discovery
	gRPCserv      *grpc.Server
	lsnr          net.Listener
	shouldGossip  bool
	syncInitiator *time.Ticker
	stopChan      chan struct{}
	port          int
}

func (comm *dummyCommModule) ValidateAliveMsg(am *proto.SignedGossipMessage) bool {
	return true
}

func (comm *dummyCommModule) SignMessage(am *proto.GossipMessage, internalEndpoint string) *proto.Envelope {
	secret := &proto.Secret{
		Content: &proto.Secret_InternalEndpoint{
			InternalEndpoint: internalEndpoint,
		},
	}
	signer := func(msg []byte) ([]byte, error) {
		return nil, nil
	}
	s, _ := am.NoopSign()
	env := s.Envelope
	env.SignSecret(signer, secret)
	return env
}

func (comm *dummyCommModule) Gossip(msg *proto.SignedGossipMessage) {
	if !comm.shouldGossip {
		return
	}
	comm.lock.Lock()
	defer comm.lock.Unlock()
	for _, conn := range comm.streams {
		conn.Send(msg.Envelope)
	}
}

func (comm *dummyCommModule) SendToPeer(peer *NetworkMember, msg *proto.SignedGossipMessage) {
	comm.lock.RLock()
	_, exists := comm.streams[peer.Endpoint]
	mock := comm.mock
	comm.lock.RUnlock()

	if mock != nil {
		mock.Called(peer, msg)
	}

	if !exists {
		if !comm.Ping(peer) {
			fmt.Printf("Ping to %v failed\n", peer.Endpoint)
			return
		}
	}
	comm.lock.Lock()
	s, _ := msg.NoopSign()
	comm.streams[peer.Endpoint].Send(s.Envelope)
	comm.lock.Unlock()
	atomic.AddUint32(&comm.msgsSent, 1)
}

func (comm *dummyCommModule) Ping(peer *NetworkMember) bool {
	comm.lock.Lock()
	defer comm.lock.Unlock()

	if comm.mock != nil {
		comm.mock.Called()
	}

	_, alreadyExists := comm.streams[peer.Endpoint]
	if !alreadyExists {
		newConn, err := grpc.Dial(peer.Endpoint, grpc.WithInsecure())
		if err != nil {
			return false
		}
		if stream, err := proto.NewGossipClient(newConn).GossipStream(context.Background()); err == nil {
			comm.conns[peer.Endpoint] = newConn
			comm.streams[peer.Endpoint] = stream
			return true
		}
		return false
	}
	conn := comm.conns[peer.Endpoint]
	if _, err := proto.NewGossipClient(conn).Ping(context.Background(), &proto.Empty{}); err != nil {
		return false
	}
	return true
}

func (comm *dummyCommModule) Accept() <-chan *proto.SignedGossipMessage {
	return comm.incMsgs
}

func (comm *dummyCommModule) PresumedDead() <-chan common.PKIidType {
	return comm.presumeDead
}

func (comm *dummyCommModule) CloseConn(peer *NetworkMember) {
	comm.lock.Lock()
	defer comm.lock.Unlock()

	if _, exists := comm.streams[peer.Endpoint]; !exists {
		return
	}

	comm.streams[peer.Endpoint].CloseSend()
	comm.conns[peer.Endpoint].Close()
}

func (g *gossipInstance) receivedMsgCount() int {
	return int(atomic.LoadUint32(&g.comm.msgsReceived))
}

func (g *gossipInstance) sentMsgCount() int {
	return int(atomic.LoadUint32(&g.comm.msgsSent))
}

func (g *gossipInstance) discoveryImpl() *gossipDiscoveryImpl {
	return g.Discovery.(*gossipDiscoveryImpl)
}

func (g *gossipInstance) initiateSync(frequency time.Duration, peerNum int) {
	g.syncInitiator = time.NewTicker(frequency)
	g.stopChan = make(chan struct{})
	go func() {
		for {
			select {
			case <-g.syncInitiator.C:
				g.Discovery.InitiateSync(peerNum)
			case <-g.stopChan:
				g.syncInitiator.Stop()
				return
			}
		}
	}()
}

func (g *gossipInstance) GossipStream(stream proto.Gossip_GossipStreamServer) error {
	for {
		envelope, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		lgr := g.Discovery.(*gossipDiscoveryImpl).logger
		gMsg, err := envelope.ToGossipMessage()
		if err != nil {
			lgr.Warning("Failed deserializing GossipMessage from envelope:", err)
			continue
		}

		lgr.Debug(g.Discovery.Self().Endpoint, "Got message:", gMsg)
		g.comm.incMsgs <- gMsg
		atomic.AddUint32(&g.comm.msgsReceived, 1)

		if aliveMsg := gMsg.GetAliveMsg(); aliveMsg != nil {
			g.tryForwardMessage(gMsg)
		}
	}
}

func (g *gossipInstance) tryForwardMessage(msg *proto.SignedGossipMessage) {
	g.comm.lock.Lock()

	aliveMsg := msg.GetAliveMsg()

	forward := false
	id := string(aliveMsg.Membership.PkiId)
	seqNum := aliveMsg.Timestamp.SeqNum
	if last, exists := g.comm.lastSeqs[id]; exists {
		if last < seqNum {
			g.comm.lastSeqs[id] = seqNum
			forward = true
		}
	} else {
		g.comm.lastSeqs[id] = seqNum
		forward = true
	}

	g.comm.lock.Unlock()

	if forward {
		g.comm.Gossip(msg)
	}
}
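
// Illustrative sketch, not part of the original test file: tryForwardMessage
// above forwards an alive message only when its sequence number is strictly
// greater than the last one recorded for that PKI-ID, so duplicates and
// replays are dropped. exampleShouldForward isolates that gate.
func exampleShouldForward(lastSeqs map[string]uint64, id string, seqNum uint64) bool {
	if last, exists := lastSeqs[id]; exists && last >= seqNum {
		return false
	}
	lastSeqs[id] = seqNum
	return true
}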

func (g *gossipInstance) Stop() {
	if g.syncInitiator != nil {
		g.stopChan <- struct{}{}
	}
	g.gRPCserv.Stop()
	g.lsnr.Close()
	for _, stream := range g.comm.streams {
		stream.CloseSend()
	}
	for _, conn := range g.comm.conns {
		conn.Close()
	}
	g.Discovery.Stop()
}

func (g *gossipInstance) Ping(context.Context, *proto.Empty) (*proto.Empty, error) {
	return &proto.Empty{}, nil
}

var noopPolicy = func(remotePeer *NetworkMember) (Sieve, EnvelopeFilter) {
	return func(msg *proto.SignedGossipMessage) bool {
			return true
		}, func(message *proto.SignedGossipMessage) *proto.Envelope {
			return message.Envelope
		}
}

func createDiscoveryInstance(port int, id string, bootstrapPeers []string) *gossipInstance {
	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, true, noopPolicy)
}

func createDiscoveryInstanceWithNoGossip(port int, id string, bootstrapPeers []string) *gossipInstance {
	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, false, noopPolicy)
}

func createDiscoveryInstanceWithNoGossipWithDisclosurePolicy(port int, id string, bootstrapPeers []string, pol DisclosurePolicy) *gossipInstance {
	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, false, pol)
}

func createDiscoveryInstanceThatGossips(port int, id string, bootstrapPeers []string, shouldGossip bool, pol DisclosurePolicy) *gossipInstance {
	comm := &dummyCommModule{
		conns:        make(map[string]*grpc.ClientConn),
		streams:      make(map[string]proto.Gossip_GossipStreamClient),
		incMsgs:      make(chan *proto.SignedGossipMessage, 1000),
		presumeDead:  make(chan common.PKIidType, 10000),
		id:           id,
		detectedDead: make(chan string, 10000),
		lock:         &sync.RWMutex{},
		lastSeqs:     make(map[string]uint64),
		shouldGossip: shouldGossip,
	}

	endpoint := fmt.Sprintf("localhost:%d", port)
	self := NetworkMember{
		Metadata:         []byte{},
		PKIid:            []byte(endpoint),
		Endpoint:         endpoint,
		InternalEndpoint: endpoint,
	}

	listenAddress := fmt.Sprintf(":%d", port)
	ll, err := net.Listen("tcp", listenAddress)
	if err != nil {
		fmt.Printf("Error listening on %v: %v\n", listenAddress, err)
	}
	s := grpc.NewServer()

	discSvc := NewDiscoveryService(self, comm, comm, pol)
	for _, bootPeer := range bootstrapPeers {
		discSvc.Connect(NetworkMember{Endpoint: bootPeer, InternalEndpoint: bootPeer}, func() (*PeerIdentification, error) {
			return &PeerIdentification{SelfOrg: true, ID: common.PKIidType(bootPeer)}, nil
		})
	}

	gossInst := &gossipInstance{comm: comm, gRPCserv: s, Discovery: discSvc, lsnr: ll, shouldGossip: shouldGossip, port: port}

	proto.RegisterGossipServer(s, gossInst)
	go s.Serve(ll)

	return gossInst
}
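
// Illustrative sketch, not part of the original test file: a minimal use of the
// factory above. Start two instances, point the second at the first as its
// bootstrap peer, and wait until each sees the other. The ports are chosen
// arbitrarily for illustration.
func exampleTwoPeers(t *testing.T) {
	a := createDiscoveryInstance(9000, "a", []string{})
	b := createDiscoveryInstance(9001, "b", []string{bootPeer(9000)})
	defer stopInstances(t, []*gossipInstance{a, b})
	assertMembership(t, []*gossipInstance{a, b}, 1)
}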

func bootPeer(port int) string {
	return fmt.Sprintf("localhost:%d", port)
}

func TestToString(t *testing.T) {
	nm := NetworkMember{
		Endpoint:         "a",
		InternalEndpoint: "b",
	}
	assert.Equal(t, "b", nm.PreferredEndpoint())
	nm = NetworkMember{
		Endpoint: "a",
	}
	assert.Equal(t, "a", nm.PreferredEndpoint())

	now := time.Now()
	ts := &timestamp{
		incTime: now,
		seqNum:  uint64(42),
	}
	assert.Equal(t, fmt.Sprintf("%d, %d", now.UnixNano(), 42), fmt.Sprint(ts))
}

func TestBadInput(t *testing.T) {
	inst := createDiscoveryInstance(2048, fmt.Sprintf("d%d", 0), []string{})
	inst.Discovery.(*gossipDiscoveryImpl).handleMsgFromComm(nil)
	s, _ := (&proto.GossipMessage{
		Content: &proto.GossipMessage_DataMsg{
			DataMsg: &proto.DataMessage{},
		},
	}).NoopSign()
	inst.Discovery.(*gossipDiscoveryImpl).handleMsgFromComm(s)
}

func TestConnect(t *testing.T) {
	t.Parallel()
	nodeNum := 10
	instances := []*gossipInstance{}
	firstSentMemReqMsgs := make(chan *proto.SignedGossipMessage, nodeNum)
	for i := 0; i < nodeNum; i++ {
		inst := createDiscoveryInstance(7611+i, fmt.Sprintf("d%d", i), []string{})

		inst.comm.lock.Lock()
		inst.comm.mock = &mock.Mock{}
		inst.comm.mock.On("SendToPeer", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
			msg := arguments.Get(1).(*proto.SignedGossipMessage)
			if req := msg.GetMemReq(); req != nil {
				selfMsg, _ := req.SelfInformation.ToGossipMessage()
				firstSentMemReqMsgs <- selfMsg
				inst.comm.lock.Lock()
				inst.comm.mock = nil
				inst.comm.lock.Unlock()
			}
		})
		inst.comm.mock.On("Ping", mock.Anything)
		inst.comm.lock.Unlock()

		instances = append(instances, inst)
		j := (i + 1) % nodeNum
		endpoint := fmt.Sprintf("localhost:%d", 7611+j)
		netMember2Connect2 := NetworkMember{Endpoint: endpoint, PKIid: []byte(endpoint)}
		inst.Connect(netMember2Connect2, func() (identification *PeerIdentification, err error) {
			return &PeerIdentification{SelfOrg: false, ID: nil}, nil
		})
	}

	time.Sleep(time.Second * 3)
	assert.Len(t, firstSentMemReqMsgs, nodeNum)
	close(firstSentMemReqMsgs)
	for firstSentSelfMsg := range firstSentMemReqMsgs {
		assert.Nil(t, firstSentSelfMsg.Envelope.SecretEnvelope)
	}

	fullMembership := func() bool {
		return nodeNum-1 == len(instances[nodeNum-1].GetMembership())
	}
	waitUntilOrFail(t, fullMembership)

	discInst := instances[rand.Intn(len(instances))].Discovery.(*gossipDiscoveryImpl)
	mr, _ := discInst.createMembershipRequest(true)
	am, _ := mr.GetMemReq().SelfInformation.ToGossipMessage()
	assert.NotNil(t, am.SecretEnvelope)
	mr2, _ := discInst.createMembershipRequest(false)
	am, _ = mr2.GetMemReq().SelfInformation.ToGossipMessage()
	assert.Nil(t, am.SecretEnvelope)
	stopInstances(t, instances)
}

func TestUpdate(t *testing.T) {
	t.Parallel()
	nodeNum := 5
	bootPeers := []string{bootPeer(6611), bootPeer(6612)}
	instances := []*gossipInstance{}

	inst := createDiscoveryInstance(6611, "d1", bootPeers)
	instances = append(instances, inst)

	inst = createDiscoveryInstance(6612, "d2", bootPeers)
	instances = append(instances, inst)

	for i := 3; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst = createDiscoveryInstance(6610+i, id, bootPeers)
		instances = append(instances, inst)
	}

	fullMembership := func() bool {
		return nodeNum-1 == len(instances[nodeNum-1].GetMembership())
	}

	waitUntilOrFail(t, fullMembership)

	instances[0].UpdateMetadata([]byte("bla bla"))
	instances[nodeNum-1].UpdateEndpoint("localhost:5511")

	checkMembership := func() bool {
		for _, member := range instances[nodeNum-1].GetMembership() {
			if string(member.PKIid) == instances[0].comm.id {
				if string(member.Metadata) != "bla bla" {
					return false
				}
			}
		}

		for _, member := range instances[0].GetMembership() {
			if string(member.PKIid) == instances[nodeNum-1].comm.id {
				if member.Endpoint != "localhost:5511" {
					return false
				}
			}
		}
		return true
	}

	waitUntilOrFail(t, checkMembership)
	stopInstances(t, instances)
}

func TestInitiateSync(t *testing.T) {
	t.Parallel()
	nodeNum := 10
	bootPeers := []string{bootPeer(3611), bootPeer(3612)}
	instances := []*gossipInstance{}

	toDie := int32(0)
	for i := 1; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst := createDiscoveryInstanceWithNoGossip(3610+i, id, bootPeers)
		instances = append(instances, inst)
		go func() {
			for {
				if atomic.LoadInt32(&toDie) == int32(1) {
					return
				}
				time.Sleep(getAliveExpirationTimeout() / 3)
				inst.InitiateSync(9)
			}
		}()
	}
	time.Sleep(getAliveExpirationTimeout() * 4)
	assertMembership(t, instances, nodeNum-1)
	atomic.StoreInt32(&toDie, int32(1))
	stopInstances(t, instances)
}

func TestExpiration(t *testing.T) {
	t.Parallel()
	nodeNum := 5
	bootPeers := []string{bootPeer(2611), bootPeer(2612)}
	instances := []*gossipInstance{}

	inst := createDiscoveryInstance(2611, "d1", bootPeers)
	instances = append(instances, inst)

	inst = createDiscoveryInstance(2612, "d2", bootPeers)
	instances = append(instances, inst)

	for i := 3; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst = createDiscoveryInstance(2610+i, id, bootPeers)
		instances = append(instances, inst)
	}

	assertMembership(t, instances, nodeNum-1)

	waitUntilOrFailBlocking(t, instances[nodeNum-1].Stop)
	waitUntilOrFailBlocking(t, instances[nodeNum-2].Stop)

	assertMembership(t, instances[:len(instances)-2], nodeNum-3)

	stopAction := &sync.WaitGroup{}
	for i, inst := range instances {
		if i+2 == nodeNum {
			break
		}
		stopAction.Add(1)
		go func(inst *gossipInstance) {
			defer stopAction.Done()
			inst.Stop()
		}(inst)
	}

	waitUntilOrFailBlocking(t, stopAction.Wait)
}

func TestGetFullMembership(t *testing.T) {
	t.Parallel()
	nodeNum := 15
	bootPeers := []string{bootPeer(5511), bootPeer(5512)}
	instances := []*gossipInstance{}
	var inst *gossipInstance

	for i := 3; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst = createDiscoveryInstance(5510+i, id, bootPeers)
		instances = append(instances, inst)
	}

	time.Sleep(time.Second)

	inst = createDiscoveryInstance(5511, "d1", bootPeers)
	instances = append(instances, inst)

	inst = createDiscoveryInstance(5512, "d2", bootPeers)
	instances = append(instances, inst)

	assertMembership(t, instances, nodeNum-1)

	// Ensure that the internal endpoint was propagated to everyone
	for _, inst := range instances {
		for _, member := range inst.GetMembership() {
			assert.NotEmpty(t, member.InternalEndpoint)
			assert.NotEmpty(t, member.Endpoint)
		}
	}

	// Check that Lookup() is valid
	for _, inst := range instances {
		for _, member := range inst.GetMembership() {
			assert.Equal(t, string(member.PKIid), inst.Lookup(member.PKIid).Endpoint)
			assert.Equal(t, member.PKIid, inst.Lookup(member.PKIid).PKIid)
		}
	}

	stopInstances(t, instances)
}

func TestGossipDiscoveryStopping(t *testing.T) {
	t.Parallel()
	inst := createDiscoveryInstance(9611, "d1", []string{bootPeer(9611)})
	time.Sleep(time.Second)
	waitUntilOrFailBlocking(t, inst.Stop)
}

func TestGossipDiscoverySkipConnectingToLocalhostBootstrap(t *testing.T) {
	t.Parallel()
	inst := createDiscoveryInstance(11611, "d1", []string{"localhost:11611", "127.0.0.1:11611"})
	inst.comm.lock.Lock()
	inst.comm.mock = &mock.Mock{}
	inst.comm.mock.On("SendToPeer", mock.Anything, mock.Anything).Run(func(mock.Arguments) {
		t.Fatal("Should not have connected to any peer")
	})
	inst.comm.mock.On("Ping", mock.Anything).Run(func(mock.Arguments) {
		t.Fatal("Should not have connected to any peer")
	})
	inst.comm.lock.Unlock()
	time.Sleep(time.Second * 3)
	waitUntilOrFailBlocking(t, inst.Stop)
}

func TestConvergence(t *testing.T) {
	t.Parallel()
	// Scenario:
	// {boot peer: [peer list]}
	// {d1: d2, d3, d4}
	// {d5: d6, d7, d8}
	// {d9: d10, d11, d12}
	// Connect all boot peers with d13, then take down d13
	// and ensure there is still full membership.
	instances := []*gossipInstance{}
	for _, i := range []int{1, 5, 9} {
		bootPort := 4610 + i
		id := fmt.Sprintf("d%d", i)
		leader := createDiscoveryInstance(bootPort, id, []string{})
		instances = append(instances, leader)
		for minionIndex := 1; minionIndex <= 3; minionIndex++ {
			id := fmt.Sprintf("d%d", i+minionIndex)
			minion := createDiscoveryInstance(4610+minionIndex+i, id, []string{bootPeer(bootPort)})
			instances = append(instances, minion)
		}
	}

	assertMembership(t, instances, 3)
	connector := createDiscoveryInstance(4623, "d13", []string{bootPeer(4611), bootPeer(4615), bootPeer(4619)})
	instances = append(instances, connector)
	assertMembership(t, instances, 12)
	connector.Stop()
	instances = instances[:len(instances)-1]
	assertMembership(t, instances, 11)
	stopInstances(t, instances)
}

func TestDisclosurePolicyWithPull(t *testing.T) {
	t.Parallel()
	// Scenario: run 2 groups of peers that simulate 2 organizations:
	// {p0, p1, p2, p3, p4}
	// {p5, p6, p7, p8, p9}
	// Only peers with an even id have external addresses, and only these
	// peers should be published to peers of the other group, while the only
	// ones that need to know about them are peers that have an even id
	// themselves. Furthermore, peers in different groups should not learn
	// the internal addresses of each other's peers.

	// This is a bootstrap map that matches each peer to its own bootstrap peer.
	// In practice (production), peers should only use peers of their own org as
	// bootstrap peers, but the discovery layer is ignorant of organizations.
	bootPeerMap := map[int]int{
		8610: 8616,
		8611: 8610,
		8612: 8610,
		8613: 8610,
		8614: 8610,
		8615: 8616,
		8616: 8610,
		8617: 8616,
		8618: 8616,
		8619: 8616,
	}

	// This map lists, for each peer, the peers it should know about in the test scenario.
	peersThatShouldBeKnownToPeers := map[int][]int{
		8610: {8611, 8612, 8613, 8614, 8616, 8618},
		8611: {8610, 8612, 8613, 8614},
		8612: {8610, 8611, 8613, 8614, 8616, 8618},
		8613: {8610, 8611, 8612, 8614},
		8614: {8610, 8611, 8612, 8613, 8616, 8618},
		8615: {8616, 8617, 8618, 8619},
		8616: {8610, 8612, 8614, 8615, 8617, 8618, 8619},
		8617: {8615, 8616, 8618, 8619},
		8618: {8610, 8612, 8614, 8615, 8616, 8617, 8619},
		8619: {8615, 8616, 8617, 8618},
	}
	// Create the peers in the two groups
	instances1, instances2 := createDisjointPeerGroupsWithNoGossip(bootPeerMap)
	// Wait until membership is established. This should happen quickly because
	// the instances are configured to pull membership at a very high frequency
	// from up to 10 peers (which amounts to pulling from everyone).
	waitUntilOrFail(t, func() bool {
		for _, inst := range append(instances1, instances2...) {
			// Ensure the expected membership is equal in size to the actual
			// membership of each peer.
			portsOfKnownMembers := portsOfMembers(inst.GetMembership())
			if len(peersThatShouldBeKnownToPeers[inst.port]) != len(portsOfKnownMembers) {
				return false
			}
		}
		return true
	})
	for _, inst := range append(instances1, instances2...) {
		portsOfKnownMembers := portsOfMembers(inst.GetMembership())
		// Ensure the expected membership is equal to the actual membership of
		// each peer. portsOfMembers returns a sorted slice, so assert.Equal does the job.
		assert.Equal(t, peersThatShouldBeKnownToPeers[inst.port], portsOfKnownMembers)
		// Next, check that internal endpoints aren't leaked across groups.
		for _, knownPeer := range inst.GetMembership() {
			// If the internal endpoint is known, ensure the peers are in the same group,
			// unless the peer in question is a peer that has a public address.
			// We cannot control what we disclose about ourselves when we send a membership request.
			if len(knownPeer.InternalEndpoint) > 0 && inst.port%2 != 0 {
				bothInGroup1 := portOfEndpoint(knownPeer.Endpoint) < 8615 && inst.port < 8615
				bothInGroup2 := portOfEndpoint(knownPeer.Endpoint) >= 8615 && inst.port >= 8615
				assert.True(t, bothInGroup1 || bothInGroup2, "%v knows about %v's internal endpoint", inst.port, knownPeer.InternalEndpoint)
			}
		}
	}

	t.Log("Shutting down instance 0...")
	// Now we shut down instance 0 and ensure that the peers that shouldn't
	// know it do not learn about it via membership requests
	stopInstances(t, []*gossipInstance{instances1[0]})
	time.Sleep(time.Second * 3)
	for _, inst := range append(instances1[1:], instances2...) {
		if peersThatShouldBeKnownToPeers[inst.port][0] == 8610 {
			assert.Equal(t, 1, inst.Discovery.(*gossipDiscoveryImpl).deadMembership.Size())
		} else {
			assert.Equal(t, 0, inst.Discovery.(*gossipDiscoveryImpl).deadMembership.Size())
		}
	}
	stopInstances(t, instances1[1:])
	stopInstances(t, instances2)
}

func createDisjointPeerGroupsWithNoGossip(bootPeerMap map[int]int) ([]*gossipInstance, []*gossipInstance) {
	instances1 := []*gossipInstance{}
	instances2 := []*gossipInstance{}
	for group := 0; group < 2; group++ {
		for i := 0; i < 5; i++ {
			id := fmt.Sprintf("id%d", group*5+i)
			port := 8610 + group*5 + i
			bootPeers := []string{bootPeer(bootPeerMap[port])}
			pol := discPolForPeer(port)
			inst := createDiscoveryInstanceWithNoGossipWithDisclosurePolicy(port, id, bootPeers, pol)
			inst.initiateSync(getAliveExpirationTimeout()/3, 10)
			if group == 0 {
				instances1 = append(instances1, inst)
			} else {
				instances2 = append(instances2, inst)
			}
		}
	}
	return instances1, instances2
}

func discPolForPeer(selfPort int) DisclosurePolicy {
	return func(remotePeer *NetworkMember) (Sieve, EnvelopeFilter) {
		targetPortStr := strings.Split(remotePeer.Endpoint, ":")[1]
		targetPort, _ := strconv.ParseInt(targetPortStr, 10, 64)
		return func(msg *proto.SignedGossipMessage) bool {
				portOfAliveMsgStr := strings.Split(msg.GetAliveMsg().Membership.Endpoint, ":")[1]
				portOfAliveMsg, _ := strconv.ParseInt(portOfAliveMsgStr, 10, 64)

				if portOfAliveMsg < 8615 && targetPort < 8615 {
					return true
				}
				if portOfAliveMsg >= 8615 && targetPort >= 8615 {
					return true
				}

				// Else, expose peers with even ids to other peers with even ids
				return portOfAliveMsg%2 == 0 && targetPort%2 == 0
			}, func(msg *proto.SignedGossipMessage) *proto.Envelope {
				if selfPort < 8615 && targetPort >= 8615 {
					msg.Envelope.SecretEnvelope = nil
				}

				if selfPort >= 8615 && targetPort < 8615 {
					msg.Envelope.SecretEnvelope = nil
				}

				return msg.Envelope
			}
	}
}
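
// Illustrative sketch, not part of the original test file: the sieve returned
// by discPolForPeer reduces to "same group, or both ports even" (ports below
// 8615 form group 1, the rest group 2), while the envelope filter strips the
// secret internal endpoint whenever a message crosses group boundaries.
// exampleSieveRule isolates the sieve decision for two concrete ports.
func exampleSieveRule(portOfAliveMsg, targetPort int64) bool {
	sameGroup := (portOfAliveMsg < 8615) == (targetPort < 8615)
	bothEven := portOfAliveMsg%2 == 0 && targetPort%2 == 0
	return sameGroup || bothEven
}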

func TestConfigFromFile(t *testing.T) {
	preAliveTimeInterval := getAliveTimeInterval()
	preAliveExpirationTimeout := getAliveExpirationTimeout()
	preAliveExpirationCheckInterval := getAliveExpirationCheckInterval()
	preReconnectInterval := getReconnectInterval()

	// Restore the config values in order to avoid impacting other tests
	defer func() {
		SetAliveTimeInterval(preAliveTimeInterval)
		SetAliveExpirationTimeout(preAliveExpirationTimeout)
		SetAliveExpirationCheckInterval(preAliveExpirationCheckInterval)
		SetReconnectInterval(preReconnectInterval)
	}()

	// Verify that default values are used when the config is missing
	viper.Reset()
	aliveExpirationCheckInterval = 0 * time.Second
	assert.Equal(t, time.Duration(5)*time.Second, getAliveTimeInterval())
	assert.Equal(t, time.Duration(25)*time.Second, getAliveExpirationTimeout())
	assert.Equal(t, time.Duration(25)*time.Second/10, getAliveExpirationCheckInterval())
	assert.Equal(t, time.Duration(25)*time.Second, getReconnectInterval())

	// Verify reading the values from the config file
	viper.Reset()
	aliveExpirationCheckInterval = 0 * time.Second
	viper.SetConfigName("core")
	viper.SetEnvPrefix("CORE")
	config.AddDevConfigPath(nil)
	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	viper.AutomaticEnv()
	err := viper.ReadInConfig()
	assert.NoError(t, err)
	assert.Equal(t, time.Duration(5)*time.Second, getAliveTimeInterval())
	assert.Equal(t, time.Duration(25)*time.Second, getAliveExpirationTimeout())
	assert.Equal(t, time.Duration(25)*time.Second/10, getAliveExpirationCheckInterval())
	assert.Equal(t, time.Duration(25)*time.Second, getReconnectInterval())
}

func TestMsgStoreExpiration(t *testing.T) {
	// Starts 4 instances, waits for membership to build, then stops 2 instances.
	// Checks that the 2 remaining instances see only each other.
	// Waits for expiration and checks that alive messages and related entries
	// in the maps are removed from the running instances.
	t.Parallel()
	nodeNum := 4
	bootPeers := []string{bootPeer(12611), bootPeer(12612)}
	instances := []*gossipInstance{}

	inst := createDiscoveryInstance(12611, "d1", bootPeers)
	instances = append(instances, inst)

	inst = createDiscoveryInstance(12612, "d2", bootPeers)
	instances = append(instances, inst)

	for i := 3; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst = createDiscoveryInstance(12610+i, id, bootPeers)
		instances = append(instances, inst)
	}

	assertMembership(t, instances, nodeNum-1)

	waitUntilOrFailBlocking(t, instances[nodeNum-1].Stop)
	waitUntilOrFailBlocking(t, instances[nodeNum-2].Stop)

	assertMembership(t, instances[:len(instances)-2], nodeNum-3)

	checkMessages := func() bool {
		for _, inst := range instances[:len(instances)-2] {
			for _, downInst := range instances[len(instances)-2:] {
				downCastInst := inst.discoveryImpl()
				downCastInst.lock.RLock()
				if _, exist := downCastInst.aliveLastTS[string(downInst.discoveryImpl().self.PKIid)]; exist {
					downCastInst.lock.RUnlock()
					return false
				}
				if _, exist := downCastInst.deadLastTS[string(downInst.discoveryImpl().self.PKIid)]; exist {
					downCastInst.lock.RUnlock()
					return false
				}
				if _, exist := downCastInst.id2Member[string(downInst.discoveryImpl().self.PKIid)]; exist {
					downCastInst.lock.RUnlock()
					return false
				}
				if downCastInst.aliveMembership.MsgByID(downInst.discoveryImpl().self.PKIid) != nil {
					downCastInst.lock.RUnlock()
					return false
				}
				if downCastInst.deadMembership.MsgByID(downInst.discoveryImpl().self.PKIid) != nil {
					downCastInst.lock.RUnlock()
					return false
				}
				for _, am := range downCastInst.msgStore.Get() {
					m := am.(*proto.SignedGossipMessage).GetAliveMsg()
					if bytes.Equal(m.Membership.PkiId, downInst.discoveryImpl().self.PKIid) {
						downCastInst.lock.RUnlock()
						return false
					}
				}
				downCastInst.lock.RUnlock()
			}
		}
		return true
	}

	waitUntilTimeoutOrFail(t, checkMessages, getAliveExpirationTimeout()*(msgExpirationFactor+5))

	assertMembership(t, instances[:len(instances)-2], nodeNum-3)

	stopInstances(t, instances[:len(instances)-2])
}

func TestMsgStoreExpirationWithMembershipMessages(t *testing.T) {
	// Creates 3 discovery instances without gossip communication
	// Generates a MembershipRequest msg for each instance using createMembershipRequest
	// Generates an Alive msg for each instance using createAliveMessage
	// Builds membership using the Alive msgs
	// Checks msgStore and related maps
	// Generates MembershipResponse msgs for each instance using createMembershipResponse
	// Generates a new set of Alive msgs and processes them
	// Checks msgStore and related maps
	// Waits for expiration and checks msgStore and related maps
	// Processes the stored MembershipRequest msgs and checks msgStore and related maps
	// Processes the stored MembershipResponse msgs and checks msgStore and related maps

	t.Parallel()
	bootPeers := []string{}
	peersNum := 3
	instances := []*gossipInstance{}
	aliveMsgs := []*proto.SignedGossipMessage{}
	newAliveMsgs := []*proto.SignedGossipMessage{}
	memReqMsgs := []*proto.SignedGossipMessage{}
	memRespMsgs := make(map[int][]*proto.MembershipResponse)

	for i := 0; i < peersNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst := createDiscoveryInstanceWithNoGossip(22610+i, id, bootPeers)
		instances = append(instances, inst)
	}

	// Creating MembershipRequest messages
	for i := 0; i < peersNum; i++ {
		memReqMsg, _ := instances[i].discoveryImpl().createMembershipRequest(true)
		sMsg, _ := memReqMsg.NoopSign()
		memReqMsgs = append(memReqMsgs, sMsg)
	}
	// Creating Alive messages
	for i := 0; i < peersNum; i++ {
		aliveMsg, _ := instances[i].discoveryImpl().createAliveMessage(true)
		aliveMsgs = append(aliveMsgs, aliveMsg)
	}

	repeatForFiltered := func(n int, filter func(i int) bool, action func(i int)) {
		for i := 0; i < n; i++ {
			if filter(i) {
				continue
			}
			action(i)
		}
	}

	// Handling Alive
	for i := 0; i < peersNum; i++ {
		for k := 0; k < peersNum; k++ {
			instances[i].discoveryImpl().handleMsgFromComm(aliveMsgs[k])
		}
	}

	checkExistence := func(instances []*gossipInstance, msgs []*proto.SignedGossipMessage, index int, i int, step string) {
		_, exist := instances[index].discoveryImpl().aliveLastTS[string(instances[i].discoveryImpl().self.PKIid)]
		assert.True(t, exist, fmt.Sprint(step, " Data from alive msg ", i, " doesn't exist in aliveLastTS of discovery inst ", index))

		_, exist = instances[index].discoveryImpl().id2Member[string(instances[i].discoveryImpl().self.PKIid)]
		assert.True(t, exist, fmt.Sprint(step, " id2Member mapping doesn't exist for alive msg ", i, " of discovery inst ", index))

		assert.NotNil(t, instances[index].discoveryImpl().aliveMembership.MsgByID(instances[i].discoveryImpl().self.PKIid), fmt.Sprint(step, " Alive msg ", i, " doesn't exist in aliveMembership of discovery inst ", index))

		assert.Contains(t, instances[index].discoveryImpl().msgStore.Get(), msgs[i], fmt.Sprint(step, " Alive msg ", i, " not stored in store of discovery inst ", index))
	}

	checkAliveMsgExist := func(instances []*gossipInstance, msgs []*proto.SignedGossipMessage, index int, step string) {
		instances[index].discoveryImpl().lock.RLock()
		defer instances[index].discoveryImpl().lock.RUnlock()
		repeatForFiltered(peersNum,
			func(k int) bool {
				return k == index
			},
			func(k int) {
				checkExistence(instances, msgs, index, k, step)
			})
	}

	// Checking that the Alive msgs were processed
	for i := 0; i < peersNum; i++ {
		checkAliveMsgExist(instances, aliveMsgs, i, "[Step 1 - processing aliveMsg]")
	}

	// Creating MembershipResponse while all instances have full membership
	for i := 0; i < peersNum; i++ {
		peerToResponse := &NetworkMember{
			Metadata:         []byte{},
			PKIid:            []byte(fmt.Sprintf("localhost:%d", 22610+i)),
			Endpoint:         fmt.Sprintf("localhost:%d", 22610+i),
			InternalEndpoint: fmt.Sprintf("localhost:%d", 22610+i),
		}
		memRespMsgs[i] = []*proto.MembershipResponse{}
		repeatForFiltered(peersNum,
			func(k int) bool {
				return k == i
			},
			func(k int) {
				aliveMsg, _ := instances[k].discoveryImpl().createAliveMessage(true)
				memResp := instances[k].discoveryImpl().createMembershipResponse(aliveMsg, peerToResponse)
				memRespMsgs[i] = append(memRespMsgs[i], memResp)
			})
	}

	// Re-creating Alive msgs with the highest seq_num, to make sure the Alive msgs in memReq and memResp are older
	for i := 0; i < peersNum; i++ {
		aliveMsg, _ := instances[i].discoveryImpl().createAliveMessage(true)
		newAliveMsgs = append(newAliveMsgs, aliveMsg)
	}

	// Handling the new Alive set
	for i := 0; i < peersNum; i++ {
		for k := 0; k < peersNum; k++ {
			instances[i].discoveryImpl().handleMsgFromComm(newAliveMsgs[k])
		}
	}

	// Checking that the new Alive set was processed
	for i := 0; i < peersNum; i++ {
		checkAliveMsgExist(instances, newAliveMsgs, i, "[Step 2 - processing aliveMsg]")
	}

	checkAliveMsgNotExist := func(instances []*gossipInstance, msgs []*proto.SignedGossipMessage, index int, step string) {
		instances[index].discoveryImpl().lock.RLock()
		defer instances[index].discoveryImpl().lock.RUnlock()
		assert.Empty(t, instances[index].discoveryImpl().aliveLastTS, fmt.Sprint(step, " Data from alive msg still exists in aliveLastTS of discovery inst ", index))
		assert.Empty(t, instances[index].discoveryImpl().deadLastTS, fmt.Sprint(step, " Data from alive msg still exists in deadLastTS of discovery inst ", index))
		assert.Empty(t, instances[index].discoveryImpl().id2Member, fmt.Sprint(step, " id2Member mapping still contains data related to Alive msg: discovery inst ", index))
		assert.Empty(t, instances[index].discoveryImpl().msgStore.Get(), fmt.Sprint(step, " Expired Alive msg still stored in store of discovery inst ", index))
		assert.Zero(t, instances[index].discoveryImpl().aliveMembership.Size(), fmt.Sprint(step, " Alive membership list is not empty, discovery instance ", index))
		assert.Zero(t, instances[index].discoveryImpl().deadMembership.Size(), fmt.Sprint(step, " Dead membership list is not empty, discovery instance ", index))
	}

	// Sleep until expiration
	time.Sleep(getAliveExpirationTimeout() * (msgExpirationFactor + 5))

	// Checking that the Alive msgs expired
	for i := 0; i < peersNum; i++ {
		checkAliveMsgNotExist(instances, newAliveMsgs, i, "[Step 3 - expiration in msg store]")
	}

	// Processing the old MembershipRequest msgs
	for i := 0; i < peersNum; i++ {
		repeatForFiltered(peersNum,
			func(k int) bool {
				return k == i
			},
			func(k int) {
				instances[i].discoveryImpl().handleMsgFromComm(memReqMsgs[k])
			})
	}

	// MembershipRequest processing didn't change anything
	for i := 0; i < peersNum; i++ {
		checkAliveMsgNotExist(instances, aliveMsgs, i, "[Step 4 - memReq processing after expiration]")
	}

	// Processing the old Alive msgs again
	for i := 0; i < peersNum; i++ {
		for k := 0; k < peersNum; k++ {
			instances[i].discoveryImpl().handleMsgFromComm(aliveMsgs[k])
		}
	}

	// Alive msg processing didn't change anything
	for i := 0; i < peersNum; i++ {
		checkAliveMsgNotExist(instances, aliveMsgs, i, "[Step 5.1 - after lost old aliveMsg process]")
		checkAliveMsgNotExist(instances, newAliveMsgs, i, "[Step 5.2 - after lost new aliveMsg process]")
	}

	// Handling the old MembershipResponse msgs
	for i := 0; i < peersNum; i++ {
		respForPeer := memRespMsgs[i]
		for _, msg := range respForPeer {
			sMsg, _ := (&proto.GossipMessage{
				Tag:   proto.GossipMessage_EMPTY,
				Nonce: uint64(0),
				Content: &proto.GossipMessage_MemRes{
					MemRes: msg,
				},
			}).NoopSign()
			instances[i].discoveryImpl().handleMsgFromComm(sMsg)
		}
	}

	// MembershipResponse msg processing didn't change anything
	for i := 0; i < peersNum; i++ {
		checkAliveMsgNotExist(instances, aliveMsgs, i, "[Step 6 - after lost MembershipResp process]")
	}

	for i := 0; i < peersNum; i++ {
		instances[i].Stop()
	}
}

func TestAliveMsgStore(t *testing.T) {
	t.Parallel()

	bootPeers := []string{}
	peersNum := 2
	instances := []*gossipInstance{}
	aliveMsgs := []*proto.SignedGossipMessage{}
	memReqMsgs := []*proto.SignedGossipMessage{}

	for i := 0; i < peersNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst := createDiscoveryInstanceWithNoGossip(32610+i, id, bootPeers)
		instances = append(instances, inst)
	}

	// Creating MembershipRequest messages
	for i := 0; i < peersNum; i++ {
		memReqMsg, _ := instances[i].discoveryImpl().createMembershipRequest(true)
		sMsg, _ := memReqMsg.NoopSign()
		memReqMsgs = append(memReqMsgs, sMsg)
	}
	// Creating Alive messages
	for i := 0; i < peersNum; i++ {
		aliveMsg, _ := instances[i].discoveryImpl().createAliveMessage(true)
		aliveMsgs = append(aliveMsgs, aliveMsg)
	}

	// Check new alive msgs
	for _, msg := range aliveMsgs {
		assert.True(t, instances[0].discoveryImpl().msgStore.CheckValid(msg), "aliveMsgStore CheckValid returns false on new AliveMsg")
	}

	// Add new alive msgs
	for _, msg := range aliveMsgs {
		assert.True(t, instances[0].discoveryImpl().msgStore.Add(msg), "aliveMsgStore Add returns false on new AliveMsg")
	}

	// Check existing alive msgs
	for _, msg := range aliveMsgs {
		assert.False(t, instances[0].discoveryImpl().msgStore.CheckValid(msg), "aliveMsgStore CheckValid returns true on existing AliveMsg")
	}

	// Check non-alive msgs
	for _, msg := range memReqMsgs {
		assert.Panics(t, func() { instances[1].discoveryImpl().msgStore.CheckValid(msg) }, "aliveMsgStore CheckValid should panic on a new MembershipRequest msg")
		assert.Panics(t, func() { instances[1].discoveryImpl().msgStore.Add(msg) }, "aliveMsgStore Add should panic on a new MembershipRequest msg")
	}
}

func TestMemRespDisclosurePol(t *testing.T) {
	t.Parallel()
	pol := func(remotePeer *NetworkMember) (Sieve, EnvelopeFilter) {
		return func(_ *proto.SignedGossipMessage) bool {
				return remotePeer.Endpoint == "localhost:7880"
			}, func(m *proto.SignedGossipMessage) *proto.Envelope {
				return m.Envelope
			}
	}
	d1 := createDiscoveryInstanceThatGossips(7878, "d1", []string{}, true, pol)
	defer d1.Stop()
	d2 := createDiscoveryInstanceThatGossips(7879, "d2", []string{"localhost:7878"}, true, noopPolicy)
	defer d2.Stop()
	d3 := createDiscoveryInstanceThatGossips(7880, "d3", []string{"localhost:7878"}, true, noopPolicy)
	defer d3.Stop()
	// d1 and d3 know each other, and also know about d2
	assertMembership(t, []*gossipInstance{d1, d3}, 2)
	// d2 doesn't know about anyone because the bootstrap peer ignores it due to the custom policy
	assertMembership(t, []*gossipInstance{d2}, 0)
	assert.Zero(t, d2.receivedMsgCount())
	assert.NotZero(t, d2.sentMsgCount())
}

func waitUntilOrFail(t *testing.T, pred func() bool) {
	waitUntilTimeoutOrFail(t, pred, timeout)
}

func waitUntilTimeoutOrFail(t *testing.T, pred func() bool, timeout time.Duration) {
	start := time.Now()
	limit := start.UnixNano() + timeout.Nanoseconds()
	for time.Now().UnixNano() < limit {
		if pred() {
			return
		}
		time.Sleep(timeout / 10)
	}
	assert.Fail(t, "Timeout expired!")
}
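
// Illustrative sketch, not part of the original test file: the polling helpers
// above are used throughout this file in the pattern shown here: build a
// boolean predicate over an instance and poll it until the shared timeout elapses.
func exampleWaitForMembershipOf(t *testing.T, inst *gossipInstance, expected int) {
	waitUntilOrFail(t, func() bool {
		return len(inst.GetMembership()) == expected
	})
}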

func waitUntilOrFailBlocking(t *testing.T, f func()) {
	successChan := make(chan struct{}, 1)
	go func() {
		f()
		successChan <- struct{}{}
	}()
	select {
	case <-time.NewTimer(timeout).C:
	case <-successChan:
		return
	}
	assert.Fail(t, "Timeout expired!")
}

func stopInstances(t *testing.T, instances []*gossipInstance) {
	stopAction := &sync.WaitGroup{}
	for _, inst := range instances {
		stopAction.Add(1)
		go func(inst *gossipInstance) {
			defer stopAction.Done()
			inst.Stop()
		}(inst)
	}

	waitUntilOrFailBlocking(t, stopAction.Wait)
}

func assertMembership(t *testing.T, instances []*gossipInstance, expectedNum int) {
	wg := sync.WaitGroup{}
	wg.Add(len(instances))

	ctx, cancellation := context.WithTimeout(context.Background(), timeout)
	defer cancellation()

	for _, inst := range instances {
		go func(ctx context.Context, i *gossipInstance) {
			defer wg.Done()
			for {
				select {
				case <-ctx.Done():
					return
				case <-time.After(timeout / 10):
					if len(i.GetMembership()) == expectedNum {
						return
					}
				}
			}
		}(ctx, inst)
	}

	wg.Wait()
	assert.NoError(t, ctx.Err(), "Timeout expired!")
}

func portsOfMembers(members []NetworkMember) []int {
	ports := make([]int, len(members))
	for i := range members {
		ports[i] = portOfEndpoint(members[i].Endpoint)
	}
	sort.Ints(ports)
	return ports
}

func portOfEndpoint(endpoint string) int {
	port, _ := strconv.ParseInt(strings.Split(endpoint, ":")[1], 10, 64)
	return int(port)
}