github.com/adnan-c/fabric_e2e_couchdb@v0.6.1-preview.0.20170228180935-21ce6b23cf91/gossip/discovery/discovery_test.go (about)

/*
Copyright IBM Corp. 2016 All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

		 http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package discovery

import (
	"fmt"
	"io"
	"net"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/hyperledger/fabric/gossip/common"
	proto "github.com/hyperledger/fabric/protos/gossip"
	"github.com/spf13/viper"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

var timeout = 15 * time.Second

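// init shortens the discovery timing parameters so that liveness and
// expiration events happen fast enough for these tests to observe.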
func init() {
	aliveTimeInterval := 100 * time.Millisecond
	SetAliveTimeInterval(aliveTimeInterval)
	SetAliveExpirationTimeout(10 * aliveTimeInterval)
	SetAliveExpirationCheckInterval(aliveTimeInterval)
	SetReconnectInterval(10 * aliveTimeInterval)
}

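// dummyCommModule implements the comm and crypto dependencies that the
// discovery service expects, using real gRPC streams between instances;
// an optional mock lets tests intercept Ping and SendToPeer calls.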
type dummyCommModule struct {
	id           string
	presumeDead  chan common.PKIidType
	detectedDead chan string
	streams      map[string]proto.Gossip_GossipStreamClient
	conns        map[string]*grpc.ClientConn
	lock         *sync.RWMutex
	incMsgs      chan *proto.SignedGossipMessage
	lastSeqs     map[string]uint64
	shouldGossip bool
	mock         *mock.Mock
}

type gossipMsg struct {
	*proto.GossipMessage
}

func (m *gossipMsg) GetGossipMessage() *proto.GossipMessage {
	return m.GossipMessage
}

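// gossipInstance couples a Discovery instance with the gRPC server,
// listener, and dummy comm module that serve it in these tests.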
type gossipInstance struct {
	comm *dummyCommModule
	Discovery
	gRPCserv     *grpc.Server
	lsnr         net.Listener
	shouldGossip bool
}

func (comm *dummyCommModule) ValidateAliveMsg(am *proto.SignedGossipMessage) bool {
	return true
}

func (comm *dummyCommModule) SignMessage(am *proto.GossipMessage, internalEndpoint string) *proto.Envelope {
	secret := &proto.Secret{
		Content: &proto.Secret_InternalEndpoint{
			InternalEndpoint: internalEndpoint,
		},
	}
	signer := func(msg []byte) ([]byte, error) {
		return nil, nil
	}
	env := am.NoopSign().Envelope
	env.SignSecret(signer, secret)
	return env
}

func (comm *dummyCommModule) Gossip(msg *proto.SignedGossipMessage) {
	if !comm.shouldGossip {
		return
	}
	comm.lock.Lock()
	defer comm.lock.Unlock()
	for _, conn := range comm.streams {
		conn.Send(msg.Envelope)
	}
}

func (comm *dummyCommModule) SendToPeer(peer *NetworkMember, msg *proto.SignedGossipMessage) {
	comm.lock.RLock()
	_, exists := comm.streams[peer.Endpoint]
	mock := comm.mock
	comm.lock.RUnlock()

	if mock != nil {
		mock.Called()
	}

	if !exists {
		if !comm.Ping(peer) {
			fmt.Printf("Ping to %v failed\n", peer.Endpoint)
			return
		}
	}
	comm.lock.Lock()
	comm.streams[peer.Endpoint].Send(msg.NoopSign().Envelope)
	comm.lock.Unlock()
}

func (comm *dummyCommModule) Ping(peer *NetworkMember) bool {
	comm.lock.Lock()
	defer comm.lock.Unlock()

	if comm.mock != nil {
		comm.mock.Called()
	}

	_, alreadyExists := comm.streams[peer.Endpoint]
	if !alreadyExists {
		newConn, err := grpc.Dial(peer.Endpoint, grpc.WithInsecure())
		if err != nil {
			return false
		}
		if stream, err := proto.NewGossipClient(newConn).GossipStream(context.Background()); err == nil {
			comm.conns[peer.Endpoint] = newConn
			comm.streams[peer.Endpoint] = stream
			return true
		}
		return false
	}
	conn := comm.conns[peer.Endpoint]
	if _, err := proto.NewGossipClient(conn).Ping(context.Background(), &proto.Empty{}); err != nil {
		return false
	}
	return true
}

func (comm *dummyCommModule) Accept() <-chan *proto.SignedGossipMessage {
	return comm.incMsgs
}

func (comm *dummyCommModule) PresumedDead() <-chan common.PKIidType {
	return comm.presumeDead
}

func (comm *dummyCommModule) CloseConn(peer *NetworkMember) {
	comm.lock.Lock()
	defer comm.lock.Unlock()

	if _, exists := comm.streams[peer.Endpoint]; !exists {
		return
	}

	comm.streams[peer.Endpoint].CloseSend()
	comm.conns[peer.Endpoint].Close()
}

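// GossipStream receives envelopes from a peer, pushes each deserialized
// message into incMsgs, and forwards alive messages onward.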
func (g *gossipInstance) GossipStream(stream proto.Gossip_GossipStreamServer) error {
	for {
		envelope, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		lgr := g.Discovery.(*gossipDiscoveryImpl).logger
		gMsg, err := envelope.ToGossipMessage()
		if err != nil {
			lgr.Warning("Failed deserializing GossipMessage from envelope:", err)
			continue
		}

		lgr.Debug(g.Discovery.Self().Endpoint, "Got message:", gMsg)
		g.comm.incMsgs <- gMsg

		if aliveMsg := gMsg.GetAliveMsg(); aliveMsg != nil {
			g.tryForwardMessage(gMsg)
		}
	}
}

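// tryForwardMessage gossips an alive message only if its sequence number is
// higher than the last one seen from that peer, which prevents re-gossiping
// stale or duplicate messages.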
func (g *gossipInstance) tryForwardMessage(msg *proto.SignedGossipMessage) {
	g.comm.lock.Lock()

	aliveMsg := msg.GetAliveMsg()

	forward := false
	id := string(aliveMsg.Membership.PkiID)
	seqNum := aliveMsg.Timestamp.SeqNum
	if last, exists := g.comm.lastSeqs[id]; exists {
		if last < seqNum {
			g.comm.lastSeqs[id] = seqNum
			forward = true
		}
	} else {
		g.comm.lastSeqs[id] = seqNum
		forward = true
	}

	g.comm.lock.Unlock()

	if forward {
		g.comm.Gossip(msg)
	}
}

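// Stop shuts down the gRPC server, the listener, all client streams and
// connections, and the underlying discovery instance.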
func (g *gossipInstance) Stop() {
	g.gRPCserv.Stop()
	g.lsnr.Close()
	for _, stream := range g.comm.streams {
		stream.CloseSend()
	}
	for _, conn := range g.comm.conns {
		conn.Close()
	}
	g.Discovery.Stop()
}

func (g *gossipInstance) Ping(context.Context, *proto.Empty) (*proto.Empty, error) {
	return &proto.Empty{}, nil
}

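// createDiscoveryInstance starts a discovery instance, with gossip
// forwarding enabled, that listens on the given port.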
func createDiscoveryInstance(port int, id string, bootstrapPeers []string) *gossipInstance {
	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, true)
}

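// createDiscoveryInstanceWithNoGossip starts a discovery instance whose comm
// module does not forward received gossip messages to other peers.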
func createDiscoveryInstanceWithNoGossip(port int, id string, bootstrapPeers []string) *gossipInstance {
	return createDiscoveryInstanceThatGossips(port, id, bootstrapPeers, false)
}

func createDiscoveryInstanceThatGossips(port int, id string, bootstrapPeers []string, shouldGossip bool) *gossipInstance {
	comm := &dummyCommModule{
		conns:        make(map[string]*grpc.ClientConn),
		streams:      make(map[string]proto.Gossip_GossipStreamClient),
		incMsgs:      make(chan *proto.SignedGossipMessage, 1000),
		presumeDead:  make(chan common.PKIidType, 10000),
		id:           id,
		detectedDead: make(chan string, 10000),
		lock:         &sync.RWMutex{},
		lastSeqs:     make(map[string]uint64),
		shouldGossip: shouldGossip,
	}

	endpoint := fmt.Sprintf("localhost:%d", port)
	self := NetworkMember{
		Metadata:         []byte{},
		PKIid:            []byte(endpoint),
		Endpoint:         endpoint,
		InternalEndpoint: endpoint,
	}

	listenAddress := fmt.Sprintf(":%d", port)
	ll, err := net.Listen("tcp", listenAddress)
	if err != nil {
		panic(fmt.Sprintf("Error listening on %v: %v", listenAddress, err))
	}
	s := grpc.NewServer()

	discSvc := NewDiscoveryService(bootstrapPeers, self, comm, comm)
	gossInst := &gossipInstance{comm: comm, gRPCserv: s, Discovery: discSvc, lsnr: ll, shouldGossip: shouldGossip}

	proto.RegisterGossipServer(s, gossInst)
	go s.Serve(ll)

	return gossInst
}

func bootPeer(port int) string {
	return fmt.Sprintf("localhost:%d", port)
}

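// TestConnect connects each of ten instances to its ring successor and
// waits for full membership to form without any bootstrap peers.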
func TestConnect(t *testing.T) {
	t.Parallel()
	nodeNum := 10
	instances := []*gossipInstance{}
	for i := 0; i < nodeNum; i++ {
		inst := createDiscoveryInstance(7611+i, fmt.Sprintf("d%d", i), []string{})
		instances = append(instances, inst)
		j := (i + 1) % nodeNum
		endpoint := fmt.Sprintf("localhost:%d", 7611+j)
		netMember2Connect2 := NetworkMember{Endpoint: endpoint, PKIid: []byte(endpoint)}
		inst.Connect(netMember2Connect2)
	}

	fullMembership := func() bool {
		return nodeNum-1 == len(instances[nodeNum-1].GetMembership())
	}
	waitUntilOrFail(t, fullMembership)
	stopInstances(t, instances)
}

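// TestUpdate verifies that metadata and endpoint updates made on one
// instance propagate to the membership views of the others.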
func TestUpdate(t *testing.T) {
	t.Parallel()
	nodeNum := 5
	bootPeers := []string{bootPeer(6611), bootPeer(6612)}
	instances := []*gossipInstance{}

	inst := createDiscoveryInstance(6611, "d1", bootPeers)
	instances = append(instances, inst)

	inst = createDiscoveryInstance(6612, "d2", bootPeers)
	instances = append(instances, inst)

	for i := 3; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst = createDiscoveryInstance(6610+i, id, bootPeers)
		instances = append(instances, inst)
	}

	fullMembership := func() bool {
		return nodeNum-1 == len(instances[nodeNum-1].GetMembership())
	}

	waitUntilOrFail(t, fullMembership)

	instances[0].UpdateMetadata([]byte("bla bla"))
	instances[nodeNum-1].UpdateEndpoint("localhost:5511")

	// An instance's PKI-ID is its original endpoint, not its comm.id
	firstID := string(instances[0].Self().PKIid)
	lastID := string(instances[nodeNum-1].Self().PKIid)
	checkMembership := func() bool {
		for _, member := range instances[nodeNum-1].GetMembership() {
			if string(member.PKIid) == firstID {
				if string(member.Metadata) != "bla bla" {
					return false
				}
			}
		}

		for _, member := range instances[0].GetMembership() {
			if string(member.PKIid) == lastID {
				if member.Endpoint != "localhost:5511" {
					return false
				}
			}
		}
		return true
	}

	waitUntilOrFail(t, checkMembership)
	stopInstances(t, instances)
}

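// TestInitiateSync creates instances that do not forward gossip, so
// membership can only converge through the periodic InitiateSync calls.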
func TestInitiateSync(t *testing.T) {
	t.Parallel()
	nodeNum := 10
	bootPeers := []string{bootPeer(3611), bootPeer(3612)}
	instances := []*gossipInstance{}

	toDie := int32(0)
	for i := 1; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst := createDiscoveryInstanceWithNoGossip(3610+i, id, bootPeers)
		instances = append(instances, inst)
		go func() {
			for {
				if atomic.LoadInt32(&toDie) == int32(1) {
					return
				}
				time.Sleep(getAliveExpirationTimeout() / 3)
				inst.InitiateSync(9)
			}
		}()
	}
	time.Sleep(getAliveExpirationTimeout() * 4)
	assertMembership(t, instances, nodeNum-1)
	atomic.StoreInt32(&toDie, int32(1))
	stopInstances(t, instances)
}

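// TestExpiration stops two instances and checks that the remaining ones
// eventually expire them from their membership views.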
func TestExpiration(t *testing.T) {
	t.Parallel()
	nodeNum := 5
	bootPeers := []string{bootPeer(2611), bootPeer(2612)}
	instances := []*gossipInstance{}

	inst := createDiscoveryInstance(2611, "d1", bootPeers)
	instances = append(instances, inst)

	inst = createDiscoveryInstance(2612, "d2", bootPeers)
	instances = append(instances, inst)

	for i := 3; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst = createDiscoveryInstance(2610+i, id, bootPeers)
		instances = append(instances, inst)
	}

	assertMembership(t, instances, nodeNum-1)

	waitUntilOrFailBlocking(t, instances[nodeNum-1].Stop)
	waitUntilOrFailBlocking(t, instances[nodeNum-2].Stop)

	assertMembership(t, instances, nodeNum-3)

	stopAction := &sync.WaitGroup{}
	for i, inst := range instances {
		if i+2 == nodeNum {
			break
		}
		stopAction.Add(1)
		go func(inst *gossipInstance) {
			defer stopAction.Done()
			inst.Stop()
		}(inst)
	}

	waitUntilOrFailBlocking(t, stopAction.Wait)
}

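// TestGetFullMembership starts the regular peers before the boot peers and
// checks full membership, endpoint propagation, and Exists().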
func TestGetFullMembership(t *testing.T) {
	t.Parallel()
	nodeNum := 15
	bootPeers := []string{bootPeer(5511), bootPeer(5512)}
	instances := []*gossipInstance{}
	var inst *gossipInstance

	for i := 3; i <= nodeNum; i++ {
		id := fmt.Sprintf("d%d", i)
		inst = createDiscoveryInstance(5510+i, id, bootPeers)
		instances = append(instances, inst)
	}

	time.Sleep(time.Second)

	inst = createDiscoveryInstance(5511, "d1", bootPeers)
	instances = append(instances, inst)

	inst = createDiscoveryInstance(5512, "d2", bootPeers)
	instances = append(instances, inst)

	assertMembership(t, instances, nodeNum-1)

	// Ensure that the internal endpoint was propagated to everyone
	for _, inst := range instances {
		for _, member := range inst.GetMembership() {
			assert.NotEmpty(t, member.InternalEndpoint)
			assert.NotEmpty(t, member.Endpoint)
		}
	}

	// Check that Exists() is valid
	for _, inst := range instances {
		for _, member := range inst.GetMembership() {
			assert.True(t, inst.Exists(member.PKIid))
		}
	}

	stopInstances(t, instances)
}

func TestGossipDiscoveryStopping(t *testing.T) {
	t.Parallel()
	inst := createDiscoveryInstance(9611, "d1", []string{bootPeer(9611)})
	time.Sleep(time.Second)
	waitUntilOrFailBlocking(t, inst.Stop)
}

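// TestGossipDiscoverySkipConnectingToLocalhostBootstrap verifies that an
// instance never dials itself when its own address is in the bootstrap
// list; the mock turns any Ping or SendToPeer call into a test failure.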
func TestGossipDiscoverySkipConnectingToLocalhostBootstrap(t *testing.T) {
	t.Parallel()
	inst := createDiscoveryInstance(11611, "d1", []string{"localhost:11611", "127.0.0.1:11611"})
	inst.comm.lock.Lock()
	inst.comm.mock = &mock.Mock{}
	inst.comm.mock.On("SendToPeer", mock.Anything).Run(func(mock.Arguments) {
		t.Fatal("Should not have connected to any peer")
	})
	inst.comm.mock.On("Ping", mock.Anything).Run(func(mock.Arguments) {
		t.Fatal("Should not have connected to any peer")
	})
	inst.comm.lock.Unlock()
	time.Sleep(time.Second * 3)
	waitUntilOrFailBlocking(t, inst.Stop)
}

func TestConvergence(t *testing.T) {
	t.Parallel()
	// Scenario:
	// {boot peer: [peer list]}
	// {d1: d2, d3, d4}
	// {d5: d6, d7, d8}
	// {d9: d10, d11, d12}
	// Connect all boot peers with d13, then take down d13
	// and ensure membership is still full
	instances := []*gossipInstance{}
	for _, i := range []int{1, 5, 9} {
		bootPort := 4610 + i
		id := fmt.Sprintf("d%d", i)
		leader := createDiscoveryInstance(bootPort, id, []string{})
		instances = append(instances, leader)
		for minionIndex := 1; minionIndex <= 3; minionIndex++ {
			id := fmt.Sprintf("d%d", i+minionIndex)
			minion := createDiscoveryInstance(4610+minionIndex+i, id, []string{bootPeer(bootPort)})
			instances = append(instances, minion)
		}
	}

	assertMembership(t, instances, 3)
	connector := createDiscoveryInstance(4623, "d13", []string{bootPeer(4611), bootPeer(4615), bootPeer(4619)})
	instances = append(instances, connector)
	assertMembership(t, instances, 12)
	connector.Stop()
	instances = instances[:len(instances)-1]
	assertMembership(t, instances, 11)
	stopInstances(t, instances)
}

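// TestConfigFromFile checks the hard-coded defaults used when no config is
// loaded, and then the same settings as read from the peer's core config file.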
func TestConfigFromFile(t *testing.T) {
	preAliveTimeInterval := getAliveTimeInterval()
	preAliveExpirationTimeout := getAliveExpirationTimeout()
	preAliveExpirationCheckInterval := getAliveExpirationCheckInterval()
	preReconnectInterval := getReconnectInterval()

	// Restore the config values afterwards in order to avoid impacting other tests
	defer func() {
		SetAliveTimeInterval(preAliveTimeInterval)
		SetAliveExpirationTimeout(preAliveExpirationTimeout)
		SetAliveExpirationCheckInterval(preAliveExpirationCheckInterval)
		SetReconnectInterval(preReconnectInterval)
	}()

	// Verify that default values are used when the config is missing
	viper.Reset()
	aliveExpirationCheckInterval = 0 * time.Second
	assert.Equal(t, 5*time.Second, getAliveTimeInterval())
	assert.Equal(t, 25*time.Second, getAliveExpirationTimeout())
	assert.Equal(t, 25*time.Second/10, getAliveExpirationCheckInterval())
	assert.Equal(t, 25*time.Second, getReconnectInterval())

	// Verify that the values are read from the config file
	viper.Reset()
	aliveExpirationCheckInterval = 0 * time.Second
	viper.SetConfigName("core")
	viper.SetEnvPrefix("CORE")
	viper.AddConfigPath("./../../peer")
	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	viper.AutomaticEnv()
	err := viper.ReadInConfig()
	assert.NoError(t, err)
	assert.Equal(t, 5*time.Second, getAliveTimeInterval())
	assert.Equal(t, 25*time.Second, getAliveExpirationTimeout())
	assert.Equal(t, 25*time.Second/10, getAliveExpirationCheckInterval())
	assert.Equal(t, 25*time.Second, getReconnectInterval())
}

func TestFilterOutLocalhost(t *testing.T) {
	t.Parallel()
	endpoints := []string{"localhost:5611", "127.0.0.1:5611", "1.2.3.4:5611"}
	assert.Len(t, filterOutLocalhost(endpoints, 5611), 1)
	endpoints = []string{"1.2.3.4:5611"}
	assert.Len(t, filterOutLocalhost(endpoints, 5611), 1)
	endpoints = []string{"localhost:5611", "127.0.0.1:5611"}
	assert.Len(t, filterOutLocalhost(endpoints, 5611), 0)
	// Check that the returned slice is a copy: mutating it
	// must not affect the input slice
	endpoints = []string{"localhost:5611", "127.0.0.1:5611", "1.2.3.4:5611"}
	endpoints2 := filterOutLocalhost(endpoints, 5611)
	endpoints2[0] = "bla bla"
	assert.Equal(t, "localhost:5611", endpoints[0])
	assert.Equal(t, "1.2.3.4:5611", endpoints[2])
}

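// waitUntilOrFail polls pred at a tenth of the global timeout and fails the
// test if it never returns true within the timeout.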
func waitUntilOrFail(t *testing.T, pred func() bool) {
	start := time.Now()
	limit := start.UnixNano() + timeout.Nanoseconds()
	for time.Now().UnixNano() < limit {
		if pred() {
			return
		}
		time.Sleep(timeout / 10)
	}
	assert.Fail(t, "Timeout expired!")
}

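// waitUntilOrFailBlocking runs f in a goroutine and fails the test if f
// does not return within the global timeout.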
func waitUntilOrFailBlocking(t *testing.T, f func()) {
	successChan := make(chan struct{}, 1)
	go func() {
		f()
		successChan <- struct{}{}
	}()
	select {
	case <-time.After(timeout):
		assert.Fail(t, "Timeout expired!")
	case <-successChan:
	}
}

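// stopInstances stops all given instances concurrently and fails the test
// if they do not all stop within the timeout.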
func stopInstances(t *testing.T, instances []*gossipInstance) {
	stopAction := &sync.WaitGroup{}
	for _, inst := range instances {
		stopAction.Add(1)
		go func(inst *gossipInstance) {
			defer stopAction.Done()
			inst.Stop()
		}(inst)
	}

	waitUntilOrFailBlocking(t, stopAction.Wait)
}

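// assertMembership waits until at least one instance reports the expected
// membership size; it succeeds as soon as any single instance does, which
// matters when the slice contains stopped instances whose views no longer
// update.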
func assertMembership(t *testing.T, instances []*gossipInstance, expectedNum int) {
	fullMembership := func() bool {
		for _, inst := range instances {
			if len(inst.GetMembership()) == expectedNum {
				return true
			}
		}
		return false
	}
	waitUntilOrFail(t, fullMembership)
}