github.com/myafeier/fabric@v1.0.1-0.20170722181825-3a4b1f2bce86/gossip/election/election_test.go

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package election

import (
	"fmt"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/hyperledger/fabric/core/config"
	"github.com/hyperledger/fabric/gossip/util"
	"github.com/spf13/viper"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

const (
	testTimeout      = 5 * time.Second
	testPollInterval = time.Millisecond * 300
)

func init() {
	util.SetupTestLogging()
	SetStartupGracePeriod(time.Millisecond * 500)
	SetMembershipSampleInterval(time.Millisecond * 100)
	SetLeaderAliveThreshold(time.Millisecond * 500)
	SetLeaderElectionDuration(time.Millisecond * 500)
}

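// msg is a minimal implementation of the Msg interface used by the tests:
// a message carries its sender's ID and is either a leadership proposal or,
// when proposal is false, a leadership declaration.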
type msg struct {
	sender   string
	proposal bool
}

func (m *msg) SenderID() peerID {
	return peerID(m.sender)
}

func (m *msg) IsProposal() bool {
	return m.proposal
}

func (m *msg) IsDeclaration() bool {
	return !m.proposal
}

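// peer is a test double that serves both as the election service's view of the
// network (Gossip, Accept, CreateMessage, Peers) and as a handle to the
// LeaderElectionService itself. Methods registered via On() are delegated to
// the embedded mock.Mock; otherwise the default in-memory implementations
// below are used, wired together through the shared peers map.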
type peer struct {
	mockedMethods map[string]struct{}
	mock.Mock
	id                 string
	peers              map[string]*peer
	sharedLock         *sync.RWMutex
	msgChan            chan Msg
	leaderFromCallback bool
	callbackInvoked    bool
	lock               sync.RWMutex
	LeaderElectionService
}

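// On registers a mocked method. Once a method name is registered here, calls
// to it are routed to the embedded mock.Mock instead of the default
// implementation below.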
func (p *peer) On(methodName string, arguments ...interface{}) *mock.Call {
	p.sharedLock.Lock()
	defer p.sharedLock.Unlock()
	p.mockedMethods[methodName] = struct{}{}
	return p.Mock.On(methodName, arguments...)
}

func (p *peer) ID() peerID {
	return peerID(p.id)
}

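// Gossip broadcasts m to every other peer's message channel, unless the method
// was mocked via On(), in which case the call is recorded by the mock instead.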
func (p *peer) Gossip(m Msg) {
	p.sharedLock.RLock()
	defer p.sharedLock.RUnlock()

	if _, isMocked := p.mockedMethods["Gossip"]; isMocked {
		p.Called(m)
		return
	}

	for _, peer := range p.peers {
		if peer.id == p.id {
			continue
		}
		peer.msgChan <- m.(*msg)
	}
}

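// Accept returns the channel from which the election service reads incoming
// messages; a mocked Accept returns whatever channel the test registered.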
func (p *peer) Accept() <-chan Msg {
	p.sharedLock.RLock()
	defer p.sharedLock.RUnlock()

	if _, isMocked := p.mockedMethods["Accept"]; isMocked {
		args := p.Called()
		return args.Get(0).(<-chan Msg)
	}
	return (<-chan Msg)(p.msgChan)
}

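// CreateMessage builds a leadership proposal or declaration on behalf of this peer.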
func (p *peer) CreateMessage(isDeclaration bool) Msg {
	return &msg{proposal: !isDeclaration, sender: p.id}
}

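// Peers returns the current membership view: all peers registered in the
// shared map, or the canned slice supplied by the test if the method was mocked.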
func (p *peer) Peers() []Peer {
	p.sharedLock.RLock()
	defer p.sharedLock.RUnlock()

	if _, isMocked := p.mockedMethods["Peers"]; isMocked {
		args := p.Called()
		return args.Get(0).([]Peer)
	}

	var peers []Peer
	for id := range p.peers {
		peers = append(peers, &peer{id: id})
	}
	return peers
}

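// leaderCallback is passed to NewLeaderElectionService and records the latest
// leadership status delivered by the service, along with the fact that it fired.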
func (p *peer) leaderCallback(isLeader bool) {
	p.lock.Lock()
	defer p.lock.Unlock()
	p.leaderFromCallback = isLeader
	p.callbackInvoked = true
}

func (p *peer) isLeaderFromCallback() bool {
	p.lock.RLock()
	defer p.lock.RUnlock()
	return p.leaderFromCallback
}

func (p *peer) isCallbackInvoked() bool {
	p.lock.RLock()
	defer p.lock.RUnlock()
	return p.callbackInvoked
}

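// createPeers creates one peer per given ID, sleeping spawnInterval after each
// spawn when it is non-zero. All peers share a single map so that each peer's
// default Peers() implementation sees the whole group.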
func createPeers(spawnInterval time.Duration, ids ...int) []*peer {
	peers := make([]*peer, len(ids))
	peerMap := make(map[string]*peer)
	l := &sync.RWMutex{}
	for i, id := range ids {
		p := createPeer(id, peerMap, l)
		if spawnInterval != 0 {
			time.Sleep(spawnInterval)
		}
		peers[i] = p
	}
	return peers
}

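// createPeer builds a single peer named "p<id>", starts its leader election
// service and registers it in the shared peer map.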
func createPeer(id int, peerMap map[string]*peer, l *sync.RWMutex) *peer {
	idStr := fmt.Sprintf("p%d", id)
	c := make(chan Msg, 100)
	p := &peer{id: idStr, peers: peerMap, sharedLock: l, msgChan: c, mockedMethods: make(map[string]struct{}), leaderFromCallback: false, callbackInvoked: false}
	p.LeaderElectionService = NewLeaderElectionService(p, idStr, p.leaderCallback)
	l.Lock()
	peerMap[idStr] = p
	l.Unlock()
	return p
}

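// waitForMultipleLeadersElection polls until at least leadersNum peers consider
// themselves leaders and returns their IDs, failing the test if that doesn't
// happen within testTimeout.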
func waitForMultipleLeadersElection(t *testing.T, peers []*peer, leadersNum int) []string {
	end := time.Now().Add(testTimeout)
	for time.Now().Before(end) {
		var leaders []string
		for _, p := range peers {
			if p.IsLeader() {
				leaders = append(leaders, p.id)
			}
		}
		if len(leaders) >= leadersNum {
			return leaders
		}
		time.Sleep(testPollInterval)
	}
	t.Fatalf("Expected at least %d leader(s) to be elected within %v", leadersNum, testTimeout)
	return nil
}

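// waitForLeaderElection polls until at least one leader emerges and returns the
// IDs of all peers that currently consider themselves leaders.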
func waitForLeaderElection(t *testing.T, peers []*peer) []string {
	return waitForMultipleLeadersElection(t, peers, 1)
}

func TestInitPeersAtSameTime(t *testing.T) {
	t.Parallel()
	// Scenario: Peers are spawned at the same time
	// expected outcome: the peer that has the lowest ID is the leader
	peers := createPeers(0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
	time.Sleep(getStartupGracePeriod() + getLeaderElectionDuration())
	leaders := waitForLeaderElection(t, peers)
	isP0leader := peers[len(peers)-1].IsLeader()
	assert.True(t, isP0leader, "p0 isn't a leader. Leaders are: %v", leaders)
	assert.Len(t, leaders, 1, "More than 1 leader elected")
	waitForBoolFunc(t, peers[len(peers)-1].isLeaderFromCallback, true, "Leadership callback result is wrong for %s", peers[len(peers)-1].id)
}

func TestInitPeersStartAtIntervals(t *testing.T) {
	t.Parallel()
	// Scenario: Peers are spawned one by one at a slow rate
	// expected outcome: the first peer remains the leader even though its ID is not the lowest
	peers := createPeers(getStartupGracePeriod()+getLeadershipDeclarationInterval(), 3, 2, 1, 0)
	waitForLeaderElection(t, peers)
	assert.True(t, peers[0].IsLeader())
}

func TestStop(t *testing.T) {
	t.Parallel()
	// Scenario: peers are spawned at the same time and then stopped.
	// We count the number of Gossip() invocations and expect the count
	// not to increase once the peers have been stopped.
	peers := createPeers(0, 3, 2, 1, 0)
	var gossipCounter int32
	for i, p := range peers {
		i := i // capture the loop variable so each callback skips its own peer
		p.On("Gossip", mock.Anything).Run(func(args mock.Arguments) {
			msg := args.Get(0).(Msg)
			atomic.AddInt32(&gossipCounter, int32(1))
			for j := range peers {
				if i == j {
					continue
				}
				peers[j].msgChan <- msg
			}
		})
	}
	waitForLeaderElection(t, peers)
	for _, p := range peers {
		p.Stop()
	}
	time.Sleep(getLeaderAliveThreshold())
	gossipCounterAfterStop := atomic.LoadInt32(&gossipCounter)
	time.Sleep(getLeaderAliveThreshold() * 5)
	assert.Equal(t, gossipCounterAfterStop, atomic.LoadInt32(&gossipCounter))
}

func TestConvergence(t *testing.T) {
	// Scenario: 2 peer groups converge their views
	// expected outcome: only 1 leader is left out of the 2,
	// and that leader is the one with the lowest ID
	t.Parallel()
	peers1 := createPeers(0, 3, 2, 1, 0)
	peers2 := createPeers(0, 4, 5, 6, 7)
	leaders1 := waitForLeaderElection(t, peers1)
	leaders2 := waitForLeaderElection(t, peers2)
	assert.Len(t, leaders1, 1, "Peer group 1 was supposed to have exactly 1 leader")
	assert.Len(t, leaders2, 1, "Peer group 2 was supposed to have exactly 1 leader")
	combinedPeers := append(peers1, peers2...)

	var allPeerIds []Peer
	for _, p := range combinedPeers {
		allPeerIds = append(allPeerIds, &peer{id: p.id})
	}

	for i, p := range combinedPeers {
		index := i
		gossipFunc := func(args mock.Arguments) {
			msg := args.Get(0).(Msg)
			for j := range combinedPeers {
				if index == j {
					continue
				}
				combinedPeers[j].msgChan <- msg
			}
		}
		p.On("Gossip", mock.Anything).Run(gossipFunc)
		p.On("Peers").Return(allPeerIds)
	}

	time.Sleep(getLeaderAliveThreshold() * 5)
	finalLeaders := waitForLeaderElection(t, combinedPeers)
	assert.Len(t, finalLeaders, 1, "Combined peer group was supposed to have exactly 1 leader")
	assert.Equal(t, leaders1[0], finalLeaders[0], "Combined peer group has a different leader than expected")

	for _, p := range combinedPeers {
		if p.id == finalLeaders[0] {
			waitForBoolFunc(t, p.isLeaderFromCallback, true, "Leadership callback result is wrong for %s", p.id)
			waitForBoolFunc(t, p.isCallbackInvoked, true, "Leadership callback wasn't invoked for %s", p.id)
		} else {
			waitForBoolFunc(t, p.isLeaderFromCallback, false, "Leadership callback result is wrong for %s", p.id)
			if p.id == leaders2[0] {
				waitForBoolFunc(t, p.isCallbackInvoked, true, "Leadership callback wasn't invoked for %s", p.id)
			}
		}
	}
}

func TestLeadershipTakeover(t *testing.T) {
	t.Parallel()
	// Scenario: Peers spawn one by one in descending order.
	// After a while, the leader peer stops.
	// expected outcome: the peer with the lowest remaining ID takes over
	peers := createPeers(getStartupGracePeriod()+getLeadershipDeclarationInterval(), 5, 4, 3, 2)
	leaders := waitForLeaderElection(t, peers)
	assert.Len(t, leaders, 1, "Only 1 leader should have been elected")
	assert.Equal(t, "p5", leaders[0])
	peers[0].Stop()
	time.Sleep(getLeadershipDeclarationInterval() + getLeaderAliveThreshold()*3)
	leaders = waitForLeaderElection(t, peers[1:])
	assert.Len(t, leaders, 1, "Only 1 leader should have been elected")
	assert.Equal(t, "p2", leaders[0])
}

func TestPartition(t *testing.T) {
	t.Parallel()
	// Scenario: peers spawn together, and then after a while a network partition occurs
	// and no peer can communicate with another peer
	// Expected outcome 1: each peer is a leader
	// After this, we heal the partition to be a unified view again
	// Expected outcome 2: p0 is the leader once again
	peers := createPeers(0, 5, 4, 3, 2, 1, 0)
	leaders := waitForLeaderElection(t, peers)
	assert.Len(t, leaders, 1, "Only 1 leader should have been elected")
	assert.Equal(t, "p0", leaders[0])
	waitForBoolFunc(t, peers[len(peers)-1].isLeaderFromCallback, true, "Leadership callback result is wrong for %s", peers[len(peers)-1].id)

	for _, p := range peers {
		p.On("Peers").Return([]Peer{})
		p.On("Gossip", mock.Anything)
	}
	time.Sleep(getLeadershipDeclarationInterval() + getLeaderAliveThreshold()*2)
	leaders = waitForMultipleLeadersElection(t, peers, 6)
	assert.Len(t, leaders, 6)
	for _, p := range peers {
		waitForBoolFunc(t, p.isLeaderFromCallback, true, "Leadership callback result is wrong for %s", p.id)
	}

	for _, p := range peers {
		p.sharedLock.Lock()
		p.mockedMethods = make(map[string]struct{})
		p.sharedLock.Unlock()
		// callbackInvoked is guarded by the peer's own lock, not the shared lock
		p.lock.Lock()
		p.callbackInvoked = false
		p.lock.Unlock()
	}
	time.Sleep(getLeadershipDeclarationInterval() + getLeaderAliveThreshold()*2)
	leaders = waitForLeaderElection(t, peers)
	assert.Len(t, leaders, 1, "Only 1 leader should have been elected")
	assert.Equal(t, "p0", leaders[0])
	for _, p := range peers {
		if p.id == leaders[0] {
			waitForBoolFunc(t, p.isLeaderFromCallback, true, "Leadership callback result is wrong for %s", p.id)
		} else {
			waitForBoolFunc(t, p.isLeaderFromCallback, false, "Leadership callback result is wrong for %s", p.id)
			waitForBoolFunc(t, p.isCallbackInvoked, true, "Leadership callback wasn't invoked for %s", p.id)
		}
	}
}

func TestConfigFromFile(t *testing.T) {
	preStartupGracePeriod := getStartupGracePeriod()
	preMembershipSampleInterval := getMembershipSampleInterval()
	preLeaderAliveThreshold := getLeaderAliveThreshold()
	preLeaderElectionDuration := getLeaderElectionDuration()

	// Restore the config values in order to avoid impacting other tests
	defer func() {
		SetStartupGracePeriod(preStartupGracePeriod)
		SetMembershipSampleInterval(preMembershipSampleInterval)
		SetLeaderAliveThreshold(preLeaderAliveThreshold)
		SetLeaderElectionDuration(preLeaderElectionDuration)
	}()

	// Verify that default values are used when the config is missing
	viper.Reset()
	assert.Equal(t, time.Second*15, getStartupGracePeriod())
	assert.Equal(t, time.Second, getMembershipSampleInterval())
	assert.Equal(t, time.Second*10, getLeaderAliveThreshold())
	assert.Equal(t, time.Second*5, getLeaderElectionDuration())
	assert.Equal(t, getLeaderAliveThreshold()/2, getLeadershipDeclarationInterval())

	// Verify reading the values from the config file
	viper.Reset()
	viper.SetConfigName("core")
	viper.SetEnvPrefix("CORE")
	config.AddDevConfigPath(nil)
	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	viper.AutomaticEnv()
	err := viper.ReadInConfig()
	assert.NoError(t, err)
	assert.Equal(t, time.Second*15, getStartupGracePeriod())
	assert.Equal(t, time.Second, getMembershipSampleInterval())
	assert.Equal(t, time.Second*10, getLeaderAliveThreshold())
	assert.Equal(t, time.Second*5, getLeaderElectionDuration())
	assert.Equal(t, getLeaderAliveThreshold()/2, getLeadershipDeclarationInterval())
}

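// waitForBoolFunc polls f until it returns expectedValue, failing the test with
// the given message if that doesn't happen within testTimeout.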
func waitForBoolFunc(t *testing.T, f func() bool, expectedValue bool, msgAndArgs ...interface{}) {
	end := time.Now().Add(testTimeout)
	for time.Now().Before(end) {
		if f() == expectedValue {
			return
		}
		time.Sleep(testPollInterval)
	}
	assert.Fail(t, fmt.Sprintf("Should be %t", expectedValue), msgAndArgs...)
}