github.com/darrenli6/fabric-sdk-example@v0.0.0-20220109053535-94b13b56df8c/gossip/election/election_test.go

/*
Copyright IBM Corp. 2017 All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

		 http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package election

import (
	"fmt"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/hyperledger/fabric/core/config"
	"github.com/hyperledger/fabric/gossip/util"
	"github.com/spf13/viper"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

const (
	testTimeout      = 5 * time.Second
	testPollInterval = time.Millisecond * 300
)

func init() {
	util.SetupTestLogging()
	SetStartupGracePeriod(time.Millisecond * 500)
	SetMembershipSampleInterval(time.Millisecond * 100)
	SetLeaderAliveThreshold(time.Millisecond * 500)
	SetLeaderElectionDuration(time.Millisecond * 500)
}

type msg struct {
	sender   string
	proposal bool
}

func (m *msg) SenderID() peerID {
	return peerID(m.sender)
}

func (m *msg) IsProposal() bool {
	return m.proposal
}

func (m *msg) IsDeclaration() bool {
	return !m.proposal
}

type peer struct {
	mockedMethods map[string]struct{}
	mock.Mock
	id                 string
	peers              map[string]*peer
	sharedLock         *sync.RWMutex
	msgChan            chan Msg
	leaderFromCallback bool
	callbackInvoked    bool
	lock               sync.RWMutex
	LeaderElectionService
}

func (p *peer) On(methodName string, arguments ...interface{}) *mock.Call {
	p.sharedLock.Lock()
	defer p.sharedLock.Unlock()
	p.mockedMethods[methodName] = struct{}{}
	return p.Mock.On(methodName, arguments...)
}
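// Note: On records the method name in mockedMethods, so Gossip, Accept, and
// Peers below dispatch to the testify mock once a test registers an
// expectation, and fall back to the in-memory transport otherwise. A minimal
// sketch of how tests use this (adapted from TestStop further down):
//
//	p.On("Gossip", mock.Anything).Run(func(args mock.Arguments) {
//	    atomic.AddInt32(&gossipCounter, 1) // count gossip instead of delivering it
//	})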
func (p *peer) ID() peerID {
	return peerID(p.id)
}

func (p *peer) Gossip(m Msg) {
	p.sharedLock.RLock()
	defer p.sharedLock.RUnlock()

	if _, isMocked := p.mockedMethods["Gossip"]; isMocked {
		p.Called(m)
		return
	}

	for _, peer := range p.peers {
		if peer.id == p.id {
			continue
		}
		peer.msgChan <- m.(*msg)
	}
}

func (p *peer) Accept() <-chan Msg {
	p.sharedLock.RLock()
	defer p.sharedLock.RUnlock()

	if _, isMocked := p.mockedMethods["Accept"]; isMocked {
		args := p.Called()
		return args.Get(0).(<-chan Msg)
	}
	return (<-chan Msg)(p.msgChan)
}

func (p *peer) CreateMessage(isDeclaration bool) Msg {
	return &msg{proposal: !isDeclaration, sender: p.id}
}

func (p *peer) Peers() []Peer {
	p.sharedLock.RLock()
	defer p.sharedLock.RUnlock()

	if _, isMocked := p.mockedMethods["Peers"]; isMocked {
		args := p.Called()
		return args.Get(0).([]Peer)
	}

	var peers []Peer
	for id := range p.peers {
		peers = append(peers, &peer{id: id})
	}
	return peers
}

func (p *peer) leaderCallback(isLeader bool) {
	p.lock.Lock()
	defer p.lock.Unlock()
	p.leaderFromCallback = isLeader
	p.callbackInvoked = true
}

func (p *peer) isLeaderFromCallback() bool {
	p.lock.RLock()
	defer p.lock.RUnlock()
	return p.leaderFromCallback
}

func (p *peer) isCallbackInvoked() bool {
	p.lock.RLock()
	defer p.lock.RUnlock()
	return p.callbackInvoked
}

func createPeers(spawnInterval time.Duration, ids ...int) []*peer {
	peers := make([]*peer, len(ids))
	peerMap := make(map[string]*peer)
	l := &sync.RWMutex{}
	for i, id := range ids {
		p := createPeer(id, peerMap, l)
		if spawnInterval != 0 {
			time.Sleep(spawnInterval)
		}
		peers[i] = p
	}
	return peers
}

func createPeer(id int, peerMap map[string]*peer, l *sync.RWMutex) *peer {
	idStr := fmt.Sprintf("p%d", id)
	c := make(chan Msg, 100)
	p := &peer{id: idStr, peers: peerMap, sharedLock: l, msgChan: c, mockedMethods: make(map[string]struct{}), leaderFromCallback: false, callbackInvoked: false}
	p.LeaderElectionService = NewLeaderElectionService(p, idStr, p.leaderCallback)
	l.Lock()
	peerMap[idStr] = p
	l.Unlock()
	return p
}

func waitForMultipleLeadersElection(t *testing.T, peers []*peer, leadersNum int) []string {
	end := time.Now().Add(testTimeout)
	for time.Now().Before(end) {
		var leaders []string
		for _, p := range peers {
			if p.IsLeader() {
				leaders = append(leaders, p.id)
			}
		}
		if len(leaders) >= leadersNum {
			return leaders
		}
		time.Sleep(testPollInterval)
	}
	t.Fatal("No leader detected")
	return nil
}

func waitForLeaderElection(t *testing.T, peers []*peer) []string {
	return waitForMultipleLeadersElection(t, peers, 1)
}
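// Each test below builds a cluster with createPeers: all peers share one
// peerMap and exchange real election messages over buffered in-memory
// channels, so every peer runs an actual LeaderElectionService. A hypothetical
// three-peer cluster, for illustration only:
//
//	peers := createPeers(0, 2, 1, 0) // p2, p1, p0 spawned together
//	leaders := waitForLeaderElection(t, peers)
//	// the protocol favors the lowest ID, so leaders should be ["p0"]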
func TestInitPeersAtSameTime(t *testing.T) {
	t.Parallel()
	// Scenario: Peers are spawned at the same time
	// Expected outcome: the peer with the lowest ID becomes the leader
	peers := createPeers(0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
	time.Sleep(getStartupGracePeriod() + getLeaderElectionDuration())
	leaders := waitForLeaderElection(t, peers)
	isP0leader := peers[len(peers)-1].IsLeader()
	assert.True(t, isP0leader, "p0 isn't a leader. Leaders are: %v", leaders)
	assert.Len(t, leaders, 1, "More than 1 leader elected")
	waitForBoolFunc(t, peers[len(peers)-1].isLeaderFromCallback, true, "Leadership callback result is wrong for %s", peers[len(peers)-1].id)
}

func TestInitPeersStartAtIntervals(t *testing.T) {
	t.Parallel()
	// Scenario: Peers are spawned one by one at a slow rate
	// Expected outcome: the first peer becomes the leader although its ID is the highest
	peers := createPeers(getStartupGracePeriod()+getLeadershipDeclarationInterval(), 3, 2, 1, 0)
	waitForLeaderElection(t, peers)
	assert.True(t, peers[0].IsLeader())
}

func TestStop(t *testing.T) {
	t.Parallel()
	// Scenario: Peers are spawned at the same time and then stopped.
	// We count the number of Gossip() invocations and expect the count
	// not to increase once all peers are stopped.
	peers := createPeers(0, 3, 2, 1, 0)
	var gossipCounter int32
	for i, p := range peers {
		i := i // capture the loop index per iteration for the Run closure below
		p.On("Gossip", mock.Anything).Run(func(args mock.Arguments) {
			msg := args.Get(0).(Msg)
			atomic.AddInt32(&gossipCounter, int32(1))
			for j := range peers {
				if i == j {
					continue
				}
				peers[j].msgChan <- msg
			}
		})
	}
	waitForLeaderElection(t, peers)
	for _, p := range peers {
		p.Stop()
	}
	time.Sleep(getLeaderAliveThreshold())
	gossipCounterAfterStop := atomic.LoadInt32(&gossipCounter)
	time.Sleep(getLeaderAliveThreshold() * 5)
	assert.Equal(t, gossipCounterAfterStop, atomic.LoadInt32(&gossipCounter))
}
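// TestConvergence below merges two independently formed groups by mocking
// Gossip and Peers on every peer so that both halves see one combined
// membership, then waits for the two views to converge on a single leader.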
func TestConvergence(t *testing.T) {
	// Scenario: 2 peer groups converge their views
	// Expected outcome: only 1 leader is left out of the 2,
	// and that leader is the leader with the lowest ID
	t.Parallel()
	peers1 := createPeers(0, 3, 2, 1, 0)
	peers2 := createPeers(0, 4, 5, 6, 7)
	leaders1 := waitForLeaderElection(t, peers1)
	leaders2 := waitForLeaderElection(t, peers2)
	assert.Len(t, leaders1, 1, "Peer group 1 was supposed to have exactly 1 leader")
	assert.Len(t, leaders2, 1, "Peer group 2 was supposed to have exactly 1 leader")
	combinedPeers := append(peers1, peers2...)

	var allPeerIds []Peer
	for _, p := range combinedPeers {
		allPeerIds = append(allPeerIds, &peer{id: p.id})
	}

	for i, p := range combinedPeers {
		index := i
		gossipFunc := func(args mock.Arguments) {
			msg := args.Get(0).(Msg)
			for j := range combinedPeers {
				if index == j {
					continue
				}
				combinedPeers[j].msgChan <- msg
			}
		}
		p.On("Gossip", mock.Anything).Run(gossipFunc)
		p.On("Peers").Return(allPeerIds)
	}

	time.Sleep(getLeaderAliveThreshold() * 5)
	finalLeaders := waitForLeaderElection(t, combinedPeers)
	assert.Len(t, finalLeaders, 1, "Combined peer group was supposed to have exactly 1 leader")
	assert.Equal(t, leaders1[0], finalLeaders[0], "Combined peer group has a different leader than expected")

	for _, p := range combinedPeers {
		if p.id == finalLeaders[0] {
			waitForBoolFunc(t, p.isLeaderFromCallback, true, "Leadership callback result is wrong for %s", p.id)
			waitForBoolFunc(t, p.isCallbackInvoked, true, "Leadership callback wasn't invoked for %s", p.id)
		} else {
			waitForBoolFunc(t, p.isLeaderFromCallback, false, "Leadership callback result is wrong for %s", p.id)
			if p.id == leaders2[0] {
				waitForBoolFunc(t, p.isCallbackInvoked, true, "Leadership callback wasn't invoked for %s", p.id)
			}
		}
	}
}

func TestLeadershipTakeover(t *testing.T) {
	t.Parallel()
	// Scenario: Peers spawn one by one in descending order.
	// After a while, the leader peer stops.
	// Expected outcome: the peer that takes over is the peer with the lowest ID
	peers := createPeers(getStartupGracePeriod()+getLeadershipDeclarationInterval(), 5, 4, 3, 2)
	leaders := waitForLeaderElection(t, peers)
	assert.Len(t, leaders, 1, "Only 1 leader should have been elected")
	assert.Equal(t, "p5", leaders[0])
	peers[0].Stop()
	time.Sleep(getLeadershipDeclarationInterval() + getLeaderAliveThreshold()*3)
	leaders = waitForLeaderElection(t, peers[1:])
	assert.Len(t, leaders, 1, "Only 1 leader should have been elected")
	assert.Equal(t, "p2", leaders[0])
}
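// The three tests below exercise Yield. Yielding relinquishes leadership and
// suppresses re-election long enough for another peer to take over; a lone
// peer, having no competitors, eventually reclaims leadership.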
func TestYield(t *testing.T) {
	t.Parallel()
	// Scenario: Peers spawn and a leader is elected.
	// After a while, the leader yields.
	// (Yield is called twice to ensure the callback fires only once.)
	// Expected outcome:
	// (1) A new leader is elected
	// (2) The old leader doesn't take back its leadership
	peers := createPeers(0, 0, 1, 2, 3, 4, 5)
	leaders := waitForLeaderElection(t, peers)
	assert.Len(t, leaders, 1, "Only 1 leader should have been elected")
	assert.Equal(t, "p0", leaders[0])
	peers[0].Yield()
	// Ensure the callback was called with 'false'
	assert.True(t, peers[0].isCallbackInvoked())
	assert.False(t, peers[0].isLeaderFromCallback())
	// Clear the callback-invoked flag
	peers[0].lock.Lock()
	peers[0].callbackInvoked = false
	peers[0].lock.Unlock()
	// Yield again and ensure the callback isn't invoked again
	peers[0].Yield()
	assert.False(t, peers[0].isCallbackInvoked())

	ensureP0isNotAleader := func() bool {
		leaders := waitForLeaderElection(t, peers)
		return len(leaders) == 1 && leaders[0] != "p0"
	}
	// A new leader is elected, and it is not p0
	waitForBoolFunc(t, ensureP0isNotAleader, true)
	time.Sleep(getLeaderAliveThreshold() * 2)
	// After a while, p0 still hasn't regained its leadership status
	waitForBoolFunc(t, ensureP0isNotAleader, true)
}

func TestYieldSinglePeer(t *testing.T) {
	t.Parallel()
	// Scenario: spawn a single peer and have it yield.
	// Ensure it recovers its leadership after a while.
	peers := createPeers(0, 0)
	waitForLeaderElection(t, peers)
	peers[0].Yield()
	assert.False(t, peers[0].IsLeader())
	waitForLeaderElection(t, peers)
}

func TestYieldAllPeers(t *testing.T) {
	t.Parallel()
	// Scenario: spawn 2 peers and have each of them yield after gaining leadership.
	// Ensure the first peer is the leader in the end, after both peers have yielded.
	peers := createPeers(0, 0, 1)
	leaders := waitForLeaderElection(t, peers)
	assert.Len(t, leaders, 1, "Only 1 leader should have been elected")
	assert.Equal(t, "p0", leaders[0])
	peers[0].Yield()
	leaders = waitForLeaderElection(t, peers)
	assert.Len(t, leaders, 1, "Only 1 leader should have been elected")
	assert.Equal(t, "p1", leaders[0])
	peers[1].Yield()
	leaders = waitForLeaderElection(t, peers)
	assert.Len(t, leaders, 1, "Only 1 leader should have been elected")
	assert.Equal(t, "p0", leaders[0])
}
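// TestPartition below simulates a full network partition by mocking each
// peer's view of the cluster:
//
//	p.On("Peers").Return([]Peer{}) // nobody sees any other peer
//	p.On("Gossip", mock.Anything)  // gossip is swallowed
//
// Resetting mockedMethods afterwards restores the shared in-memory transport,
// which heals the partition.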
func TestPartition(t *testing.T) {
	t.Parallel()
	// Scenario: Peers spawn together, and then a network partition occurs
	// in which no peer can communicate with any other peer.
	// Expected outcome 1: each peer becomes a leader
	// After this, we heal the partition to restore a unified view.
	// Expected outcome 2: p0 is the leader once again
	peers := createPeers(0, 5, 4, 3, 2, 1, 0)
	leaders := waitForLeaderElection(t, peers)
	assert.Len(t, leaders, 1, "Only 1 leader should have been elected")
	assert.Equal(t, "p0", leaders[0])
	waitForBoolFunc(t, peers[len(peers)-1].isLeaderFromCallback, true, "Leadership callback result is wrong for %s", peers[len(peers)-1].id)

	for _, p := range peers {
		p.On("Peers").Return([]Peer{})
		p.On("Gossip", mock.Anything)
	}
	time.Sleep(getLeadershipDeclarationInterval() + getLeaderAliveThreshold()*2)
	leaders = waitForMultipleLeadersElection(t, peers, 6)
	assert.Len(t, leaders, 6)
	for _, p := range peers {
		waitForBoolFunc(t, p.isLeaderFromCallback, true, "Leadership callback result is wrong for %s", p.id)
	}

	for _, p := range peers {
		p.sharedLock.Lock()
		p.mockedMethods = make(map[string]struct{})
		p.callbackInvoked = false
		p.sharedLock.Unlock()
	}
	time.Sleep(getLeadershipDeclarationInterval() + getLeaderAliveThreshold()*2)
	leaders = waitForLeaderElection(t, peers)
	assert.Len(t, leaders, 1, "Only 1 leader should have been elected")
	assert.Equal(t, "p0", leaders[0])
	for _, p := range peers {
		if p.id == leaders[0] {
			waitForBoolFunc(t, p.isLeaderFromCallback, true, "Leadership callback result is wrong for %s", p.id)
		} else {
			waitForBoolFunc(t, p.isLeaderFromCallback, false, "Leadership callback result is wrong for %s", p.id)
			waitForBoolFunc(t, p.isCallbackInvoked, true, "Leadership callback wasn't invoked for %s", p.id)
		}
	}
}

func TestConfigFromFile(t *testing.T) {
	preStartupGracePeriod := getStartupGracePeriod()
	preMembershipSampleInterval := getMembershipSampleInterval()
	preLeaderAliveThreshold := getLeaderAliveThreshold()
	preLeaderElectionDuration := getLeaderElectionDuration()

	// Restore the config values afterwards to avoid impacting other tests
	defer func() {
		SetStartupGracePeriod(preStartupGracePeriod)
		SetMembershipSampleInterval(preMembershipSampleInterval)
		SetLeaderAliveThreshold(preLeaderAliveThreshold)
		SetLeaderElectionDuration(preLeaderElectionDuration)
	}()

	// Verify that default values are used when the config is missing
	viper.Reset()
	assert.Equal(t, time.Second*15, getStartupGracePeriod())
	assert.Equal(t, time.Second, getMembershipSampleInterval())
	assert.Equal(t, time.Second*10, getLeaderAliveThreshold())
	assert.Equal(t, time.Second*5, getLeaderElectionDuration())
	assert.Equal(t, getLeaderAliveThreshold()/2, getLeadershipDeclarationInterval())

	// Verify reading the values from the config file
	viper.Reset()
	viper.SetConfigName("core")
	viper.SetEnvPrefix("CORE")
	config.AddDevConfigPath(nil)
	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	viper.AutomaticEnv()
	err := viper.ReadInConfig()
	assert.NoError(t, err)
	assert.Equal(t, time.Second*15, getStartupGracePeriod())
	assert.Equal(t, time.Second, getMembershipSampleInterval())
	assert.Equal(t, time.Second*10, getLeaderAliveThreshold())
	assert.Equal(t, time.Second*5, getLeaderElectionDuration())
	assert.Equal(t, getLeaderAliveThreshold()/2, getLeadershipDeclarationInterval())
}

func waitForBoolFunc(t *testing.T, f func() bool, expectedValue bool, msgAndArgs ...interface{}) {
	end := time.Now().Add(testTimeout)
	for time.Now().Before(end) {
		if f() == expectedValue {
			return
		}
		time.Sleep(testPollInterval)
	}
	assert.Fail(t, fmt.Sprintf("Should be %t", expectedValue), msgAndArgs...)
}
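// For reference, the flow these tests exercise corresponds roughly to the
// following use of the service (a sketch only; adapter and onLeadershipChange
// are placeholder names for whatever implements the gossip adapter interface
// and the leadership callback, roles played above by the test peer):
//
//	svc := NewLeaderElectionService(adapter, "p0", onLeadershipChange)
//	defer svc.Stop()
//	if svc.IsLeader() {
//	    // perform leader-only duties
//	}
//	svc.Yield() // voluntarily relinquish leadership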