github.com/kchristidis/fabric@v1.0.4-0.20171028114726-837acd08cde1/gossip/state/state_test.go (about) 1 /* 2 Copyright IBM Corp. 2016 All Rights Reserved. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 package state 18 19 import ( 20 "bytes" 21 "errors" 22 "fmt" 23 "math/rand" 24 "strconv" 25 "sync" 26 "testing" 27 "time" 28 29 pb "github.com/golang/protobuf/proto" 30 "github.com/hyperledger/fabric/common/configtx/test" 31 "github.com/hyperledger/fabric/common/util" 32 "github.com/hyperledger/fabric/core/committer" 33 "github.com/hyperledger/fabric/core/ledger/ledgermgmt" 34 "github.com/hyperledger/fabric/core/mocks/validator" 35 "github.com/hyperledger/fabric/gossip/api" 36 "github.com/hyperledger/fabric/gossip/comm" 37 "github.com/hyperledger/fabric/gossip/common" 38 "github.com/hyperledger/fabric/gossip/discovery" 39 "github.com/hyperledger/fabric/gossip/gossip" 40 "github.com/hyperledger/fabric/gossip/identity" 41 "github.com/hyperledger/fabric/gossip/state/mocks" 42 gutil "github.com/hyperledger/fabric/gossip/util" 43 pcomm "github.com/hyperledger/fabric/protos/common" 44 proto "github.com/hyperledger/fabric/protos/gossip" 45 "github.com/op/go-logging" 46 "github.com/spf13/viper" 47 "github.com/stretchr/testify/assert" 48 "github.com/stretchr/testify/mock" 49 ) 50 51 var ( 52 portPrefix = 5610 53 ) 54 55 var orgID = []byte("ORG1") 56 57 type peerIdentityAcceptor func(identity api.PeerIdentityType) error 58 59 var noopPeerIdentityAcceptor = func(identity api.PeerIdentityType) error { 60 return nil 61 } 62 63 type joinChanMsg struct { 64 } 65 66 func init() { 67 gutil.SetupTestLogging() 68 logging.SetLevel(logging.DEBUG, gutil.LoggingStateModule) 69 } 70 71 // SequenceNumber returns the sequence number of the block that the message 72 // is derived from 73 func (*joinChanMsg) SequenceNumber() uint64 { 74 return uint64(time.Now().UnixNano()) 75 } 76 77 // Members returns the organizations of the channel 78 func (jcm *joinChanMsg) Members() []api.OrgIdentityType { 79 return []api.OrgIdentityType{orgID} 80 } 81 82 // AnchorPeersOf returns the anchor peers of the given organization 83 func (jcm *joinChanMsg) AnchorPeersOf(org api.OrgIdentityType) []api.AnchorPeer { 84 return []api.AnchorPeer{} 85 } 86 87 type orgCryptoService struct { 88 } 89 90 // OrgByPeerIdentity returns the OrgIdentityType 91 // of a given peer identity 92 func (*orgCryptoService) OrgByPeerIdentity(identity api.PeerIdentityType) api.OrgIdentityType { 93 return orgID 94 } 95 96 // Verify verifies a JoinChannelMessage, returns nil on success, 97 // and an error on failure 98 func (*orgCryptoService) Verify(joinChanMsg api.JoinChannelMessage) error { 99 return nil 100 } 101 102 type cryptoServiceMock struct { 103 acceptor peerIdentityAcceptor 104 } 105 106 // GetPKIidOfCert returns the PKI-ID of a peer's identity 107 func (*cryptoServiceMock) GetPKIidOfCert(peerIdentity api.PeerIdentityType) common.PKIidType { 108 return common.PKIidType(peerIdentity) 109 } 110 111 // VerifyBlock returns nil if the block is properly signed, 112 // else returns error 113 func (*cryptoServiceMock) VerifyBlock(chainID common.ChainID, seqNum uint64, signedBlock []byte) error { 114 return nil 115 } 116 117 // Sign signs msg with this peer's signing key and outputs 118 // the signature if no error occurred. 119 func (*cryptoServiceMock) Sign(msg []byte) ([]byte, error) { 120 clone := make([]byte, len(msg)) 121 copy(clone, msg) 122 return clone, nil 123 } 124 125 // Verify checks that signature is a valid signature of message under a peer's verification key. 126 // If the verification succeeded, Verify returns nil meaning no error occurred. 127 // If peerCert is nil, then the signature is verified against this peer's verification key. 128 func (*cryptoServiceMock) Verify(peerIdentity api.PeerIdentityType, signature, message []byte) error { 129 equal := bytes.Equal(signature, message) 130 if !equal { 131 return fmt.Errorf("Wrong signature:%v, %v", signature, message) 132 } 133 return nil 134 } 135 136 // VerifyByChannel checks that signature is a valid signature of message 137 // under a peer's verification key, but also in the context of a specific channel. 138 // If the verification succeeded, Verify returns nil meaning no error occurred. 139 // If peerIdentity is nil, then the signature is verified against this peer's verification key. 140 func (cs *cryptoServiceMock) VerifyByChannel(chainID common.ChainID, peerIdentity api.PeerIdentityType, signature, message []byte) error { 141 return cs.acceptor(peerIdentity) 142 } 143 144 func (*cryptoServiceMock) ValidateIdentity(peerIdentity api.PeerIdentityType) error { 145 return nil 146 } 147 148 func bootPeers(ids ...int) []string { 149 peers := []string{} 150 for _, id := range ids { 151 peers = append(peers, fmt.Sprintf("localhost:%d", id+portPrefix)) 152 } 153 return peers 154 } 155 156 // Simple presentation of peer which includes only 157 // communication module, gossip and state transfer 158 type peerNode struct { 159 port int 160 g gossip.Gossip 161 s *GossipStateProviderImpl 162 cs *cryptoServiceMock 163 commit committer.Committer 164 } 165 166 // Shutting down all modules used 167 func (node *peerNode) shutdown() { 168 node.s.Stop() 169 node.g.Stop() 170 } 171 172 type mockCommitter struct { 173 mock.Mock 174 sync.Mutex 175 } 176 177 func (mc *mockCommitter) Commit(block *pcomm.Block) error { 178 mc.Lock() 179 m := mc.Mock 180 mc.Unlock() 181 m.Called(block) 182 return nil 183 } 184 185 func (mc *mockCommitter) LedgerHeight() (uint64, error) { 186 mc.Lock() 187 m := mc.Mock 188 mc.Unlock() 189 if m.Called().Get(1) == nil { 190 return m.Called().Get(0).(uint64), nil 191 } 192 return m.Called().Get(0).(uint64), m.Called().Get(1).(error) 193 } 194 195 func (mc *mockCommitter) GetBlocks(blockSeqs []uint64) []*pcomm.Block { 196 if mc.Called(blockSeqs).Get(0) == nil { 197 return nil 198 } 199 return mc.Called(blockSeqs).Get(0).([]*pcomm.Block) 200 } 201 202 func (*mockCommitter) Close() { 203 } 204 205 // Default configuration to be used for gossip and communication modules 206 func newGossipConfig(id int, boot ...int) *gossip.Config { 207 port := id + portPrefix 208 return &gossip.Config{ 209 BindPort: port, 210 BootstrapPeers: bootPeers(boot...), 211 ID: fmt.Sprintf("p%d", id), 212 MaxBlockCountToStore: 0, 213 MaxPropagationBurstLatency: time.Duration(10) * time.Millisecond, 214 MaxPropagationBurstSize: 10, 215 PropagateIterations: 1, 216 PropagatePeerNum: 3, 217 PullInterval: time.Duration(4) * time.Second, 218 PullPeerNum: 5, 219 InternalEndpoint: fmt.Sprintf("localhost:%d", port), 220 PublishCertPeriod: 10 * time.Second, 221 RequestStateInfoInterval: 4 * time.Second, 222 PublishStateInfoInterval: 4 * time.Second, 223 } 224 } 225 226 // Create gossip instance 227 func newGossipInstance(config *gossip.Config, mcs api.MessageCryptoService) gossip.Gossip { 228 id := api.PeerIdentityType(config.InternalEndpoint) 229 idMapper := identity.NewIdentityMapper(mcs, id) 230 return gossip.NewGossipServiceWithServer(config, &orgCryptoService{}, mcs, 231 idMapper, id, nil) 232 } 233 234 // Create new instance of KVLedger to be used for testing 235 func newCommitter(id int) committer.Committer { 236 cb, _ := test.MakeGenesisBlock(strconv.Itoa(id)) 237 ledger, _ := ledgermgmt.CreateLedger(cb) 238 return committer.NewLedgerCommitter(ledger, &validator.MockValidator{}) 239 } 240 241 // Constructing pseudo peer node, simulating only gossip and state transfer part 242 func newPeerNodeWithGossip(config *gossip.Config, committer committer.Committer, acceptor peerIdentityAcceptor, g gossip.Gossip) *peerNode { 243 cs := &cryptoServiceMock{acceptor: acceptor} 244 // Gossip component based on configuration provided and communication module 245 if g == nil { 246 g = newGossipInstance(config, &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}) 247 } 248 249 logger.Debug("Joinning channel", util.GetTestChainID()) 250 g.JoinChan(&joinChanMsg{}, common.ChainID(util.GetTestChainID())) 251 252 // Initialize pseudo peer simulator, which has only three 253 // basic parts 254 255 sp := NewGossipStateProvider(util.GetTestChainID(), g, committer, cs) 256 if sp == nil { 257 return nil 258 } 259 260 return &peerNode{ 261 port: config.BindPort, 262 g: g, 263 s: sp.(*GossipStateProviderImpl), 264 commit: committer, 265 cs: cs, 266 } 267 } 268 269 // Constructing pseudo peer node, simulating only gossip and state transfer part 270 func newPeerNode(config *gossip.Config, committer committer.Committer, acceptor peerIdentityAcceptor) *peerNode { 271 return newPeerNodeWithGossip(config, committer, acceptor, nil) 272 } 273 274 func TestNilDirectMsg(t *testing.T) { 275 mc := &mockCommitter{} 276 mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) 277 g := &mocks.GossipMock{} 278 g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) 279 g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) 280 p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g) 281 defer p.shutdown() 282 p.s.handleStateRequest(nil) 283 p.s.directMessage(nil) 284 sMsg, _ := p.s.stateRequestMessage(uint64(10), uint64(8)).NoopSign() 285 req := &comm.ReceivedMessageImpl{ 286 SignedGossipMessage: sMsg, 287 } 288 p.s.directMessage(req) 289 } 290 291 func TestNilAddPayload(t *testing.T) { 292 mc := &mockCommitter{} 293 mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) 294 g := &mocks.GossipMock{} 295 g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) 296 g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) 297 p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g) 298 defer p.shutdown() 299 err := p.s.AddPayload(nil) 300 assert.Error(t, err) 301 assert.Contains(t, err.Error(), "nil") 302 } 303 304 func TestAddPayloadLedgerUnavailable(t *testing.T) { 305 mc := &mockCommitter{} 306 mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) 307 g := &mocks.GossipMock{} 308 g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) 309 g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) 310 p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g) 311 defer p.shutdown() 312 // Simulate a problem in the ledger 313 failedLedger := mock.Mock{} 314 failedLedger.On("LedgerHeight", mock.Anything).Return(uint64(0), errors.New("cannot query ledger")) 315 mc.Lock() 316 mc.Mock = failedLedger 317 mc.Unlock() 318 319 rawblock := pcomm.NewBlock(uint64(1), []byte{}) 320 b, _ := pb.Marshal(rawblock) 321 err := p.s.AddPayload(&proto.Payload{ 322 SeqNum: uint64(1), 323 Data: b, 324 }) 325 assert.Error(t, err) 326 assert.Contains(t, err.Error(), "Failed obtaining ledger height") 327 assert.Contains(t, err.Error(), "cannot query ledger") 328 } 329 330 func TestLargeBlockGap(t *testing.T) { 331 // Scenario: the peer knows of a peer who has a ledger height much higher 332 // than itself (500 blocks higher). 333 // The peer needs to ask blocks in a way such that the size of the payload buffer 334 // never rises above a certain threshold. 335 336 mc := &mockCommitter{} 337 blocksPassedToLedger := make(chan uint64, 200) 338 mc.On("Commit", mock.Anything).Run(func(arg mock.Arguments) { 339 blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number 340 }) 341 msgsFromPeer := make(chan proto.ReceivedMessage) 342 mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) 343 g := &mocks.GossipMock{} 344 metaState := NewNodeMetastate(500) 345 md, _ := metaState.Bytes() 346 membership := []discovery.NetworkMember{ 347 { 348 PKIid: common.PKIidType("a"), 349 Endpoint: "a", 350 Metadata: md, 351 }} 352 g.On("PeersOfChannel", mock.Anything).Return(membership) 353 g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) 354 g.On("Accept", mock.Anything, true).Return(nil, msgsFromPeer) 355 g.On("Send", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) { 356 msg := arguments.Get(0).(*proto.GossipMessage) 357 // The peer requested a state request 358 req := msg.GetStateRequest() 359 // Construct a skeleton for the response 360 res := &proto.GossipMessage{ 361 Nonce: msg.Nonce, 362 Channel: []byte(util.GetTestChainID()), 363 Content: &proto.GossipMessage_StateResponse{ 364 StateResponse: &proto.RemoteStateResponse{}, 365 }, 366 } 367 // Populate the response with payloads according to what the peer asked 368 for seq := req.StartSeqNum; seq <= req.EndSeqNum; seq++ { 369 rawblock := pcomm.NewBlock(seq, []byte{}) 370 b, _ := pb.Marshal(rawblock) 371 payload := &proto.Payload{ 372 SeqNum: seq, 373 Data: b, 374 } 375 res.GetStateResponse().Payloads = append(res.GetStateResponse().Payloads, payload) 376 } 377 // Finally, send the response down the channel the peer expects to receive it from 378 sMsg, _ := res.NoopSign() 379 msgsFromPeer <- &comm.ReceivedMessageImpl{ 380 SignedGossipMessage: sMsg, 381 } 382 }) 383 p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g) 384 defer p.shutdown() 385 386 // Process blocks at a speed of 20 Millisecond for each block. 387 // The imaginative peer that responds to state 388 // If the payload buffer expands above defMaxBlockDistance*2 + defAntiEntropyBatchSize blocks, fail the test 389 blockProcessingTime := 20 * time.Millisecond // 10 seconds for total 500 blocks 390 expectedSequence := 1 391 for expectedSequence < 500 { 392 blockSeq := <-blocksPassedToLedger 393 assert.Equal(t, expectedSequence, int(blockSeq)) 394 // Ensure payload buffer isn't over-populated 395 assert.True(t, p.s.payloads.Size() <= defMaxBlockDistance*2+defAntiEntropyBatchSize, "payload buffer size is %d", p.s.payloads.Size()) 396 expectedSequence++ 397 time.Sleep(blockProcessingTime) 398 } 399 } 400 401 func TestOverPopulation(t *testing.T) { 402 // Scenario: Add to the state provider blocks 403 // with a gap in between, and ensure that the payload buffer 404 // rejects blocks starting if the distance between the ledger height to the latest 405 // block it contains is bigger than defMaxBlockDistance. 406 407 mc := &mockCommitter{} 408 blocksPassedToLedger := make(chan uint64, 10) 409 mc.On("Commit", mock.Anything).Run(func(arg mock.Arguments) { 410 blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number 411 }) 412 mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) 413 g := &mocks.GossipMock{} 414 g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) 415 g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) 416 p := newPeerNode(newGossipConfig(0), mc, noopPeerIdentityAcceptor) 417 defer p.shutdown() 418 419 // Add some blocks in a sequential manner and make sure it works 420 for i := 1; i <= 4; i++ { 421 rawblock := pcomm.NewBlock(uint64(i), []byte{}) 422 b, _ := pb.Marshal(rawblock) 423 assert.NoError(t, p.s.addPayload(&proto.Payload{ 424 SeqNum: uint64(i), 425 Data: b, 426 }, nonBlocking)) 427 } 428 429 // Add payloads from 10 to defMaxBlockDistance, while we're missing blocks [5,9] 430 // Should succeed 431 for i := 10; i <= defMaxBlockDistance; i++ { 432 rawblock := pcomm.NewBlock(uint64(i), []byte{}) 433 b, _ := pb.Marshal(rawblock) 434 assert.NoError(t, p.s.addPayload(&proto.Payload{ 435 SeqNum: uint64(i), 436 Data: b, 437 }, nonBlocking)) 438 } 439 440 // Add payloads from defMaxBlockDistance + 2 to defMaxBlockDistance * 10 441 // Should fail. 442 for i := defMaxBlockDistance + 1; i <= defMaxBlockDistance*10; i++ { 443 rawblock := pcomm.NewBlock(uint64(i), []byte{}) 444 b, _ := pb.Marshal(rawblock) 445 assert.Error(t, p.s.addPayload(&proto.Payload{ 446 SeqNum: uint64(i), 447 Data: b, 448 }, nonBlocking)) 449 } 450 451 // Ensure only blocks 1-4 were passed to the ledger 452 close(blocksPassedToLedger) 453 i := 1 454 for seq := range blocksPassedToLedger { 455 assert.Equal(t, uint64(i), seq) 456 i++ 457 } 458 assert.Equal(t, 5, i) 459 460 // Ensure we don't store too many blocks in memory 461 sp := p.s 462 assert.True(t, sp.payloads.Size() < defMaxBlockDistance) 463 } 464 465 func TestBlockingEnqueue(t *testing.T) { 466 // Scenario: In parallel, get blocks from gossip and from the orderer. 467 // The blocks from the orderer we get are X2 times the amount of blocks from gossip. 468 // The blocks we get from gossip are random indices, to maximize disruption. 469 mc := &mockCommitter{} 470 blocksPassedToLedger := make(chan uint64, 10) 471 mc.On("Commit", mock.Anything).Run(func(arg mock.Arguments) { 472 blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number 473 }) 474 mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) 475 g := &mocks.GossipMock{} 476 g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) 477 g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) 478 p := newPeerNode(newGossipConfig(0), mc, noopPeerIdentityAcceptor) 479 defer p.shutdown() 480 481 numBlocksReceived := 500 482 receivedBlockCount := 0 483 // Get a block from the orderer every 1ms 484 go func() { 485 for i := 1; i <= numBlocksReceived; i++ { 486 rawblock := pcomm.NewBlock(uint64(i), []byte{}) 487 b, _ := pb.Marshal(rawblock) 488 block := &proto.Payload{ 489 SeqNum: uint64(i), 490 Data: b, 491 } 492 p.s.AddPayload(block) 493 time.Sleep(time.Millisecond) 494 } 495 }() 496 497 // Get a block from gossip every 1ms too 498 go func() { 499 rand.Seed(time.Now().UnixNano()) 500 for i := 1; i <= numBlocksReceived/2; i++ { 501 blockSeq := rand.Intn(numBlocksReceived) 502 rawblock := pcomm.NewBlock(uint64(blockSeq), []byte{}) 503 b, _ := pb.Marshal(rawblock) 504 block := &proto.Payload{ 505 SeqNum: uint64(blockSeq), 506 Data: b, 507 } 508 p.s.addPayload(block, nonBlocking) 509 time.Sleep(time.Millisecond) 510 } 511 }() 512 513 for { 514 receivedBlock := <-blocksPassedToLedger 515 receivedBlockCount++ 516 m := mock.Mock{} 517 m.On("LedgerHeight", mock.Anything).Return(receivedBlock, nil) 518 m.On("Commit", mock.Anything).Run(func(arg mock.Arguments) { 519 blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number 520 }) 521 mc.Lock() 522 mc.Mock = m 523 mc.Unlock() 524 assert.Equal(t, receivedBlock, uint64(receivedBlockCount)) 525 if int(receivedBlockCount) == numBlocksReceived { 526 break 527 } 528 time.Sleep(time.Millisecond * 10) 529 t.Log("got block", receivedBlock) 530 } 531 } 532 533 func TestFailures(t *testing.T) { 534 mc := &mockCommitter{} 535 mc.On("LedgerHeight", mock.Anything).Return(uint64(0), nil) 536 g := &mocks.GossipMock{} 537 g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil) 538 g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) 539 g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{}) 540 assert.Panics(t, func() { 541 newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g) 542 }) 543 // Reprogram mock 544 mc.Mock = mock.Mock{} 545 mc.On("LedgerHeight", mock.Anything).Return(uint64(1), errors.New("Failed accessing ledger")) 546 assert.Nil(t, newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g)) 547 // Reprogram mock 548 mc.Mock = mock.Mock{} 549 mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) 550 mc.On("GetBlocks", mock.Anything).Return(nil) 551 p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g) 552 assert.Nil(t, p.s.GetBlock(uint64(1))) 553 } 554 555 func TestGossipReception(t *testing.T) { 556 signalChan := make(chan struct{}) 557 rawblock := &pcomm.Block{ 558 Header: &pcomm.BlockHeader{ 559 Number: uint64(1), 560 }, 561 Data: &pcomm.BlockData{ 562 Data: [][]byte{}, 563 }, 564 } 565 b, _ := pb.Marshal(rawblock) 566 567 createChan := func(signalChan chan struct{}) <-chan *proto.GossipMessage { 568 c := make(chan *proto.GossipMessage) 569 gMsg := &proto.GossipMessage{ 570 Channel: []byte("AAA"), 571 Content: &proto.GossipMessage_DataMsg{ 572 DataMsg: &proto.DataMessage{ 573 Payload: &proto.Payload{ 574 SeqNum: 1, 575 Data: b, 576 }, 577 }, 578 }, 579 } 580 go func(c chan *proto.GossipMessage) { 581 // Wait for Accept() to be called 582 <-signalChan 583 // Simulate a message reception from the gossip component with an invalid channel 584 c <- gMsg 585 gMsg.Channel = []byte(util.GetTestChainID()) 586 // Simulate a message reception from the gossip component 587 c <- gMsg 588 }(c) 589 return c 590 } 591 592 g := &mocks.GossipMock{} 593 rmc := createChan(signalChan) 594 g.On("Accept", mock.Anything, false).Return(rmc, nil).Run(func(_ mock.Arguments) { 595 signalChan <- struct{}{} 596 }) 597 g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage)) 598 g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{}) 599 mc := &mockCommitter{} 600 receivedChan := make(chan struct{}) 601 mc.On("Commit", mock.Anything).Run(func(arguments mock.Arguments) { 602 block := arguments.Get(0).(*pcomm.Block) 603 assert.Equal(t, uint64(1), block.Header.Number) 604 receivedChan <- struct{}{} 605 }) 606 mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) 607 p := newPeerNodeWithGossip(newGossipConfig(0), mc, noopPeerIdentityAcceptor, g) 608 defer p.shutdown() 609 select { 610 case <-receivedChan: 611 case <-time.After(time.Second * 15): 612 assert.Fail(t, "Didn't commit a block within a timely manner") 613 } 614 } 615 616 func TestAccessControl(t *testing.T) { 617 viper.Set("peer.fileSystemPath", "/tmp/tests/ledger/node") 618 ledgermgmt.InitializeTestEnv() 619 defer ledgermgmt.CleanupTestEnv() 620 621 bootstrapSetSize := 5 622 bootstrapSet := make([]*peerNode, 0) 623 624 authorizedPeers := map[string]struct{}{ 625 "localhost:5610": {}, 626 "localhost:5615": {}, 627 "localhost:5618": {}, 628 "localhost:5621": {}, 629 } 630 631 blockPullPolicy := func(identity api.PeerIdentityType) error { 632 if _, isAuthorized := authorizedPeers[string(identity)]; isAuthorized { 633 return nil 634 } 635 return errors.New("Not authorized") 636 } 637 638 for i := 0; i < bootstrapSetSize; i++ { 639 commit := newCommitter(i) 640 bootstrapSet = append(bootstrapSet, newPeerNode(newGossipConfig(i), commit, blockPullPolicy)) 641 } 642 643 defer func() { 644 for _, p := range bootstrapSet { 645 p.shutdown() 646 } 647 }() 648 649 msgCount := 5 650 651 for i := 1; i <= msgCount; i++ { 652 rawblock := pcomm.NewBlock(uint64(i), []byte{}) 653 if b, err := pb.Marshal(rawblock); err == nil { 654 payload := &proto.Payload{ 655 SeqNum: uint64(i), 656 Data: b, 657 } 658 bootstrapSet[0].s.AddPayload(payload) 659 } else { 660 t.Fail() 661 } 662 } 663 664 standardPeerSetSize := 10 665 peersSet := make([]*peerNode, 0) 666 667 for i := 0; i < standardPeerSetSize; i++ { 668 commit := newCommitter(bootstrapSetSize + i) 669 peersSet = append(peersSet, newPeerNode(newGossipConfig(bootstrapSetSize+i, 0, 1, 2, 3, 4), commit, blockPullPolicy)) 670 } 671 672 defer func() { 673 for _, p := range peersSet { 674 p.shutdown() 675 } 676 }() 677 678 waitUntilTrueOrTimeout(t, func() bool { 679 for _, p := range peersSet { 680 if len(p.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != bootstrapSetSize+standardPeerSetSize-1 { 681 logger.Debug("Peer discovery has not finished yet") 682 return false 683 } 684 } 685 logger.Debug("All peer discovered each other!!!") 686 return true 687 }, 30*time.Second) 688 689 logger.Debug("Waiting for all blocks to arrive.") 690 waitUntilTrueOrTimeout(t, func() bool { 691 logger.Debug("Trying to see all authorized peers get all blocks, and all non-authorized didn't") 692 for _, p := range peersSet { 693 height, err := p.commit.LedgerHeight() 694 id := fmt.Sprintf("localhost:%d", p.port) 695 if _, isAuthorized := authorizedPeers[id]; isAuthorized { 696 if height != uint64(msgCount+1) || err != nil { 697 return false 698 } 699 } else { 700 if err == nil && height > 1 { 701 assert.Fail(t, "Peer", id, "got message but isn't authorized! Height:", height) 702 } 703 } 704 } 705 logger.Debug("All peers have same ledger height!!!") 706 return true 707 }, 60*time.Second) 708 } 709 710 /*// Simple scenario to start first booting node, gossip a message 711 // then start second node and verify second node also receives it 712 func TestNewGossipStateProvider_GossipingOneMessage(t *testing.T) { 713 bootId := 0 714 ledgerPath := "/tmp/tests/ledger/" 715 defer os.RemoveAll(ledgerPath) 716 717 bootNodeCommitter := newCommitter(bootId, ledgerPath + "node/") 718 defer bootNodeCommitter.Close() 719 720 bootNode := newPeerNode(newGossipConfig(bootId, 100), bootNodeCommitter) 721 defer bootNode.shutdown() 722 723 rawblock := &peer.Block2{} 724 if err := pb.Unmarshal([]byte{}, rawblock); err != nil { 725 t.Fail() 726 } 727 728 if bytes, err := pb.Marshal(rawblock); err == nil { 729 payload := &proto.Payload{1, "", bytes} 730 bootNode.s.AddPayload(payload) 731 } else { 732 t.Fail() 733 } 734 735 waitUntilTrueOrTimeout(t, func() bool { 736 if block := bootNode.s.GetBlock(uint64(1)); block != nil { 737 return true 738 } 739 return false 740 }, 5 * time.Second) 741 742 bootNode.g.Gossip(createDataMsg(uint64(1), []byte{}, "")) 743 744 peerCommitter := newCommitter(1, ledgerPath + "node/") 745 defer peerCommitter.Close() 746 747 peer := newPeerNode(newGossipConfig(1, 100, bootId), peerCommitter) 748 defer peer.shutdown() 749 750 ready := make(chan interface{}) 751 752 go func(p *peerNode) { 753 for len(p.g.GetPeers()) != 1 { 754 time.Sleep(100 * time.Millisecond) 755 } 756 ready <- struct{}{} 757 }(peer) 758 759 select { 760 case <-ready: 761 { 762 break 763 } 764 case <-time.After(1 * time.Second): 765 { 766 t.Fail() 767 } 768 } 769 770 // Let sure anti-entropy will have a chance to bring missing block 771 waitUntilTrueOrTimeout(t, func() bool { 772 if block := peer.s.GetBlock(uint64(1)); block != nil { 773 return true 774 } 775 return false 776 }, 2 * defAntiEntropyInterval + 1 * time.Second) 777 778 block := peer.s.GetBlock(uint64(1)) 779 780 assert.NotNil(t, block) 781 } 782 783 func TestNewGossipStateProvider_RepeatGossipingOneMessage(t *testing.T) { 784 for i := 0; i < 10; i++ { 785 TestNewGossipStateProvider_GossipingOneMessage(t) 786 } 787 }*/ 788 789 func TestNewGossipStateProvider_SendingManyMessages(t *testing.T) { 790 viper.Set("peer.fileSystemPath", "/tmp/tests/ledger/node") 791 ledgermgmt.InitializeTestEnv() 792 defer ledgermgmt.CleanupTestEnv() 793 794 bootstrapSetSize := 5 795 bootstrapSet := make([]*peerNode, 0) 796 797 for i := 0; i < bootstrapSetSize; i++ { 798 commit := newCommitter(i) 799 bootstrapSet = append(bootstrapSet, newPeerNode(newGossipConfig(i), commit, noopPeerIdentityAcceptor)) 800 } 801 802 defer func() { 803 for _, p := range bootstrapSet { 804 p.shutdown() 805 } 806 }() 807 808 msgCount := 10 809 810 for i := 1; i <= msgCount; i++ { 811 rawblock := pcomm.NewBlock(uint64(i), []byte{}) 812 if b, err := pb.Marshal(rawblock); err == nil { 813 payload := &proto.Payload{ 814 SeqNum: uint64(i), 815 Data: b, 816 } 817 bootstrapSet[0].s.AddPayload(payload) 818 } else { 819 t.Fail() 820 } 821 } 822 823 standartPeersSize := 10 824 peersSet := make([]*peerNode, 0) 825 826 for i := 0; i < standartPeersSize; i++ { 827 commit := newCommitter(bootstrapSetSize + i) 828 peersSet = append(peersSet, newPeerNode(newGossipConfig(bootstrapSetSize+i, 0, 1, 2, 3, 4), commit, noopPeerIdentityAcceptor)) 829 } 830 831 defer func() { 832 for _, p := range peersSet { 833 p.shutdown() 834 } 835 }() 836 837 waitUntilTrueOrTimeout(t, func() bool { 838 for _, p := range peersSet { 839 if len(p.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != bootstrapSetSize+standartPeersSize-1 { 840 logger.Debug("Peer discovery has not finished yet") 841 return false 842 } 843 } 844 logger.Debug("All peer discovered each other!!!") 845 return true 846 }, 30*time.Second) 847 848 logger.Debug("Waiting for all blocks to arrive.") 849 waitUntilTrueOrTimeout(t, func() bool { 850 logger.Debug("Trying to see all peers get all blocks") 851 for _, p := range peersSet { 852 height, err := p.commit.LedgerHeight() 853 if height != uint64(msgCount+1) || err != nil { 854 return false 855 } 856 } 857 logger.Debug("All peers have same ledger height!!!") 858 return true 859 }, 60*time.Second) 860 } 861 862 func TestGossipStateProvider_TestStateMessages(t *testing.T) { 863 viper.Set("peer.fileSystemPath", "/tmp/tests/ledger/node") 864 ledgermgmt.InitializeTestEnv() 865 defer ledgermgmt.CleanupTestEnv() 866 867 bootPeer := newPeerNode(newGossipConfig(0), newCommitter(0), noopPeerIdentityAcceptor) 868 defer bootPeer.shutdown() 869 870 peer := newPeerNode(newGossipConfig(1, 0), newCommitter(1), noopPeerIdentityAcceptor) 871 defer peer.shutdown() 872 873 naiveStateMsgPredicate := func(message interface{}) bool { 874 return message.(proto.ReceivedMessage).GetGossipMessage().IsRemoteStateMessage() 875 } 876 877 _, bootCh := bootPeer.g.Accept(naiveStateMsgPredicate, true) 878 _, peerCh := peer.g.Accept(naiveStateMsgPredicate, true) 879 880 wg := sync.WaitGroup{} 881 wg.Add(2) 882 883 go func() { 884 msg := <-bootCh 885 logger.Info("Bootstrap node got message, ", msg) 886 assert.True(t, msg.GetGossipMessage().GetStateRequest() != nil) 887 msg.Respond(&proto.GossipMessage{ 888 Content: &proto.GossipMessage_StateResponse{&proto.RemoteStateResponse{nil}}, 889 }) 890 wg.Done() 891 }() 892 893 go func() { 894 msg := <-peerCh 895 logger.Info("Peer node got an answer, ", msg) 896 assert.True(t, msg.GetGossipMessage().GetStateResponse() != nil) 897 wg.Done() 898 899 }() 900 901 readyCh := make(chan struct{}) 902 go func() { 903 wg.Wait() 904 readyCh <- struct{}{} 905 }() 906 907 time.Sleep(time.Duration(5) * time.Second) 908 logger.Info("Sending gossip message with remote state request") 909 910 chainID := common.ChainID(util.GetTestChainID()) 911 912 peer.g.Send(&proto.GossipMessage{ 913 Content: &proto.GossipMessage_StateRequest{&proto.RemoteStateRequest{0, 1}}, 914 }, &comm.RemotePeer{peer.g.PeersOfChannel(chainID)[0].Endpoint, peer.g.PeersOfChannel(chainID)[0].PKIid}) 915 logger.Info("Waiting until peers exchange messages") 916 917 select { 918 case <-readyCh: 919 { 920 logger.Info("Done!!!") 921 922 } 923 case <-time.After(time.Duration(10) * time.Second): 924 { 925 t.Fail() 926 } 927 } 928 } 929 930 // Start one bootstrap peer and submit defAntiEntropyBatchSize + 5 messages into 931 // local ledger, next spawning a new peer waiting for anti-entropy procedure to 932 // complete missing blocks. Since state transfer messages now batched, it is expected 933 // to see _exactly_ two messages with state transfer response. 934 func TestNewGossipStateProvider_BatchingOfStateRequest(t *testing.T) { 935 viper.Set("peer.fileSystemPath", "/tmp/tests/ledger/node") 936 ledgermgmt.InitializeTestEnv() 937 defer ledgermgmt.CleanupTestEnv() 938 939 bootPeer := newPeerNode(newGossipConfig(0), newCommitter(0), noopPeerIdentityAcceptor) 940 defer bootPeer.shutdown() 941 942 msgCount := defAntiEntropyBatchSize + 5 943 expectedMessagesCnt := 2 944 945 for i := 1; i <= msgCount; i++ { 946 rawblock := pcomm.NewBlock(uint64(i), []byte{}) 947 if b, err := pb.Marshal(rawblock); err == nil { 948 payload := &proto.Payload{ 949 SeqNum: uint64(i), 950 Data: b, 951 } 952 bootPeer.s.AddPayload(payload) 953 } else { 954 t.Fail() 955 } 956 } 957 958 peer := newPeerNode(newGossipConfig(1, 0), newCommitter(1), noopPeerIdentityAcceptor) 959 defer peer.shutdown() 960 961 naiveStateMsgPredicate := func(message interface{}) bool { 962 return message.(proto.ReceivedMessage).GetGossipMessage().IsRemoteStateMessage() 963 } 964 _, peerCh := peer.g.Accept(naiveStateMsgPredicate, true) 965 966 messageCh := make(chan struct{}) 967 stopWaiting := make(chan struct{}) 968 969 // Number of submitted messages is defAntiEntropyBatchSize + 5, therefore 970 // expected number of batches is expectedMessagesCnt = 2. Following go routine 971 // makes sure it receives expected amount of messages and sends signal of success 972 // to continue the test 973 go func(expected int) { 974 cnt := 0 975 for cnt < expected { 976 select { 977 case <-peerCh: 978 { 979 cnt++ 980 } 981 982 case <-stopWaiting: 983 { 984 return 985 } 986 } 987 } 988 989 messageCh <- struct{}{} 990 }(expectedMessagesCnt) 991 992 // Waits for message which indicates that expected number of message batches received 993 // otherwise timeouts after 2 * defAntiEntropyInterval + 1 seconds 994 select { 995 case <-messageCh: 996 { 997 // Once we got message which indicate of two batches being received, 998 // making sure messages indeed committed. 999 waitUntilTrueOrTimeout(t, func() bool { 1000 if len(peer.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != 1 { 1001 logger.Debug("Peer discovery has not finished yet") 1002 return false 1003 } 1004 logger.Debug("All peer discovered each other!!!") 1005 return true 1006 }, 30*time.Second) 1007 1008 logger.Debug("Waiting for all blocks to arrive.") 1009 waitUntilTrueOrTimeout(t, func() bool { 1010 logger.Debug("Trying to see all peers get all blocks") 1011 height, err := peer.commit.LedgerHeight() 1012 if height != uint64(msgCount+1) || err != nil { 1013 return false 1014 } 1015 logger.Debug("All peers have same ledger height!!!") 1016 return true 1017 }, 60*time.Second) 1018 } 1019 case <-time.After(defAntiEntropyInterval*2 + time.Second*1): 1020 { 1021 close(stopWaiting) 1022 t.Fatal("Expected to receive two batches with missing payloads") 1023 } 1024 } 1025 } 1026 1027 func waitUntilTrueOrTimeout(t *testing.T, predicate func() bool, timeout time.Duration) { 1028 ch := make(chan struct{}) 1029 go func() { 1030 logger.Debug("Started to spin off, until predicate will be satisfied.") 1031 for !predicate() { 1032 time.Sleep(1 * time.Second) 1033 } 1034 ch <- struct{}{} 1035 logger.Debug("Done.") 1036 }() 1037 1038 select { 1039 case <-ch: 1040 break 1041 case <-time.After(timeout): 1042 t.Fatal("Timeout has expired") 1043 break 1044 } 1045 logger.Debug("Stop waiting until timeout or true") 1046 }