github.com/yous1230/fabric@v2.0.0-beta.0.20191224111736-74345bee6ac2+incompatible/orderer/common/cluster/replication_test.go (about) 1 /* 2 Copyright IBM Corp. All Rights Reserved. 3 4 SPDX-License-Identifier: Apache-2.0 5 */ 6 7 package cluster_test 8 9 import ( 10 "io/ioutil" 11 "path/filepath" 12 "strings" 13 "testing" 14 "time" 15 16 "github.com/golang/protobuf/proto" 17 "github.com/hyperledger/fabric-protos-go/common" 18 "github.com/hyperledger/fabric-protos-go/msp" 19 "github.com/hyperledger/fabric-protos-go/orderer" 20 "github.com/hyperledger/fabric/bccsp/sw" 21 "github.com/hyperledger/fabric/common/channelconfig" 22 "github.com/hyperledger/fabric/common/configtx" 23 "github.com/hyperledger/fabric/common/flogging" 24 "github.com/hyperledger/fabric/core/comm" 25 "github.com/hyperledger/fabric/orderer/common/cluster" 26 "github.com/hyperledger/fabric/orderer/common/cluster/mocks" 27 "github.com/hyperledger/fabric/orderer/common/localconfig" 28 "github.com/hyperledger/fabric/protoutil" 29 "github.com/pkg/errors" 30 "github.com/stretchr/testify/assert" 31 "github.com/stretchr/testify/mock" 32 "go.uber.org/zap" 33 "go.uber.org/zap/zapcore" 34 ) 35 36 func TestIsReplicationNeeded(t *testing.T) { 37 for _, testCase := range []struct { 38 name string 39 bootBlock *common.Block 40 systemChannelHeight uint64 41 systemChannelError error 42 expectedError string 43 replicationNeeded bool 44 }{ 45 { 46 name: "no replication needed", 47 systemChannelHeight: 100, 48 bootBlock: &common.Block{Header: &common.BlockHeader{Number: 99}}, 49 }, 50 { 51 name: "replication is needed - bootstrap block's index equal to height", 52 systemChannelHeight: 99, 53 bootBlock: &common.Block{Header: &common.BlockHeader{Number: 99}}, 54 replicationNeeded: true, 55 }, 56 { 57 name: "replication is needed - no ledger", 58 systemChannelHeight: 0, 59 bootBlock: &common.Block{Header: &common.BlockHeader{Number: 99}}, 60 replicationNeeded: true, 61 }, 62 { 63 name: "IO error", 64 systemChannelError: errors.New("IO error"), 65 expectedError: "IO error", 66 }, 67 } { 68 t.Run(testCase.name, func(t *testing.T) { 69 ledgerWriter := &mocks.LedgerWriter{} 70 ledgerWriter.On("Height").Return(testCase.systemChannelHeight) 71 72 ledgerFactory := &mocks.LedgerFactory{} 73 ledgerFactory.On("GetOrCreate", "system").Return(ledgerWriter, testCase.systemChannelError) 74 75 r := cluster.Replicator{ 76 Filter: cluster.AnyChannel, 77 Logger: flogging.MustGetLogger("test"), 78 BootBlock: testCase.bootBlock, 79 SystemChannel: "system", 80 LedgerFactory: ledgerFactory, 81 } 82 83 ok, err := r.IsReplicationNeeded() 84 if testCase.expectedError != "" { 85 assert.EqualError(t, err, testCase.expectedError) 86 } else { 87 assert.NoError(t, err) 88 assert.Equal(t, testCase.replicationNeeded, ok) 89 } 90 }) 91 } 92 } 93 94 func TestReplicateChainsFailures(t *testing.T) { 95 for _, testCase := range []struct { 96 name string 97 isProbeResponseDelayed bool 98 latestBlockSeqInOrderer uint64 99 ledgerFactoryError error 100 appendBlockError error 101 expectedPanic string 102 mutateBlocks func([]*common.Block) 103 channelsReturns []cluster.ChannelGenesisBlock 104 badResponse *orderer.DeliverResponse 105 }{ 106 { 107 name: "no block received", 108 expectedPanic: "Failed pulling system channel: " + 109 "failed obtaining the latest block for channel system", 110 isProbeResponseDelayed: true, 111 }, 112 { 113 name: "received service unavailable", 114 expectedPanic: "Failed pulling system channel: " + 115 "failed obtaining the latest block for channel system", 116 badResponse: &orderer.DeliverResponse{ 117 Type: &orderer.DeliverResponse_Status{ 118 Status: common.Status_SERVICE_UNAVAILABLE, 119 }, 120 }, 121 }, 122 { 123 name: "latest block seq is less than boot block seq", 124 expectedPanic: "Failed pulling system channel: " + 125 "latest height found among system channel(system) orderers is 19," + 126 " but the boot block's sequence is 21", 127 latestBlockSeqInOrderer: 18, 128 }, 129 { 130 name: "hash chain mismatch", 131 expectedPanic: "Failed pulling system channel: " + 132 "block header mismatch on sequence 11, " + 133 "expected 9cd61b7e9a5ea2d128cc877e5304e7205888175a8032d40b97db7412dca41d9e, got 010203", 134 latestBlockSeqInOrderer: 21, 135 mutateBlocks: func(systemChannelBlocks []*common.Block) { 136 systemChannelBlocks[len(systemChannelBlocks)/2].Header.PreviousHash = []byte{1, 2, 3} 137 }, 138 }, 139 { 140 name: "last pulled block doesn't match the boot block", 141 expectedPanic: "Block header mismatch on last system channel block," + 142 " expected 8ec93b2ef5ffdc302f0c0e24611be04ad2b17b099a1aeafd7cfb76a95923f146," + 143 " got e428decfc78f8e4c97b26da9c16f9d0b73f886dafa80477a0dd9bac7eb14fe7a", 144 latestBlockSeqInOrderer: 21, 145 mutateBlocks: func(systemChannelBlocks []*common.Block) { 146 systemChannelBlocks[21].Header.DataHash = nil 147 }, 148 }, 149 { 150 name: "failure in creating ledger", 151 latestBlockSeqInOrderer: 21, 152 ledgerFactoryError: errors.New("IO error"), 153 expectedPanic: "Failed to create a ledger for channel system: IO error", 154 }, 155 { 156 name: "failure in appending a block to the ledger", 157 latestBlockSeqInOrderer: 21, 158 appendBlockError: errors.New("IO error"), 159 expectedPanic: "Failed to write block [0]: IO error", 160 }, 161 { 162 name: "failure pulling the system chain", 163 latestBlockSeqInOrderer: 21, 164 expectedPanic: "Failed pulling system channel: " + 165 "failed obtaining the latest block for channel system", 166 isProbeResponseDelayed: true, 167 }, 168 { 169 name: "failure obtaining a ledger for a non participating channel", 170 latestBlockSeqInOrderer: 21, 171 channelsReturns: []cluster.ChannelGenesisBlock{ 172 {ChannelName: "channelWeAreNotPartOf"}, 173 }, 174 ledgerFactoryError: errors.New("IO error"), 175 expectedPanic: "Failed to create a ledger for channel channelWeAreNotPartOf: IO error", 176 }, 177 } { 178 t.Run(testCase.name, func(t *testing.T) { 179 systemChannelBlocks := createBlockChain(0, 21) 180 if testCase.mutateBlocks != nil { 181 testCase.mutateBlocks(systemChannelBlocks) 182 } 183 184 lw := &mocks.LedgerWriter{} 185 lw.On("Append", mock.Anything).Return(testCase.appendBlockError) 186 lw.On("Height").Return(uint64(0)) 187 188 lf := &mocks.LedgerFactory{} 189 lf.On("GetOrCreate", "system").Return(lw, testCase.ledgerFactoryError) 190 lf.On("GetOrCreate", "channelWeAreNotPartOf").Return(lw, testCase.ledgerFactoryError) 191 192 osn := newClusterNode(t) 193 defer osn.stop() 194 195 dialer := newCountingDialer() 196 bp := newBlockPuller(dialer, osn.srv.Address()) 197 // Put a big timeout, to reduce chance of flakes when the server gets stuck 198 // and we get an un-called for timeout. 199 bp.FetchTimeout = time.Hour 200 201 cl := &mocks.ChannelLister{} 202 cl.On("Channels").Return(testCase.channelsReturns) 203 cl.On("Close") 204 205 r := cluster.Replicator{ 206 Filter: cluster.AnyChannel, 207 AmIPartOfChannel: func(configBlock *common.Block) error { 208 return cluster.ErrNotInChannel 209 }, 210 Logger: flogging.MustGetLogger("test"), 211 BootBlock: systemChannelBlocks[21], 212 SystemChannel: "system", 213 LedgerFactory: lf, 214 Puller: bp, 215 ChannelLister: cl, 216 } 217 218 if len(testCase.channelsReturns) > 0 { 219 simulateNonParticipantChannelPull(osn) 220 } 221 222 if testCase.badResponse != nil { 223 osn.blockResponses <- testCase.badResponse 224 } 225 226 if !testCase.isProbeResponseDelayed { 227 osn.enqueueResponse(testCase.latestBlockSeqInOrderer) 228 osn.enqueueResponse(testCase.latestBlockSeqInOrderer) 229 } else { 230 // Send a nil to force an EOF to the client 231 osn.blockResponses <- nil 232 } 233 osn.addExpectProbeAssert() 234 osn.addExpectProbeAssert() 235 osn.addExpectPullAssert(0) 236 237 if !testCase.isProbeResponseDelayed { 238 for _, block := range systemChannelBlocks { 239 osn.blockResponses <- &orderer.DeliverResponse{ 240 Type: &orderer.DeliverResponse_Block{Block: block}, 241 } 242 } 243 } else { 244 // Send a nil to force an EOF to the client 245 osn.blockResponses <- nil 246 } 247 248 assert.PanicsWithValue(t, testCase.expectedPanic, func() { r.ReplicateChains() }) 249 bp.Close() 250 dialer.assertAllConnectionsClosed(t) 251 }) 252 } 253 } 254 255 func TestPullChannelFailure(t *testing.T) { 256 blockchain := createBlockChain(0, 5) 257 for _, testcase := range []struct { 258 name string 259 genesisBlockSequence int 260 thirdBlockSequence int 261 }{ 262 { 263 name: "Failed to pull genesis block", 264 genesisBlockSequence: 1, 265 }, 266 { 267 name: "Failed to pull some non genesis block", 268 genesisBlockSequence: 0, 269 thirdBlockSequence: 0, 270 }, 271 } { 272 t.Run(testcase.name, func(t *testing.T) { 273 lw := &mocks.LedgerWriter{} 274 lw.On("Append", mock.Anything).Return(nil) 275 lw.On("Height").Return(uint64(0)) 276 277 lf := &mocks.LedgerFactory{} 278 lf.On("GetOrCreate", "mychannel").Return(lw, nil) 279 280 osn := newClusterNode(t) 281 defer osn.stop() 282 283 enqueueBlock := func(seq int) { 284 osn.blockResponses <- &orderer.DeliverResponse{ 285 Type: &orderer.DeliverResponse_Block{ 286 Block: blockchain[seq], 287 }, 288 } 289 } 290 291 dialer := newCountingDialer() 292 bp := newBlockPuller(dialer, osn.srv.Address()) 293 // Put a big timeout, to reduce chance of flakes when the server gets stuck 294 // and we get an un-called for timeout. 295 bp.FetchTimeout = time.Hour 296 bp.MaxPullBlockRetries = 1 297 // Do not buffer blocks in memory 298 bp.MaxTotalBufferBytes = 1 299 300 r := cluster.Replicator{ 301 Filter: cluster.AnyChannel, 302 AmIPartOfChannel: func(configBlock *common.Block) error { 303 return nil 304 }, 305 Logger: flogging.MustGetLogger("test"), 306 SystemChannel: "system", 307 LedgerFactory: lf, 308 Puller: bp, 309 } 310 311 osn.addExpectProbeAssert() 312 enqueueBlock(5) 313 osn.addExpectProbeAssert() 314 enqueueBlock(5) 315 osn.addExpectPullAssert(0) 316 enqueueBlock(testcase.genesisBlockSequence) 317 enqueueBlock(1) 318 enqueueBlock(testcase.thirdBlockSequence) 319 320 err := r.PullChannel("mychannel") 321 assert.Equal(t, cluster.ErrRetryCountExhausted, err) 322 }) 323 } 324 325 } 326 327 func TestPullerConfigFromTopLevelConfig(t *testing.T) { 328 signer := &mocks.SignerSerializer{} 329 expected := cluster.PullerConfig{ 330 Channel: "system", 331 MaxTotalBufferBytes: 100, 332 Signer: signer, 333 TLSCert: []byte{3, 2, 1}, 334 TLSKey: []byte{1, 2, 3}, 335 Timeout: time.Hour, 336 } 337 338 topLevelConfig := &localconfig.TopLevel{ 339 General: localconfig.General{ 340 Cluster: localconfig.Cluster{ 341 ReplicationBufferSize: 100, 342 RPCTimeout: time.Hour, 343 }, 344 }, 345 } 346 347 config := cluster.PullerConfigFromTopLevelConfig("system", topLevelConfig, []byte{1, 2, 3}, []byte{3, 2, 1}, signer) 348 assert.Equal(t, expected, config) 349 } 350 351 func TestReplicateChainsChannelClassificationFailure(t *testing.T) { 352 // Scenario: We are unable to classify whether we are part of the channel, 353 // so we crash, because this is a programming error. 354 355 block30WithConfigBlockOf21 := protoutil.NewBlock(30, nil) 356 block30WithConfigBlockOf21.Metadata.Metadata[common.BlockMetadataIndex_LAST_CONFIG] = protoutil.MarshalOrPanic(&common.Metadata{ 357 Value: protoutil.MarshalOrPanic(&common.LastConfig{Index: 21}), 358 }) 359 360 osn := newClusterNode(t) 361 defer osn.stop() 362 osn.blockResponses = make(chan *orderer.DeliverResponse, 1000) 363 364 dialer := newCountingDialer() 365 bp := newBlockPuller(dialer, osn.srv.Address()) 366 bp.FetchTimeout = time.Hour 367 368 channelLister := &mocks.ChannelLister{} 369 channelLister.On("Channels").Return([]cluster.ChannelGenesisBlock{{ChannelName: "A"}}) 370 channelLister.On("Close") 371 372 // We probe for the latest block of the orderer 373 osn.addExpectProbeAssert() 374 osn.enqueueResponse(30) 375 376 // And now pull it again (first poll and then pull it for real). 377 osn.addExpectProbeAssert() 378 osn.enqueueResponse(30) 379 osn.addExpectPullAssert(30) 380 osn.blockResponses <- &orderer.DeliverResponse{ 381 Type: &orderer.DeliverResponse_Block{Block: block30WithConfigBlockOf21}, 382 } 383 // Now we pull the latest config block extracted from the previous block pulled. 384 // Beforehand we reconnect to the orderer, so we put an artificial signal to close the stream on the server side, 385 // in order to expect for a new stream to be established. 386 osn.blockResponses <- nil 387 // The orderer's last block's sequence is 30, 388 osn.addExpectProbeAssert() 389 osn.enqueueResponse(30) 390 // And the Replicator now asks for block 21. 391 osn.enqueueResponse(21) 392 osn.addExpectPullAssert(21) 393 394 r := cluster.Replicator{ 395 Filter: cluster.AnyChannel, 396 AmIPartOfChannel: func(configBlock *common.Block) error { 397 return errors.New("oops") 398 }, 399 Logger: flogging.MustGetLogger("test"), 400 SystemChannel: "system", 401 ChannelLister: channelLister, 402 Puller: bp, 403 } 404 405 assert.PanicsWithValue(t, "Failed classifying whether I belong to channel A: oops, skipping chain retrieval", func() { 406 r.ReplicateChains() 407 }) 408 409 bp.Close() 410 dialer.assertAllConnectionsClosed(t) 411 } 412 413 func TestReplicateChainsGreenPath(t *testing.T) { 414 // Scenario: There are 5 channels in the system: A-E. 415 // We are in channel A but not in channel B, therefore 416 // we should pull channel A and then the system channel. 417 // However, this is not the first attempt of replication for 418 // our node, but the second. 419 // In the past, the node pulled 10 blocks of channel A and crashed. 420 // Therefore, it should pull blocks, but commit for channel A 421 // only blocks starting from block number 10. 422 // For channel C - we are forbidden from pulling any blocks. 423 // Channel D is a deserted channel - all OSNs have left it, 424 // therefore we should not pull it at all. 425 // Channel E cannot be pulled at all, due to the OSN being unavailable 426 // at that time. 427 428 systemChannelBlocks := createBlockChain(0, 21) 429 block30WithConfigBlockOf21 := protoutil.NewBlock(30, nil) 430 block30WithConfigBlockOf21.Metadata.Metadata[common.BlockMetadataIndex_LAST_CONFIG] = protoutil.MarshalOrPanic(&common.Metadata{ 431 Value: protoutil.MarshalOrPanic(&common.LastConfig{Index: 21}), 432 }) 433 434 osn := newClusterNode(t) 435 defer osn.stop() 436 osn.blockResponses = make(chan *orderer.DeliverResponse, 1000) 437 438 dialer := newCountingDialer() 439 bp := newBlockPuller(dialer, osn.srv.Address()) 440 bp.FetchTimeout = time.Hour 441 bp.MaxPullBlockRetries = 1 442 443 channelLister := &mocks.ChannelLister{} 444 channelLister.On("Channels").Return([]cluster.ChannelGenesisBlock{ 445 {ChannelName: "E", GenesisBlock: fakeGB}, 446 {ChannelName: "D", GenesisBlock: fakeGB}, {ChannelName: "C", GenesisBlock: fakeGB}, 447 {ChannelName: "A", GenesisBlock: fakeGB}, {ChannelName: "B", GenesisBlock: fakeGB}, 448 }) 449 channelLister.On("Close") 450 451 amIPartOfChannelMock := &mock.Mock{} 452 // For channel A 453 amIPartOfChannelMock.On("func13").Return(nil).Once() 454 // For channel B 455 amIPartOfChannelMock.On("func13").Return(cluster.ErrNotInChannel).Once() 456 457 // 22 is for the system channel, and 31 is for channel A, and for channel B we only need 1 block (the GB). 458 blocksCommittedToLedgerA := make(chan *common.Block, 31) 459 blocksCommittedToLedgerB := make(chan *common.Block, 1) 460 blocksCommittedToLedgerC := make(chan *common.Block, 1) 461 blocksCommittedToLedgerD := make(chan *common.Block, 1) 462 blocksCommittedToLedgerE := make(chan *common.Block, 1) 463 blocksCommittedToSystemLedger := make(chan *common.Block, 22) 464 // Put 10 blocks in the ledger of channel A, to simulate 465 // that the ledger had blocks when the node started. 466 for seq := 0; seq < 10; seq++ { 467 blocksCommittedToLedgerA <- &common.Block{ 468 Header: &common.BlockHeader{Number: uint64(seq)}, 469 } 470 } 471 472 lwA := &mocks.LedgerWriter{} 473 lwA.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) { 474 blocksCommittedToLedgerA <- arg.Get(0).(*common.Block) 475 }) 476 lwA.On("Height").Return(func() uint64 { 477 return uint64(len(blocksCommittedToLedgerA)) 478 }) 479 480 lwB := &mocks.LedgerWriter{} 481 lwB.On("Height").Return(func() uint64 { 482 return uint64(len(blocksCommittedToLedgerB)) 483 }) 484 lwB.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) { 485 blocksCommittedToLedgerB <- arg.Get(0).(*common.Block) 486 }) 487 488 lwC := &mocks.LedgerWriter{} 489 lwC.On("Height").Return(func() uint64 { 490 return uint64(len(blocksCommittedToLedgerC)) 491 }) 492 lwC.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) { 493 blocksCommittedToLedgerC <- arg.Get(0).(*common.Block) 494 }) 495 496 lwD := &mocks.LedgerWriter{} 497 lwD.On("Height").Return(func() uint64 { 498 return uint64(len(blocksCommittedToLedgerD)) 499 }) 500 lwD.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) { 501 blocksCommittedToLedgerD <- arg.Get(0).(*common.Block) 502 }) 503 504 lwE := &mocks.LedgerWriter{} 505 lwE.On("Height").Return(func() uint64 { 506 return uint64(len(blocksCommittedToLedgerE)) 507 }) 508 lwE.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) { 509 blocksCommittedToLedgerE <- arg.Get(0).(*common.Block) 510 }) 511 512 lwSystem := &mocks.LedgerWriter{} 513 lwSystem.On("Append", mock.Anything).Return(nil).Run(func(arg mock.Arguments) { 514 blocksCommittedToSystemLedger <- arg.Get(0).(*common.Block) 515 }) 516 lwSystem.On("Height").Return(func() uint64 { 517 return uint64(len(blocksCommittedToSystemLedger)) 518 }) 519 520 lf := &mocks.LedgerFactory{} 521 lf.On("Close") 522 lf.On("GetOrCreate", "A").Return(lwA, nil) 523 lf.On("GetOrCreate", "B").Return(lwB, nil) 524 lf.On("GetOrCreate", "C").Return(lwC, nil) 525 lf.On("GetOrCreate", "D").Return(lwD, nil) 526 lf.On("GetOrCreate", "E").Return(lwE, nil) 527 lf.On("GetOrCreate", "system").Return(lwSystem, nil) 528 529 r := cluster.Replicator{ 530 Filter: cluster.AnyChannel, 531 LedgerFactory: lf, 532 AmIPartOfChannel: func(configBlock *common.Block) error { 533 return amIPartOfChannelMock.Called().Error(0) 534 }, 535 Logger: flogging.MustGetLogger("test"), 536 SystemChannel: "system", 537 ChannelLister: channelLister, 538 Puller: bp, 539 BootBlock: systemChannelBlocks[21], 540 } 541 542 // The first thing the orderer gets is a seek to channel E. 543 // Unfortunately, it's not available! 544 osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) { 545 // Ensure the seek came to the right channel 546 assert.NotNil(osn.t, info.GetStart().GetNewest()) 547 assert.Equal(t, "E", actualChannel) 548 } 549 // Send an EOF down the stream. 550 osn.blockResponses <- nil 551 552 // The second thing the orderer gets is a seek to channel D, 553 // which is followed by a response of service unavailable 554 osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) { 555 // Ensure the seek came to the right channel 556 assert.NotNil(osn.t, info.GetStart().GetNewest()) 557 assert.Equal(t, "D", actualChannel) 558 } 559 osn.blockResponses <- &orderer.DeliverResponse{ 560 Type: &orderer.DeliverResponse_Status{ 561 Status: common.Status_SERVICE_UNAVAILABLE, 562 }, 563 } 564 565 // The third thing the orderer gets is a seek to channel C, 566 // which is followed by a response of forbidden 567 osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) { 568 // Ensure the seek came to the right channel 569 assert.NotNil(osn.t, info.GetStart().GetNewest()) 570 assert.Equal(t, "C", actualChannel) 571 } 572 573 osn.blockResponses <- &orderer.DeliverResponse{ 574 Type: &orderer.DeliverResponse_Status{ 575 Status: common.Status_FORBIDDEN, 576 }, 577 } 578 579 for _, channel := range []string{"A", "B"} { 580 channel := channel 581 // First, the orderer needs to figure out whether it is in the channel, 582 // so it reaches to find the latest block from all orderers to get 583 // the latest config block and see whether it is among the consenters. 584 585 // Orderer is expecting a poll for last block of the current channel 586 osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) { 587 // Ensure the seek came to the right channel 588 assert.NotNil(osn.t, info.GetStart().GetNewest()) 589 assert.Equal(t, channel, actualChannel) 590 } 591 592 // Orderer returns its last block is 30. 593 // This is needed to get the latest height by comparing among all orderers. 594 osn.enqueueResponse(30) 595 596 // First we poll for the block sequence we got previously again, from some orderer. 597 osn.addExpectProbeAssert() 598 osn.enqueueResponse(30) 599 600 // And afterwards pull the block from the first orderer. 601 osn.addExpectPullAssert(30) 602 osn.blockResponses <- &orderer.DeliverResponse{ 603 Type: &orderer.DeliverResponse_Block{Block: block30WithConfigBlockOf21}, 604 } 605 // And the last config block is pulled via reconnecting to the orderer. 606 osn.blockResponses <- nil 607 // The orderer's last block's sequence is 30, 608 osn.addExpectProbeAssert() 609 osn.enqueueResponse(30) 610 // And the Replicator now asks for block 21. 611 osn.enqueueResponse(21) 612 osn.addExpectPullAssert(21) 613 // We always close the connection before attempting to pull the next block 614 osn.blockResponses <- nil 615 } 616 617 // Next, the Replicator figures out the latest block sequence for that chain 618 // to know until when to pull 619 620 // We expect a probe for channel A only, because channel B isn't in the channel 621 osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) { 622 // Ensure the seek came to the right channel 623 assert.NotNil(osn.t, info.GetStart().GetNewest()) 624 assert.Equal(t, "A", actualChannel) 625 } 626 osn.enqueueResponse(30) 627 // From this point onwards, we pull the blocks for the chain. 628 osn.enqueueResponse(30) 629 osn.addExpectProbeAssert() 630 osn.addExpectPullAssert(10) 631 // Enqueue 31 blocks in its belly 632 for _, block := range createBlockChain(10, 30) { 633 osn.blockResponses <- &orderer.DeliverResponse{ 634 Type: &orderer.DeliverResponse_Block{Block: block}, 635 } 636 } 637 // Signal the orderer to stop sending us blocks since we're going to reconnect 638 // to it to ask for the next channel 639 osn.blockResponses <- nil 640 641 // Now we define assertions for the system channel 642 // Pull assertions for the system channel 643 osn.seekAssertions <- func(info *orderer.SeekInfo, actualChannel string) { 644 // Ensure the seek came to the system channel. 645 assert.NotNil(osn.t, info.GetStart().GetNewest()) 646 assert.Equal(t, "system", actualChannel) 647 } 648 osn.blockResponses <- &orderer.DeliverResponse{ 649 Type: &orderer.DeliverResponse_Block{Block: systemChannelBlocks[21]}, 650 } 651 osn.addExpectProbeAssert() 652 osn.enqueueResponse(21) 653 osn.addExpectPullAssert(0) 654 for _, block := range systemChannelBlocks { 655 osn.blockResponses <- &orderer.DeliverResponse{ 656 Type: &orderer.DeliverResponse_Block{Block: block}, 657 } 658 } 659 660 // This is where all the work is done. 661 // The above lines were all assertions and preparations 662 // for the expected flow of the test. 663 r.ReplicateChains() 664 665 // We replicated the chains, so all that left is to ensure 666 // the blocks were committed in order, and all blocks we expected 667 // to be committed (for channel A and the system channel) were committed. 668 close(blocksCommittedToLedgerA) 669 close(blocksCommittedToSystemLedger) 670 assert.Len(t, blocksCommittedToLedgerA, cap(blocksCommittedToLedgerA)) 671 assert.Len(t, blocksCommittedToSystemLedger, cap(blocksCommittedToSystemLedger)) 672 assert.Len(t, blocksCommittedToLedgerB, 1) 673 assert.Len(t, blocksCommittedToLedgerC, 1) 674 assert.Len(t, blocksCommittedToLedgerD, 1) 675 assert.Len(t, blocksCommittedToLedgerE, 1) 676 // Count the blocks for channel A 677 var expectedSequence uint64 678 for block := range blocksCommittedToLedgerA { 679 assert.Equal(t, expectedSequence, block.Header.Number) 680 expectedSequence++ 681 } 682 683 // Count the blocks for the system channel 684 expectedSequence = uint64(0) 685 for block := range blocksCommittedToSystemLedger { 686 assert.Equal(t, expectedSequence, block.Header.Number) 687 expectedSequence++ 688 } 689 690 bp.Close() 691 dialer.assertAllConnectionsClosed(t) 692 } 693 694 func TestParticipant(t *testing.T) { 695 for _, testCase := range []struct { 696 name string 697 heightsByEndpoints map[string]uint64 698 heightsByEndpointsErr error 699 latestBlockSeq uint64 700 latestBlock *common.Block 701 latestConfigBlockSeq uint64 702 latestConfigBlock *common.Block 703 expectedError string 704 predicateReturns error 705 }{ 706 { 707 name: "No available orderer", 708 expectedError: cluster.ErrRetryCountExhausted.Error(), 709 }, 710 { 711 name: "Unauthorized for the channel", 712 expectedError: cluster.ErrForbidden.Error(), 713 heightsByEndpointsErr: cluster.ErrForbidden, 714 }, 715 { 716 name: "No OSN services the channel", 717 expectedError: cluster.ErrServiceUnavailable.Error(), 718 heightsByEndpointsErr: cluster.ErrServiceUnavailable, 719 }, 720 { 721 name: "Pulled block has no metadata", 722 heightsByEndpoints: map[string]uint64{ 723 "orderer.example.com:7050": 100, 724 }, 725 latestBlockSeq: uint64(99), 726 latestBlock: &common.Block{}, 727 expectedError: "no metadata in block", 728 }, 729 { 730 name: "Pulled block has no last config sequence in metadata", 731 heightsByEndpoints: map[string]uint64{ 732 "orderer.example.com:7050": 100, 733 }, 734 latestBlockSeq: uint64(99), 735 latestBlock: &common.Block{ 736 Metadata: &common.BlockMetadata{ 737 Metadata: [][]byte{{1, 2, 3}}, 738 }, 739 }, 740 expectedError: "no metadata in block", 741 }, 742 { 743 name: "Pulled block's metadata is malformed", 744 heightsByEndpoints: map[string]uint64{ 745 "orderer.example.com:7050": 100, 746 }, 747 latestBlockSeq: uint64(99), 748 latestBlock: &common.Block{ 749 Metadata: &common.BlockMetadata{ 750 Metadata: [][]byte{{1, 2, 3}, {1, 2, 3}}, 751 }, 752 }, 753 expectedError: "error unmarshaling metadata from" + 754 " block at index [LAST_CONFIG]: proto: common.Metadata: illegal tag 0 (wire type 1)", 755 }, 756 { 757 name: "Pulled block's metadata is valid and has a last config", 758 heightsByEndpoints: map[string]uint64{ 759 "orderer.example.com:7050": 100, 760 }, 761 latestBlockSeq: uint64(99), 762 latestBlock: &common.Block{ 763 Metadata: &common.BlockMetadata{ 764 Metadata: [][]byte{{1, 2, 3}, protoutil.MarshalOrPanic(&common.Metadata{ 765 Value: protoutil.MarshalOrPanic(&common.LastConfig{ 766 Index: 42, 767 }), 768 })}, 769 }, 770 }, 771 latestConfigBlockSeq: 42, 772 latestConfigBlock: &common.Block{Header: &common.BlockHeader{Number: 42}}, 773 predicateReturns: cluster.ErrNotInChannel, 774 }, 775 { 776 name: "Failed pulling last block", 777 expectedError: cluster.ErrRetryCountExhausted.Error(), 778 heightsByEndpoints: map[string]uint64{ 779 "orderer.example.com:7050": 100, 780 }, 781 latestBlockSeq: uint64(99), 782 latestBlock: nil, 783 }, 784 { 785 name: "Failed pulling last config block", 786 expectedError: cluster.ErrRetryCountExhausted.Error(), 787 heightsByEndpoints: map[string]uint64{ 788 "orderer.example.com:7050": 100, 789 }, 790 latestBlockSeq: uint64(99), 791 latestBlock: &common.Block{ 792 Metadata: &common.BlockMetadata{ 793 Metadata: [][]byte{{1, 2, 3}, protoutil.MarshalOrPanic(&common.Metadata{ 794 Value: protoutil.MarshalOrPanic(&common.LastConfig{ 795 Index: 42, 796 }), 797 })}, 798 }, 799 }, 800 latestConfigBlockSeq: 42, 801 latestConfigBlock: nil, 802 }, 803 } { 804 t.Run(testCase.name, func(t *testing.T) { 805 configBlocks := make(chan *common.Block, 1) 806 predicate := func(configBlock *common.Block) error { 807 configBlocks <- configBlock 808 return testCase.predicateReturns 809 } 810 puller := &mocks.ChainPuller{} 811 puller.On("HeightsByEndpoints").Return(testCase.heightsByEndpoints, testCase.heightsByEndpointsErr) 812 puller.On("PullBlock", testCase.latestBlockSeq).Return(testCase.latestBlock) 813 puller.On("PullBlock", testCase.latestConfigBlockSeq).Return(testCase.latestConfigBlock) 814 puller.On("Close") 815 816 err := cluster.Participant(puller, predicate) 817 if testCase.expectedError != "" { 818 assert.EqualError(t, err, testCase.expectedError) 819 assert.Len(t, configBlocks, 0) 820 } else { 821 assert.Len(t, configBlocks, 1) 822 assert.Equal(t, err, testCase.predicateReturns) 823 } 824 }) 825 } 826 } 827 828 func TestBlockPullerFromConfigBlockFailures(t *testing.T) { 829 blockBytes, err := ioutil.ReadFile("testdata/mychannel.block") 830 assert.NoError(t, err) 831 832 validBlock := &common.Block{} 833 assert.NoError(t, proto.Unmarshal(blockBytes, validBlock)) 834 835 cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore()) 836 assert.NoError(t, err) 837 838 for _, testCase := range []struct { 839 name string 840 expectedErr string 841 pullerConfig cluster.PullerConfig 842 block *common.Block 843 }{ 844 { 845 name: "nil block", 846 expectedErr: "nil block", 847 }, 848 { 849 name: "invalid block", 850 expectedErr: "block data is nil", 851 block: &common.Block{}, 852 }, 853 { 854 name: "bad envelope inside block", 855 expectedErr: "failed extracting bundle from envelope: " + 856 "failed to unmarshal payload from envelope: " + 857 "error unmarshaling Payload: " + 858 "proto: common.Payload: illegal tag 0 (wire type 1)", 859 block: &common.Block{ 860 Data: &common.BlockData{ 861 Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{ 862 Payload: []byte{1, 2, 3}, 863 })}, 864 }, 865 }, 866 }, 867 { 868 name: "invalid TLS certificate", 869 expectedErr: "unable to decode TLS certificate PEM: ////", 870 block: validBlock, 871 pullerConfig: cluster.PullerConfig{ 872 TLSCert: []byte{255, 255, 255}, 873 }, 874 }, 875 } { 876 t.Run(testCase.name, func(t *testing.T) { 877 verifierRetriever := &mocks.VerifierRetriever{} 878 verifierRetriever.On("RetrieveVerifier", mock.Anything).Return(&cluster.NoopBlockVerifier{}) 879 bp, err := cluster.BlockPullerFromConfigBlock(testCase.pullerConfig, testCase.block, verifierRetriever, cryptoProvider) 880 assert.EqualError(t, err, testCase.expectedErr) 881 assert.Nil(t, bp) 882 }) 883 } 884 } 885 886 func testBlockPullerFromConfig(t *testing.T, blockVerifiers []cluster.BlockVerifier, expectedLogMsg string, iterations int) { 887 verifierRetriever := &mocks.VerifierRetriever{} 888 for _, blockVerifier := range blockVerifiers { 889 verifierRetriever.On("RetrieveVerifier", mock.Anything).Return(blockVerifier).Once() 890 } 891 892 caCert, err := ioutil.ReadFile(filepath.Join("testdata", "ca.crt")) 893 assert.NoError(t, err) 894 895 tlsCert, err := ioutil.ReadFile(filepath.Join("testdata", "server.crt")) 896 assert.NoError(t, err) 897 898 tlsKey, err := ioutil.ReadFile(filepath.Join("testdata", "server.key")) 899 assert.NoError(t, err) 900 901 osn := newClusterNode(t) 902 osn.srv.Stop() 903 // Replace the gRPC server with a TLS one 904 osn.srv, err = comm.NewGRPCServer("127.0.0.1:0", comm.ServerConfig{ 905 SecOpts: comm.SecureOptions{ 906 Key: tlsKey, 907 RequireClientCert: true, 908 Certificate: tlsCert, 909 ClientRootCAs: [][]byte{caCert}, 910 UseTLS: true, 911 }, 912 }) 913 assert.NoError(t, err) 914 orderer.RegisterAtomicBroadcastServer(osn.srv.Server(), osn) 915 // And start it 916 go osn.srv.Start() 917 defer osn.stop() 918 919 // Start from a valid configuration block 920 blockBytes, err := ioutil.ReadFile(filepath.Join("testdata", "mychannel.block")) 921 assert.NoError(t, err) 922 923 validBlock := &common.Block{} 924 assert.NoError(t, proto.Unmarshal(blockBytes, validBlock)) 925 926 // And inject into it a 127.0.0.1 orderer endpoint endpoint and a new TLS CA certificate. 927 injectTLSCACert(t, validBlock, caCert) 928 injectGlobalOrdererEndpoint(t, validBlock, osn.srv.Address()) 929 validBlock.Header.DataHash = protoutil.BlockDataHash(validBlock.Data) 930 931 for attempt := 0; attempt < iterations; attempt++ { 932 blockMsg := &orderer.DeliverResponse_Block{ 933 Block: validBlock, 934 } 935 936 osn.blockResponses <- &orderer.DeliverResponse{ 937 Type: blockMsg, 938 } 939 940 osn.blockResponses <- &orderer.DeliverResponse{ 941 Type: blockMsg, 942 } 943 944 osn.blockResponses <- nil 945 946 osn.addExpectProbeAssert() 947 osn.addExpectPullAssert(0) 948 } 949 950 cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore()) 951 assert.NoError(t, err) 952 953 bp, err := cluster.BlockPullerFromConfigBlock(cluster.PullerConfig{ 954 TLSCert: tlsCert, 955 TLSKey: tlsKey, 956 MaxTotalBufferBytes: 1, 957 Channel: "mychannel", 958 Signer: &mocks.SignerSerializer{}, 959 Timeout: time.Hour, 960 }, validBlock, verifierRetriever, cryptoProvider) 961 bp.RetryTimeout = time.Millisecond * 10 962 assert.NoError(t, err) 963 defer bp.Close() 964 965 var seenExpectedLogMsg bool 966 967 bp.Logger = bp.Logger.WithOptions(zap.Hooks(func(entry zapcore.Entry) error { 968 if strings.Contains(entry.Message, expectedLogMsg) { 969 seenExpectedLogMsg = true 970 } 971 return nil 972 })) 973 974 block := bp.PullBlock(0) 975 assert.Equal(t, uint64(0), block.Header.Number) 976 assert.True(t, seenExpectedLogMsg) 977 } 978 979 func TestSkipPullingPulledChannels(t *testing.T) { 980 blockchain := createBlockChain(0, 5) 981 lw := &mocks.LedgerWriter{} 982 lw.On("Height").Return(uint64(6)) 983 984 lf := &mocks.LedgerFactory{} 985 lf.On("GetOrCreate", "mychannel").Return(lw, nil) 986 987 osn := newClusterNode(t) 988 defer osn.stop() 989 990 enqueueBlock := func(seq int) { 991 osn.blockResponses <- &orderer.DeliverResponse{ 992 Type: &orderer.DeliverResponse_Block{ 993 Block: blockchain[seq], 994 }, 995 } 996 } 997 998 dialer := newCountingDialer() 999 bp := newBlockPuller(dialer, osn.srv.Address()) 1000 bp.FetchTimeout = time.Hour 1001 1002 r := cluster.Replicator{ 1003 Filter: cluster.AnyChannel, 1004 AmIPartOfChannel: func(configBlock *common.Block) error { 1005 return nil 1006 }, 1007 Logger: flogging.MustGetLogger("test"), 1008 SystemChannel: "system", 1009 LedgerFactory: lf, 1010 Puller: bp, 1011 } 1012 1013 var detectedChannelPulled bool 1014 r.Logger = r.Logger.WithOptions(zap.Hooks(func(entry zapcore.Entry) error { 1015 if strings.Contains(entry.Message, "Latest height found (6) is equal to our height, skipping pulling channel mychannel") { 1016 detectedChannelPulled = true 1017 } 1018 return nil 1019 })) 1020 1021 osn.addExpectProbeAssert() 1022 enqueueBlock(5) 1023 osn.addExpectProbeAssert() 1024 enqueueBlock(5) 1025 1026 err := r.PullChannel("mychannel") 1027 assert.NoError(t, err) 1028 assert.True(t, detectedChannelPulled) 1029 } 1030 1031 func TestBlockPullerFromConfigBlockGreenPath(t *testing.T) { 1032 for _, testCase := range []struct { 1033 description string 1034 blockVerifiers []cluster.BlockVerifier 1035 expectedLogMessage string 1036 iterations int 1037 }{ 1038 { 1039 description: "Success", 1040 blockVerifiers: []cluster.BlockVerifier{&cluster.NoopBlockVerifier{}}, 1041 expectedLogMessage: "Got block [0] of size", 1042 iterations: 1, 1043 }, 1044 { 1045 description: "Failure", 1046 iterations: 2, 1047 // First time it returns nil, second time returns like the success case 1048 blockVerifiers: []cluster.BlockVerifier{nil, &cluster.NoopBlockVerifier{}}, 1049 expectedLogMessage: "Failed verifying received blocks: " + 1050 "couldn't acquire verifier for channel mychannel", 1051 }, 1052 } { 1053 t.Run(testCase.description, func(t *testing.T) { 1054 testBlockPullerFromConfig(t, testCase.blockVerifiers, 1055 testCase.expectedLogMessage, testCase.iterations) 1056 }) 1057 } 1058 } 1059 1060 func TestNoopBlockVerifier(t *testing.T) { 1061 v := &cluster.NoopBlockVerifier{} 1062 assert.Nil(t, v.VerifyBlockSignature(nil, nil)) 1063 } 1064 1065 func injectGlobalOrdererEndpoint(t *testing.T, block *common.Block, endpoint string) { 1066 ordererAddresses := channelconfig.OrdererAddressesValue([]string{endpoint}) 1067 // Unwrap the layers until we reach the orderer addresses 1068 env, err := protoutil.ExtractEnvelope(block, 0) 1069 assert.NoError(t, err) 1070 payload, err := protoutil.UnmarshalPayload(env.Payload) 1071 assert.NoError(t, err) 1072 confEnv, err := configtx.UnmarshalConfigEnvelope(payload.Data) 1073 assert.NoError(t, err) 1074 // Replace the orderer addresses 1075 confEnv.Config.ChannelGroup.Values[ordererAddresses.Key()].Value = protoutil.MarshalOrPanic(ordererAddresses.Value()) 1076 // Remove the per org addresses, if applicable 1077 ordererGrps := confEnv.Config.ChannelGroup.Groups[channelconfig.OrdererGroupKey].Groups 1078 for _, grp := range ordererGrps { 1079 if grp.Values[channelconfig.EndpointsKey] == nil { 1080 continue 1081 } 1082 grp.Values[channelconfig.EndpointsKey].Value = nil 1083 } 1084 // And put it back into the block 1085 payload.Data = protoutil.MarshalOrPanic(confEnv) 1086 env.Payload = protoutil.MarshalOrPanic(payload) 1087 block.Data.Data[0] = protoutil.MarshalOrPanic(env) 1088 } 1089 1090 func injectTLSCACert(t *testing.T, block *common.Block, tlsCA []byte) { 1091 // Unwrap the layers until we reach the TLS CA certificates 1092 env, err := protoutil.ExtractEnvelope(block, 0) 1093 assert.NoError(t, err) 1094 payload, err := protoutil.UnmarshalPayload(env.Payload) 1095 assert.NoError(t, err) 1096 confEnv, err := configtx.UnmarshalConfigEnvelope(payload.Data) 1097 assert.NoError(t, err) 1098 mspKey := confEnv.Config.ChannelGroup.Groups[channelconfig.OrdererGroupKey].Groups["OrdererOrg"].Values[channelconfig.MSPKey] 1099 rawMSPConfig := mspKey.Value 1100 mspConf := &msp.MSPConfig{} 1101 proto.Unmarshal(rawMSPConfig, mspConf) 1102 fabricMSPConf := &msp.FabricMSPConfig{} 1103 proto.Unmarshal(mspConf.Config, fabricMSPConf) 1104 // Replace the TLS root certs with the given ones 1105 fabricMSPConf.TlsRootCerts = [][]byte{tlsCA} 1106 // And put it back into the block 1107 mspConf.Config = protoutil.MarshalOrPanic(fabricMSPConf) 1108 mspKey.Value = protoutil.MarshalOrPanic(mspConf) 1109 payload.Data = protoutil.MarshalOrPanic(confEnv) 1110 env.Payload = protoutil.MarshalOrPanic(payload) 1111 block.Data.Data[0] = protoutil.MarshalOrPanic(env) 1112 } 1113 1114 func TestExtractGenesisBlock(t *testing.T) { 1115 for _, testCase := range []struct { 1116 name string 1117 expectedErr string 1118 returnedName string 1119 block *common.Block 1120 returnGenesisBlock bool 1121 }{ 1122 { 1123 name: "nil block", 1124 expectedErr: "nil block", 1125 }, 1126 { 1127 name: "no data section in block", 1128 expectedErr: "block data is nil", 1129 block: &common.Block{}, 1130 }, 1131 { 1132 name: "corrupt envelope in block", 1133 expectedErr: "block data does not carry an" + 1134 " envelope at index 0: error unmarshaling Envelope: proto: common.Envelope: illegal tag 0 (wire type 1)", 1135 block: &common.Block{ 1136 Data: &common.BlockData{ 1137 Data: [][]byte{{1, 2, 3}}, 1138 }, 1139 }, 1140 }, 1141 { 1142 name: "corrupt payload in envelope", 1143 expectedErr: "error unmarshaling Payload: proto: common.Payload: illegal tag 0 (wire type 1)", 1144 block: &common.Block{ 1145 Data: &common.BlockData{ 1146 Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{ 1147 Payload: []byte{1, 2, 3}, 1148 })}, 1149 }, 1150 }, 1151 }, 1152 { 1153 name: "no header in block", 1154 expectedErr: "nil header in payload", 1155 block: &common.Block{ 1156 Data: &common.BlockData{ 1157 Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{ 1158 Payload: protoutil.MarshalOrPanic(&common.Payload{}), 1159 })}, 1160 }, 1161 }, 1162 }, 1163 { 1164 name: "corrupt channel header", 1165 expectedErr: "error unmarshaling ChannelHeader:" + 1166 " proto: common.ChannelHeader: illegal tag 0 (wire type 1)", 1167 block: &common.Block{ 1168 Data: &common.BlockData{ 1169 Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{ 1170 Payload: protoutil.MarshalOrPanic(&common.Payload{ 1171 Header: &common.Header{ 1172 ChannelHeader: []byte{1, 2, 3}, 1173 }, 1174 }), 1175 })}, 1176 }, 1177 }, 1178 }, 1179 { 1180 name: "not an orderer transaction", 1181 expectedErr: "", 1182 block: &common.Block{ 1183 Data: &common.BlockData{ 1184 Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{ 1185 Payload: protoutil.MarshalOrPanic(&common.Payload{ 1186 Header: &common.Header{ 1187 ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{ 1188 Type: int32(common.HeaderType_CONFIG_UPDATE), 1189 }), 1190 }, 1191 }), 1192 })}, 1193 }, 1194 }, 1195 }, 1196 { 1197 name: "orderer transaction with corrupt inner envelope", 1198 expectedErr: "error unmarshaling Envelope: proto: common.Envelope: illegal tag 0 (wire type 1)", 1199 block: &common.Block{ 1200 Data: &common.BlockData{ 1201 Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{ 1202 Payload: protoutil.MarshalOrPanic(&common.Payload{ 1203 Header: &common.Header{ 1204 ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{ 1205 Type: int32(common.HeaderType_ORDERER_TRANSACTION), 1206 }), 1207 }, 1208 Data: []byte{1, 2, 3}, 1209 }), 1210 })}, 1211 }, 1212 }, 1213 }, 1214 { 1215 name: "orderer transaction with corrupt inner payload", 1216 expectedErr: "error unmarshaling Payload: proto: common.Payload: illegal tag 0 (wire type 1)", 1217 block: &common.Block{ 1218 Data: &common.BlockData{ 1219 Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{ 1220 Payload: protoutil.MarshalOrPanic(&common.Payload{ 1221 Header: &common.Header{ 1222 ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{ 1223 Type: int32(common.HeaderType_ORDERER_TRANSACTION), 1224 }), 1225 }, 1226 Data: protoutil.MarshalOrPanic(&common.Envelope{ 1227 Payload: []byte{1, 2, 3}, 1228 }), 1229 }), 1230 })}, 1231 }, 1232 }, 1233 }, 1234 { 1235 name: "orderer transaction with nil inner header", 1236 expectedErr: "inner payload's header is nil", 1237 block: &common.Block{ 1238 Data: &common.BlockData{ 1239 Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{ 1240 Payload: protoutil.MarshalOrPanic(&common.Payload{ 1241 Header: &common.Header{ 1242 ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{ 1243 Type: int32(common.HeaderType_ORDERER_TRANSACTION), 1244 }), 1245 }, 1246 Data: protoutil.MarshalOrPanic(&common.Envelope{ 1247 Payload: protoutil.MarshalOrPanic(&common.Payload{}), 1248 }), 1249 }), 1250 })}, 1251 }, 1252 }, 1253 }, 1254 { 1255 name: "orderer transaction with corrupt inner channel header", 1256 expectedErr: "error unmarshaling ChannelHeader: proto: common.ChannelHeader: illegal tag 0 (wire type 1)", 1257 block: &common.Block{ 1258 Data: &common.BlockData{ 1259 Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{ 1260 Payload: protoutil.MarshalOrPanic(&common.Payload{ 1261 Header: &common.Header{ 1262 ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{ 1263 Type: int32(common.HeaderType_ORDERER_TRANSACTION), 1264 }), 1265 }, 1266 Data: protoutil.MarshalOrPanic(&common.Envelope{ 1267 Payload: protoutil.MarshalOrPanic(&common.Payload{ 1268 Header: &common.Header{ 1269 ChannelHeader: []byte{1, 2, 3}, 1270 }, 1271 }), 1272 }), 1273 }), 1274 })}, 1275 }, 1276 }, 1277 }, 1278 { 1279 name: "orderer transaction that is not a config, but a config update", 1280 expectedErr: "", 1281 block: &common.Block{ 1282 Data: &common.BlockData{ 1283 Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{ 1284 Payload: protoutil.MarshalOrPanic(&common.Payload{ 1285 Header: &common.Header{ 1286 ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{ 1287 Type: int32(common.HeaderType_ORDERER_TRANSACTION), 1288 }), 1289 }, 1290 Data: protoutil.MarshalOrPanic(&common.Envelope{ 1291 Payload: protoutil.MarshalOrPanic(&common.Payload{ 1292 Header: &common.Header{ 1293 ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{ 1294 Type: int32(common.HeaderType_CONFIG_UPDATE), 1295 }), 1296 }, 1297 }), 1298 }), 1299 }), 1300 })}, 1301 }, 1302 }, 1303 }, 1304 { 1305 expectedErr: "", 1306 name: "orderer transaction that is a system channel config block", 1307 block: &common.Block{ 1308 Data: &common.BlockData{ 1309 Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{ 1310 Payload: protoutil.MarshalOrPanic(&common.Payload{ 1311 Header: &common.Header{ 1312 ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{ 1313 ChannelId: "systemChannel", 1314 Type: int32(common.HeaderType_ORDERER_TRANSACTION), 1315 }), 1316 }, 1317 Data: protoutil.MarshalOrPanic(&common.Envelope{ 1318 Payload: protoutil.MarshalOrPanic(&common.Payload{ 1319 Header: &common.Header{ 1320 ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{ 1321 Type: int32(common.HeaderType_CONFIG), 1322 ChannelId: "systemChannel", 1323 }), 1324 }, 1325 }), 1326 }), 1327 }), 1328 })}, 1329 }, 1330 }, 1331 }, 1332 { 1333 name: "orderer transaction that creates a new application channel", 1334 expectedErr: "", 1335 returnedName: "notSystemChannel", 1336 block: &common.Block{ 1337 Data: &common.BlockData{ 1338 Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{ 1339 Payload: protoutil.MarshalOrPanic(&common.Payload{ 1340 Header: &common.Header{ 1341 ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{ 1342 ChannelId: "systemChannel", 1343 Type: int32(common.HeaderType_ORDERER_TRANSACTION), 1344 }), 1345 }, 1346 Data: protoutil.MarshalOrPanic(&common.Envelope{ 1347 Payload: protoutil.MarshalOrPanic(&common.Payload{ 1348 Header: &common.Header{ 1349 ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{ 1350 Type: int32(common.HeaderType_CONFIG), 1351 ChannelId: "notSystemChannel", 1352 }), 1353 }, 1354 }), 1355 }), 1356 }), 1357 })}, 1358 }, 1359 }, 1360 returnGenesisBlock: true, 1361 }, 1362 } { 1363 t.Run(testCase.name, func(t *testing.T) { 1364 channelName, gb, err := cluster.ExtractGenesisBlock(flogging.MustGetLogger("test"), testCase.block) 1365 if testCase.expectedErr != "" { 1366 assert.EqualError(t, err, testCase.expectedErr) 1367 } else { 1368 assert.NoError(t, err) 1369 } 1370 assert.Equal(t, testCase.returnedName, channelName) 1371 if testCase.returnGenesisBlock { 1372 assert.NotNil(t, gb) 1373 } else { 1374 assert.Nil(t, gb) 1375 } 1376 }) 1377 } 1378 } 1379 1380 func TestChannels(t *testing.T) { 1381 makeBlock := func(outerChannelName, innerChannelName string) *common.Block { 1382 return &common.Block{ 1383 Header: &common.BlockHeader{}, 1384 Data: &common.BlockData{ 1385 Data: [][]byte{protoutil.MarshalOrPanic(&common.Envelope{ 1386 Payload: protoutil.MarshalOrPanic(&common.Payload{ 1387 Header: &common.Header{ 1388 ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{ 1389 ChannelId: outerChannelName, 1390 Type: int32(common.HeaderType_ORDERER_TRANSACTION), 1391 }), 1392 }, 1393 Data: protoutil.MarshalOrPanic(&common.Envelope{ 1394 Payload: protoutil.MarshalOrPanic(&common.Payload{ 1395 Header: &common.Header{ 1396 ChannelHeader: protoutil.MarshalOrPanic(&common.ChannelHeader{ 1397 Type: int32(common.HeaderType_CONFIG), 1398 ChannelId: innerChannelName, 1399 }), 1400 }, 1401 }), 1402 }), 1403 }), 1404 })}, 1405 }, 1406 } 1407 } 1408 1409 for _, testCase := range []struct { 1410 name string 1411 prepareSystemChain func(systemChain []*common.Block) 1412 assertion func(t *testing.T, ci *cluster.ChainInspector) 1413 }{ 1414 { 1415 name: "happy path - artificial blocks", 1416 prepareSystemChain: func(systemChain []*common.Block) { 1417 assignHashes(systemChain) 1418 }, 1419 assertion: func(t *testing.T, ci *cluster.ChainInspector) { 1420 actual := cluster.GenesisBlocks(ci.Channels()) 1421 // Assert that the returned channels are returned in any order 1422 assert.Contains(t, [][]string{{"mychannel", "mychannel2"}, {"mychannel2", "mychannel"}}, actual.Names()) 1423 }, 1424 }, 1425 { 1426 name: "happy path - one block is not artificial but real", 1427 prepareSystemChain: func(systemChain []*common.Block) { 1428 blockbytes, err := ioutil.ReadFile(filepath.Join("testdata", "block3.pb")) 1429 assert.NoError(t, err) 1430 block := &common.Block{} 1431 err = proto.Unmarshal(blockbytes, block) 1432 assert.NoError(t, err) 1433 1434 systemChain[len(systemChain)/2-1] = block 1435 assignHashes(systemChain) 1436 }, 1437 assertion: func(t *testing.T, ci *cluster.ChainInspector) { 1438 actual := cluster.GenesisBlocks(ci.Channels()) 1439 // Assert that the returned channels are returned in any order 1440 assert.Contains(t, [][]string{{"mychannel2", "bar"}, {"bar", "mychannel2"}}, actual.Names()) 1441 }, 1442 }, 1443 { 1444 name: "bad path - pulled chain's last block hash doesn't match the last config block", 1445 prepareSystemChain: func(systemChain []*common.Block) { 1446 assignHashes(systemChain) 1447 systemChain[len(systemChain)-1].Header.PreviousHash = nil 1448 }, 1449 assertion: func(t *testing.T, ci *cluster.ChainInspector) { 1450 panicValue := "System channel pulled doesn't match the boot last config block:" + 1451 " block [2]'s hash (bc4ef5cc8a61ac0747cc82df58bac9ad3278622c1cfc7a119b9b1068e422c9f1)" + 1452 " mismatches block [3]'s prev block hash ()" 1453 assert.PanicsWithValue(t, panicValue, func() { 1454 ci.Channels() 1455 }) 1456 }, 1457 }, 1458 { 1459 name: "bad path - hash chain mismatch", 1460 prepareSystemChain: func(systemChain []*common.Block) { 1461 assignHashes(systemChain) 1462 systemChain[len(systemChain)-2].Header.PreviousHash = nil 1463 }, 1464 assertion: func(t *testing.T, ci *cluster.ChainInspector) { 1465 panicValue := "Claimed previous hash of block [2] is but actual previous " + 1466 "hash is 920faeb0bd8a02b3f2553247359fb3b684819c75c6e5487bc7eed632841ddc5f" 1467 assert.PanicsWithValue(t, panicValue, func() { 1468 ci.Channels() 1469 }) 1470 }, 1471 }, 1472 { 1473 name: "bad path - a block cannot be classified", 1474 prepareSystemChain: func(systemChain []*common.Block) { 1475 assignHashes(systemChain) 1476 systemChain[len(systemChain)-2].Data.Data = [][]byte{{1, 2, 3}} 1477 }, 1478 assertion: func(t *testing.T, ci *cluster.ChainInspector) { 1479 panicValue := "Failed extracting channel genesis block from config block: " + 1480 "block data does not carry an envelope at index 0: error unmarshaling " + 1481 "Envelope: proto: common.Envelope: illegal tag 0 (wire type 1)" 1482 assert.PanicsWithValue(t, panicValue, func() { 1483 ci.Channels() 1484 }) 1485 }, 1486 }, 1487 { 1488 name: "bad path - failed pulling blocks", 1489 prepareSystemChain: func(systemChain []*common.Block) { 1490 assignHashes(systemChain) 1491 // Setting a block to nil makes the block puller return nil, 1492 // which signals failure of pulling a block. 1493 systemChain[len(systemChain)/2] = nil 1494 }, 1495 assertion: func(t *testing.T, ci *cluster.ChainInspector) { 1496 panicValue := "Failed pulling block [2] from the system channel" 1497 assert.PanicsWithValue(t, panicValue, func() { 1498 ci.Channels() 1499 }) 1500 }, 1501 }, 1502 } { 1503 t.Run(testCase.name, func(t *testing.T) { 1504 systemChain := []*common.Block{ 1505 makeBlock("systemChannel", "systemChannel"), 1506 makeBlock("systemChannel", "mychannel"), 1507 makeBlock("systemChannel", "mychannel2"), 1508 makeBlock("systemChannel", "systemChannel"), 1509 } 1510 1511 for i := 0; i < len(systemChain); i++ { 1512 systemChain[i].Header.DataHash = protoutil.BlockDataHash(systemChain[i].Data) 1513 systemChain[i].Header.Number = uint64(i) 1514 } 1515 testCase.prepareSystemChain(systemChain) 1516 puller := &mocks.ChainPuller{} 1517 puller.On("Close") 1518 for seq := uint64(0); int(seq) < len(systemChain)-1; seq++ { 1519 puller.On("PullBlock", seq).Return(systemChain[int(seq)]) 1520 } 1521 1522 ci := &cluster.ChainInspector{ 1523 Logger: flogging.MustGetLogger("test"), 1524 Puller: puller, 1525 LastConfigBlock: systemChain[len(systemChain)-1], 1526 } 1527 defer puller.AssertNumberOfCalls(t, "Close", 1) 1528 defer ci.Close() 1529 testCase.assertion(t, ci) 1530 }) 1531 } 1532 } 1533 1534 var fakeGB = &common.Block{ 1535 Header: &common.BlockHeader{}, 1536 Metadata: &common.BlockMetadata{ 1537 Metadata: [][]byte{{}, {}, {}, {}}, 1538 }, 1539 Data: &common.BlockData{ 1540 Data: [][]byte{ 1541 protoutil.MarshalOrPanic(&common.Envelope{ 1542 Payload: protoutil.MarshalOrPanic(&common.Envelope{ 1543 Payload: protoutil.MarshalOrPanic(&common.Config{ 1544 Sequence: 1, 1545 }), 1546 }), 1547 }), 1548 }, 1549 }, 1550 } 1551 1552 func simulateNonParticipantChannelPull(osn *deliverServer) { 1553 lastBlock := protoutil.NewBlock(1, nil) 1554 lastBlock.Metadata.Metadata[common.BlockMetadataIndex_LAST_CONFIG] = protoutil.MarshalOrPanic(&common.Metadata{ 1555 Value: protoutil.MarshalOrPanic(&common.LastConfig{Index: 0}), 1556 }) 1557 // We first present a channel with a last block of 'lastBlock', that points to 1558 // the genesis block 1559 osn.addExpectProbeAssert() 1560 osn.blockResponses <- &orderer.DeliverResponse{ 1561 Type: &orderer.DeliverResponse_Block{Block: lastBlock}, 1562 } 1563 osn.addExpectProbeAssert() 1564 osn.blockResponses <- &orderer.DeliverResponse{ 1565 Type: &orderer.DeliverResponse_Block{Block: lastBlock}, 1566 } 1567 osn.addExpectPullAssert(1) 1568 osn.blockResponses <- &orderer.DeliverResponse{ 1569 Type: &orderer.DeliverResponse_Block{Block: lastBlock}, 1570 } 1571 osn.blockResponses <- nil 1572 1573 // and make it send back the genesis block. 1574 // First send is for probing, 1575 osn.addExpectProbeAssert() 1576 osn.blockResponses <- &orderer.DeliverResponse{ 1577 Type: &orderer.DeliverResponse_Block{Block: fakeGB}, 1578 } 1579 osn.addExpectPullAssert(0) 1580 // and the second one sends the actual block itself downstream 1581 osn.blockResponses <- &orderer.DeliverResponse{ 1582 Type: &orderer.DeliverResponse_Block{Block: fakeGB}, 1583 } 1584 1585 osn.blockResponses <- nil 1586 } 1587 1588 func TestFilter(t *testing.T) { 1589 logger := flogging.MustGetLogger("test") 1590 logger = logger.WithOptions(zap.Hooks(func(entry zapcore.Entry) error { 1591 assert.Equal(t, "Channel foo shouldn't be pulled. Skipping it", entry.Message) 1592 return nil 1593 })) 1594 1595 r := &cluster.Replicator{ 1596 Filter: func(_ string) bool { 1597 return false 1598 }, 1599 Logger: logger, 1600 } 1601 assert.Equal(t, cluster.ErrSkipped, r.PullChannel("foo")) 1602 }