github.com/anjalikarhana/fabric@v2.1.1+incompatible/orderer/common/cluster/deliver.go

/*
Copyright IBM Corp. 2018 All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package cluster

import (
	"context"
	"math"
	"math/rand"
	"reflect"
	"sync"
	"sync/atomic"
	"time"

	"github.com/hyperledger/fabric-protos-go/common"
	"github.com/hyperledger/fabric-protos-go/orderer"
	"github.com/hyperledger/fabric/common/flogging"
	"github.com/hyperledger/fabric/common/util"
	"github.com/hyperledger/fabric/internal/pkg/identity"
	"github.com/hyperledger/fabric/protoutil"
	"github.com/pkg/errors"
	"google.golang.org/grpc"
)

// BlockPuller pulls blocks from remote ordering nodes.
// Its operations are not thread safe.
type BlockPuller struct {
	// Configuration
	MaxPullBlockRetries uint64
	MaxTotalBufferBytes int
	Signer              identity.SignerSerializer
	TLSCert             []byte
	Channel             string
	FetchTimeout        time.Duration
	RetryTimeout        time.Duration
	Logger              *flogging.FabricLogger
	Dialer              Dialer
	VerifyBlockSequence BlockSequenceVerifier
	Endpoints           []EndpointCriteria
	// Internal state
	stream       *ImpatientStream
	blockBuff    []*common.Block
	latestSeq    uint64
	endpoint     string
	conn         *grpc.ClientConn
	cancelStream func()
}

// Clone returns a copy of this BlockPuller initialized
// for the given channel
func (p *BlockPuller) Clone() *BlockPuller {
	// Clone by value
	copy := *p
	// Reset internal state
	copy.stream = nil
	copy.blockBuff = nil
	copy.latestSeq = 0
	copy.endpoint = ""
	copy.conn = nil
	copy.cancelStream = nil
	return &copy
}

// Close makes the BlockPuller close the connection and stream
// with the remote endpoint, and wipe the internal block buffer.
func (p *BlockPuller) Close() {
	if p.cancelStream != nil {
		p.cancelStream()
	}
	p.cancelStream = nil

	if p.conn != nil {
		p.conn.Close()
	}
	p.conn = nil
	p.endpoint = ""
	p.latestSeq = 0
	p.blockBuff = nil
}

// PullBlock blocks until a block with the given sequence is fetched
// from some remote ordering node, or until consecutive failures
// of fetching the block exceed MaxPullBlockRetries.
func (p *BlockPuller) PullBlock(seq uint64) *common.Block {
	retriesLeft := p.MaxPullBlockRetries
	for {
		block := p.tryFetchBlock(seq)
		if block != nil {
			return block
		}
		retriesLeft--
		if retriesLeft == 0 && p.MaxPullBlockRetries > 0 {
			p.Logger.Errorf("Failed pulling block [%d]: retry count exhausted(%d)", seq, p.MaxPullBlockRetries)
			return nil
		}
		time.Sleep(p.RetryTimeout)
	}
}

// HeightsByEndpoints returns the block heights by endpoints of orderers
func (p *BlockPuller) HeightsByEndpoints() (map[string]uint64, error) {
	endpointsInfo := p.probeEndpoints(0)
	res := make(map[string]uint64)
	for endpoint, endpointInfo := range endpointsInfo.byEndpoints() {
		endpointInfo.conn.Close()
		res[endpoint] = endpointInfo.lastBlockSeq + 1
	}
	p.Logger.Info("Returning the heights of OSNs mapped by endpoints", res)
	return res, endpointsInfo.err
}
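// Illustrative usage sketch of the exported API above. The channel name, timeouts,
// buffer size, retry count, and the signer/tlsCert/dialer/verifier/endpoints values
// are placeholders the caller must supply:
//
//	puller := &BlockPuller{
//		Channel:             "mychannel",
//		FetchTimeout:        10 * time.Second,
//		RetryTimeout:        5 * time.Second,
//		MaxPullBlockRetries: 5,
//		MaxTotalBufferBytes: 1 << 20,  // 1 MiB
//		Signer:              signer,   // identity.SignerSerializer
//		TLSCert:             tlsCert,  // client TLS certificate bytes, used for the TLS binding
//		Dialer:              dialer,   // Dialer
//		VerifyBlockSequence: verifier, // BlockSequenceVerifier
//		Endpoints:           endpoints, // []EndpointCriteria
//		Logger:              flogging.MustGetLogger("example"),
//	}
//	defer puller.Close()
//	for seq := uint64(0); ; seq++ {
//		block := puller.PullBlock(seq)
//		if block == nil { // nil means the retry budget was exhausted
//			break
//		}
//		// process block ...
//	}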
func (p *BlockPuller) tryFetchBlock(seq uint64) *common.Block {
	var reConnected bool
	for p.isDisconnected() {
		reConnected = true
		p.connectToSomeEndpoint(seq)
		if p.isDisconnected() {
			time.Sleep(p.RetryTimeout)
		}
	}

	block := p.popBlock(seq)
	if block != nil {
		return block
	}
	// Else, buffer is empty. So we need to pull blocks
	// to re-fill it.
	if err := p.pullBlocks(seq, reConnected); err != nil {
		p.Logger.Errorf("Failed pulling blocks: %v", err)
		// Something went wrong, disconnect and return nil.
		p.Close()
		// If we have a block in the buffer, return it.
		if len(p.blockBuff) > 0 {
			return p.blockBuff[0]
		}
		return nil
	}

	if err := p.VerifyBlockSequence(p.blockBuff, p.Channel); err != nil {
		p.Close()
		p.Logger.Errorf("Failed verifying received blocks: %v", err)
		return nil
	}

	// At this point, the buffer is full, so shift it and return the first block.
	return p.popBlock(seq)
}

func (p *BlockPuller) setCancelStreamFunc(f func()) {
	p.cancelStream = f
}

func (p *BlockPuller) pullBlocks(seq uint64, reConnected bool) error {
	env, err := p.seekNextEnvelope(seq)
	if err != nil {
		p.Logger.Errorf("Failed creating seek envelope: %v", err)
		return err
	}

	stream, err := p.obtainStream(reConnected, env, seq)
	if err != nil {
		return err
	}

	var totalSize int
	p.blockBuff = nil
	nextExpectedSequence := seq
	for totalSize < p.MaxTotalBufferBytes && nextExpectedSequence <= p.latestSeq {
		resp, err := stream.Recv()
		if err != nil {
			p.Logger.Errorf("Failed receiving next block from %s: %v", p.endpoint, err)
			return err
		}

		block, err := extractBlockFromResponse(resp)
		if err != nil {
			p.Logger.Errorf("Received a bad block from %s: %v", p.endpoint, err)
			return err
		}
		seq := block.Header.Number
		if seq != nextExpectedSequence {
			p.Logger.Errorf("Expected to receive sequence %d but got %d instead", nextExpectedSequence, seq)
			return errors.Errorf("got unexpected sequence from %s - (%d) instead of (%d)", p.endpoint, seq, nextExpectedSequence)
		}
		size := blockSize(block)
		totalSize += size
		p.blockBuff = append(p.blockBuff, block)
		nextExpectedSequence++
		p.Logger.Infof("Got block [%d] of size %d KB from %s", seq, size/1024, p.endpoint)
	}
	return nil
}

func (p *BlockPuller) obtainStream(reConnected bool, env *common.Envelope, seq uint64) (*ImpatientStream, error) {
	var stream *ImpatientStream
	var err error
	if reConnected {
		p.Logger.Infof("Sending request for block [%d] to %s", seq, p.endpoint)
		stream, err = p.requestBlocks(p.endpoint, NewImpatientStream(p.conn, p.FetchTimeout), env)
		if err != nil {
			return nil, err
		}
		// Stream established successfully.
		// In next iterations of this function, reuse it.
		p.stream = stream
	} else {
		// Reuse previous stream
		stream = p.stream
	}

	p.setCancelStreamFunc(stream.cancelFunc)
	return stream, nil
}
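// Illustrative buffering note: pullBlocks above keeps appending blocks until the
// marshaled size of the buffer reaches MaxTotalBufferBytes or latestSeq is reached,
// whichever comes first. For example, with a hypothetical MaxTotalBufferBytes of
// 1 MiB and blocks of roughly 100 KiB each, a single round of pullBlocks buffers
// about ten blocks, which tryFetchBlock then returns one at a time via popBlock.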
// popBlock pops a block from the in-memory buffer and returns it,
// or returns nil if the buffer is empty or the block doesn't match
// the wanted sequence.
func (p *BlockPuller) popBlock(seq uint64) *common.Block {
	if len(p.blockBuff) == 0 {
		return nil
	}
	block, rest := p.blockBuff[0], p.blockBuff[1:]
	p.blockBuff = rest
	// If the requested block sequence is the wrong one, discard the buffer
	// to start fetching blocks all over again.
	if seq != block.Header.Number {
		p.blockBuff = nil
		return nil
	}
	return block
}

func (p *BlockPuller) isDisconnected() bool {
	return p.conn == nil
}

// connectToSomeEndpoint makes the BlockPuller connect to some endpoint that has
// the given minimum block sequence.
func (p *BlockPuller) connectToSomeEndpoint(minRequestedSequence uint64) {
	// Probe all endpoints in parallel, searching for an endpoint with the given minimum
	// block sequence, and then map the results by endpoint.
	endpointsInfo := p.probeEndpoints(minRequestedSequence).byEndpoints()
	if len(endpointsInfo) == 0 {
		p.Logger.Warningf("Could not connect to any endpoint of %v", p.Endpoints)
		return
	}

	// Choose a random endpoint out of the available endpoints
	chosenEndpoint := randomEndpoint(endpointsInfo)
	// Disconnect all connections but this endpoint
	for endpoint, endpointInfo := range endpointsInfo {
		if endpoint == chosenEndpoint {
			continue
		}
		endpointInfo.conn.Close()
	}

	p.conn = endpointsInfo[chosenEndpoint].conn
	p.endpoint = chosenEndpoint
	p.latestSeq = endpointsInfo[chosenEndpoint].lastBlockSeq

	p.Logger.Infof("Connected to %s with last block seq of %d", p.endpoint, p.latestSeq)
}

// probeEndpoints reaches out to all known endpoints and returns their latest block
// sequences, as well as gRPC connections to them.
func (p *BlockPuller) probeEndpoints(minRequestedSequence uint64) *endpointInfoBucket {
	endpointsInfo := make(chan *endpointInfo, len(p.Endpoints))

	var wg sync.WaitGroup
	wg.Add(len(p.Endpoints))

	var forbiddenErr uint32
	var unavailableErr uint32

	for _, endpoint := range p.Endpoints {
		go func(endpoint EndpointCriteria) {
			defer wg.Done()
			ei, err := p.probeEndpoint(endpoint, minRequestedSequence)
			if err != nil {
				p.Logger.Warningf("Received error of type '%v' from %s", err, endpoint)
				if err == ErrForbidden {
					atomic.StoreUint32(&forbiddenErr, 1)
				}
				if err == ErrServiceUnavailable {
					atomic.StoreUint32(&unavailableErr, 1)
				}
				return
			}
			endpointsInfo <- ei
		}(endpoint)
	}
	wg.Wait()

	close(endpointsInfo)
	eib := &endpointInfoBucket{
		bucket: endpointsInfo,
		logger: p.Logger,
	}

	if unavailableErr == 1 && len(endpointsInfo) == 0 {
		eib.err = ErrServiceUnavailable
	}
	if forbiddenErr == 1 && len(endpointsInfo) == 0 {
		eib.err = ErrForbidden
	}
	return eib
}
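// Illustrative reduction of the fan-out/fan-in pattern used by probeEndpoints above
// ("probe" and "endpoints" are placeholders for p.probeEndpoint and p.Endpoints):
//
//	results := make(chan *endpointInfo, len(endpoints)) // buffered: senders never block
//	var wg sync.WaitGroup
//	wg.Add(len(endpoints))
//	for _, ep := range endpoints {
//		go func(ep EndpointCriteria) {
//			defer wg.Done()
//			if ei, err := probe(ep); err == nil {
//				results <- ei
//			}
//		}(ep)
//	}
//	wg.Wait()
//	close(results) // safe to close: every sender has already returned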
// probeEndpoint returns a gRPC connection and the latest block sequence of an endpoint with the given
// required minimum sequence, or an error if something goes wrong.
func (p *BlockPuller) probeEndpoint(endpoint EndpointCriteria, minRequestedSequence uint64) (*endpointInfo, error) {
	conn, err := p.Dialer.Dial(endpoint)
	if err != nil {
		p.Logger.Warningf("Failed connecting to %s: %v", endpoint, err)
		return nil, err
	}

	lastBlockSeq, err := p.fetchLastBlockSeq(minRequestedSequence, endpoint.Endpoint, conn)
	if err != nil {
		conn.Close()
		return nil, err
	}

	return &endpointInfo{conn: conn, lastBlockSeq: lastBlockSeq, endpoint: endpoint.Endpoint}, nil
}

// randomEndpoint returns a random endpoint of the given endpointInfo
func randomEndpoint(endpointsToHeight map[string]*endpointInfo) string {
	var candidates []string
	for endpoint := range endpointsToHeight {
		candidates = append(candidates, endpoint)
	}

	rand.Seed(time.Now().UnixNano())
	return candidates[rand.Intn(len(candidates))]
}

// fetchLastBlockSeq returns the last block sequence of an endpoint with the given gRPC connection.
func (p *BlockPuller) fetchLastBlockSeq(minRequestedSequence uint64, endpoint string, conn *grpc.ClientConn) (uint64, error) {
	env, err := p.seekLastEnvelope()
	if err != nil {
		p.Logger.Errorf("Failed creating seek envelope for %s: %v", endpoint, err)
		return 0, err
	}

	stream, err := p.requestBlocks(endpoint, NewImpatientStream(conn, p.FetchTimeout), env)
	if err != nil {
		return 0, err
	}
	defer stream.abort()

	resp, err := stream.Recv()
	if err != nil {
		p.Logger.Errorf("Failed receiving the latest block from %s: %v", endpoint, err)
		return 0, err
	}

	block, err := extractBlockFromResponse(resp)
	if err != nil {
		p.Logger.Warningf("Received %v from %s: %v", resp, endpoint, err)
		return 0, err
	}
	stream.CloseSend()

	seq := block.Header.Number
	if seq < minRequestedSequence {
		err := errors.Errorf("minimum requested sequence is %d but %s is at sequence %d", minRequestedSequence, endpoint, seq)
		p.Logger.Infof("Skipping pulling from %s: %v", endpoint, err)
		return 0, err
	}

	p.Logger.Infof("%s is at block sequence of %d", endpoint, seq)
	return block.Header.Number, nil
}
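// Illustrative note on heights versus sequences: fetchLastBlockSeq above seeks the
// newest block and returns its Header.Number. If an orderer's ledger height is 100,
// its newest block has sequence 99, so fetchLastBlockSeq returns 99 and
// HeightsByEndpoints reports lastBlockSeq+1 = 100 for that endpoint.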
// requestBlocks starts requesting blocks from the given endpoint, using the given ImpatientStreamCreator by sending
// the given envelope.
// It returns a stream that is used to pull blocks, or error if something goes wrong.
func (p *BlockPuller) requestBlocks(endpoint string, newStream ImpatientStreamCreator, env *common.Envelope) (*ImpatientStream, error) {
	stream, err := newStream()
	if err != nil {
		p.Logger.Warningf("Failed establishing deliver stream with %s", endpoint)
		return nil, err
	}

	if err := stream.Send(env); err != nil {
		p.Logger.Errorf("Failed sending seek envelope to %s: %v", endpoint, err)
		stream.abort()
		return nil, err
	}
	return stream, nil
}

func extractBlockFromResponse(resp *orderer.DeliverResponse) (*common.Block, error) {
	switch t := resp.Type.(type) {
	case *orderer.DeliverResponse_Block:
		block := t.Block
		if block == nil {
			return nil, errors.New("block is nil")
		}
		if block.Data == nil {
			return nil, errors.New("block data is nil")
		}
		if block.Header == nil {
			return nil, errors.New("block header is nil")
		}
		if block.Metadata == nil || len(block.Metadata.Metadata) == 0 {
			return nil, errors.New("block metadata is empty")
		}
		return block, nil
	case *orderer.DeliverResponse_Status:
		if t.Status == common.Status_FORBIDDEN {
			return nil, ErrForbidden
		}
		if t.Status == common.Status_SERVICE_UNAVAILABLE {
			return nil, ErrServiceUnavailable
		}
		return nil, errors.Errorf("faulty node, received: %v", resp)
	default:
		return nil, errors.Errorf("response is of type %v, but expected a block", reflect.TypeOf(resp.Type))
	}
}

func (p *BlockPuller) seekLastEnvelope() (*common.Envelope, error) {
	return protoutil.CreateSignedEnvelopeWithTLSBinding(
		common.HeaderType_DELIVER_SEEK_INFO,
		p.Channel,
		p.Signer,
		last(),
		int32(0),
		uint64(0),
		util.ComputeSHA256(p.TLSCert),
	)
}

func (p *BlockPuller) seekNextEnvelope(startSeq uint64) (*common.Envelope, error) {
	return protoutil.CreateSignedEnvelopeWithTLSBinding(
		common.HeaderType_DELIVER_SEEK_INFO,
		p.Channel,
		p.Signer,
		nextSeekInfo(startSeq),
		int32(0),
		uint64(0),
		util.ComputeSHA256(p.TLSCert),
	)
}

func last() *orderer.SeekInfo {
	return &orderer.SeekInfo{
		Start:         &orderer.SeekPosition{Type: &orderer.SeekPosition_Newest{Newest: &orderer.SeekNewest{}}},
		Stop:          &orderer.SeekPosition{Type: &orderer.SeekPosition_Specified{Specified: &orderer.SeekSpecified{Number: math.MaxUint64}}},
		Behavior:      orderer.SeekInfo_BLOCK_UNTIL_READY,
		ErrorResponse: orderer.SeekInfo_BEST_EFFORT,
	}
}

func nextSeekInfo(startSeq uint64) *orderer.SeekInfo {
	return &orderer.SeekInfo{
		Start:         &orderer.SeekPosition{Type: &orderer.SeekPosition_Specified{Specified: &orderer.SeekSpecified{Number: startSeq}}},
		Stop:          &orderer.SeekPosition{Type: &orderer.SeekPosition_Specified{Specified: &orderer.SeekSpecified{Number: math.MaxUint64}}},
		Behavior:      orderer.SeekInfo_BLOCK_UNTIL_READY,
		ErrorResponse: orderer.SeekInfo_BEST_EFFORT,
	}
}

func blockSize(block *common.Block) int {
	return len(protoutil.MarshalOrPanic(block))
}

type endpointInfo struct {
	endpoint     string
	conn         *grpc.ClientConn
	lastBlockSeq uint64
}

type endpointInfoBucket struct {
	bucket <-chan *endpointInfo
	logger *flogging.FabricLogger
	err    error
}
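// byEndpoints drains the bucket into a map keyed by endpoint. If the same endpoint
// appears more than once, the duplicate's connection is closed and it is skipped.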
func (eib endpointInfoBucket) byEndpoints() map[string]*endpointInfo {
	infoByEndpoints := make(map[string]*endpointInfo)
	for endpointInfo := range eib.bucket {
		if _, exists := infoByEndpoints[endpointInfo.endpoint]; exists {
			eib.logger.Warningf("Duplicate endpoint found(%s), skipping it", endpointInfo.endpoint)
			endpointInfo.conn.Close()
			continue
		}
		infoByEndpoints[endpointInfo.endpoint] = endpointInfo
	}
	return infoByEndpoints
}

// ImpatientStreamCreator creates an ImpatientStream
type ImpatientStreamCreator func() (*ImpatientStream, error)

// ImpatientStream aborts the stream if it waits for too long for a message.
type ImpatientStream struct {
	waitTimeout time.Duration
	orderer.AtomicBroadcast_DeliverClient
	cancelFunc func()
}

func (stream *ImpatientStream) abort() {
	stream.cancelFunc()
}

// Recv blocks until a response is received from the stream or the
// timeout expires.
func (stream *ImpatientStream) Recv() (*orderer.DeliverResponse, error) {
	// Initialize a timeout to cancel the stream when it expires
	timeout := time.NewTimer(stream.waitTimeout)
	defer timeout.Stop()

	responseChan := make(chan errorAndResponse, 1)

	// receive waitGroup ensures the goroutine below exits before
	// this function exits.
	var receive sync.WaitGroup
	receive.Add(1)
	defer receive.Wait()

	go func() {
		defer receive.Done()
		resp, err := stream.AtomicBroadcast_DeliverClient.Recv()
		responseChan <- errorAndResponse{err: err, resp: resp}
	}()

	select {
	case <-timeout.C:
		stream.cancelFunc()
		return nil, errors.Errorf("didn't receive a response within %v", stream.waitTimeout)
	case respAndErr := <-responseChan:
		return respAndErr.resp, respAndErr.err
	}
}

// NewImpatientStream returns an ImpatientStreamCreator that creates ImpatientStreams.
func NewImpatientStream(conn *grpc.ClientConn, waitTimeout time.Duration) ImpatientStreamCreator {
	return func() (*ImpatientStream, error) {
		abc := orderer.NewAtomicBroadcastClient(conn)
		ctx, cancel := context.WithCancel(context.Background())

		stream, err := abc.Deliver(ctx)
		if err != nil {
			cancel()
			return nil, err
		}

		once := &sync.Once{}
		return &ImpatientStream{
			waitTimeout: waitTimeout,
			// The stream might be canceled while Close() is being called, but also
			// while a timeout expires, so ensure it's only called once.
			cancelFunc: func() {
				once.Do(cancel)
			},
			AtomicBroadcast_DeliverClient: stream,
		}, nil
	}
}

type errorAndResponse struct {
	err  error
	resp *orderer.DeliverResponse
}
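// Illustrative usage of NewImpatientStream, mirroring requestBlocks and
// fetchLastBlockSeq above (conn, env and the surrounding error handling are
// placeholders the caller must supply):
//
//	newStream := NewImpatientStream(conn, 10*time.Second)
//	stream, err := newStream()
//	if err != nil {
//		return err
//	}
//	defer stream.abort()
//	if err := stream.Send(env); err != nil {
//		return err
//	}
//	resp, err := stream.Recv() // errors out if no message arrives within 10s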