github.com/hechain20/hechain@v0.0.0-20220316014945-b544036ba106/orderer/common/cluster/deliver.go

     1  /*
     2  Copyright hechain. 2018 All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package cluster
     8  
     9  import (
    10  	"context"
    11  	"math"
    12  	"math/rand"
    13  	"reflect"
    14  	"sync"
    15  	"sync/atomic"
    16  	"time"
    17  
    18  	"github.com/hechain20/hechain/common/flogging"
    19  	"github.com/hechain20/hechain/common/util"
    20  	"github.com/hechain20/hechain/internal/pkg/identity"
    21  	"github.com/hechain20/hechain/protoutil"
    22  	"github.com/hyperledger/fabric-protos-go/common"
    23  	"github.com/hyperledger/fabric-protos-go/orderer"
    24  	"github.com/pkg/errors"
    25  	"google.golang.org/grpc"
    26  )
    27  
    28  // BlockPuller pulls blocks from remote ordering nodes.
    29  // Its operations are not thread safe.
    30  type BlockPuller struct {
    31  	// Configuration
    32  	MaxPullBlockRetries uint64
    33  	MaxTotalBufferBytes int
    34  	Signer              identity.SignerSerializer
    35  	TLSCert             []byte
    36  	Channel             string
    37  	FetchTimeout        time.Duration
    38  	RetryTimeout        time.Duration
    39  	Logger              *flogging.FabricLogger
    40  	Dialer              Dialer
    41  	VerifyBlockSequence BlockSequenceVerifier
    42  	Endpoints           []EndpointCriteria
    43  
    44  	// A 'stopper' goroutine may signal the goroutine servicing PullBlock & HeightsByEndpoints to stop by closing this
    45  	// channel. Note: all methods of the BlockPuller must be serviced by a single goroutine; it is not thread safe.
    46  	// It is the responsibility of the 'stopper' not to close the channel more than once.
    47  	StopChannel chan struct{}
    48  
    49  	// Internal state
    50  	stream       *ImpatientStream
    51  	blockBuff    []*common.Block
    52  	latestSeq    uint64
    53  	endpoint     string
    54  	conn         *grpc.ClientConn
    55  	cancelStream func()
    56  }
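
// Illustrative sketch (not part of the original file): a BlockPuller is
// typically created by filling in the configuration fields above. The names
// signer, tlsCert, dialer, verifier and endpoints below are placeholders
// assumed to exist in the caller's scope.
//
//	puller := &BlockPuller{
//		Channel:             "mychannel",
//		MaxPullBlockRetries: 5,                // 0 means retry indefinitely
//		MaxTotalBufferBytes: 1 << 20,          // cap on the in-memory block buffer
//		FetchTimeout:        10 * time.Second,
//		RetryTimeout:        5 * time.Second,
//		Signer:              signer,           // identity.SignerSerializer
//		TLSCert:             tlsCert,          // raw TLS certificate bytes, used for the TLS binding
//		Dialer:              dialer,           // cluster Dialer
//		VerifyBlockSequence: verifier,         // BlockSequenceVerifier
//		Endpoints:           endpoints,        // []EndpointCriteria of the ordering nodes
//		Logger:              flogging.MustGetLogger("orderer.common.cluster.puller"),
//		StopChannel:         make(chan struct{}),
//	}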
    57  
    58  // Clone returns a copy of this BlockPuller, initialized for the
    59  // same channel but with its internal state reset.
    60  func (p *BlockPuller) Clone() *BlockPuller {
    61  	// Clone by value
    62  	copy := *p
    63  	// Reset internal state
    64  	copy.stream = nil
    65  	copy.blockBuff = nil
    66  	copy.latestSeq = 0
    67  	copy.endpoint = ""
    68  	copy.conn = nil
    69  	copy.cancelStream = nil
    70  	return &copy
    71  }
    72  
    73  // Close makes the BlockPuller close the connection and stream
    74  // with the remote endpoint, and wipe the internal block buffer.
    75  func (p *BlockPuller) Close() {
    76  	p.disconnect()
    77  	p.blockBuff = nil
    78  }
    79  
    80  func (p *BlockPuller) disconnect() {
    81  	if p.cancelStream != nil {
    82  		p.cancelStream()
    83  	}
    84  	p.cancelStream = nil
    85  
    86  	if p.conn != nil {
    87  		p.conn.Close()
    88  	}
    89  	p.conn = nil
    90  	p.endpoint = ""
    91  	p.latestSeq = 0
    92  }
    93  
    94  // PullBlock blocks until a block with the given sequence is fetched
    95  // from some remote ordering node, or until consecutive failures
    96  // of fetching the block exceed MaxPullBlockRetries.
    97  func (p *BlockPuller) PullBlock(seq uint64) *common.Block {
    98  	retriesLeft := p.MaxPullBlockRetries
    99  	for {
   100  		block := p.tryFetchBlock(seq)
   101  		if block != nil {
   102  			return block
   103  		}
   104  		retriesLeft--
   105  		if retriesLeft == 0 && p.MaxPullBlockRetries > 0 {
   106  			p.Logger.Errorf("Failed pulling block [%d]: retry count exhausted (%d)", seq, p.MaxPullBlockRetries)
   107  			return nil
   108  		}
   109  
   110  		if waitOnStop(p.RetryTimeout, p.StopChannel) {
   111  			p.Logger.Info("Received a stop signal")
   112  			return nil
   113  		}
   114  	}
   115  }
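
// Illustrative sketch (not part of the original file): callers typically drive
// PullBlock one sequence at a time. A nil return means either the retry budget
// (MaxPullBlockRetries) was exhausted or StopChannel was closed. Here puller
// and startSeq are placeholders.
//
//	for seq := startSeq; ; seq++ {
//		block := puller.PullBlock(seq)
//		if block == nil {
//			break // retries exhausted or stop signal received
//		}
//		// ... process the block ...
//	}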
   116  
   117  // HeightsByEndpoints returns the block heights of the orderers, mapped by endpoint.
   118  func (p *BlockPuller) HeightsByEndpoints() (map[string]uint64, error) {
   119  	endpointsInfo := p.probeEndpoints(0)
   120  	res := make(map[string]uint64)
   121  	for endpoint, endpointInfo := range endpointsInfo.byEndpoints() {
   122  		endpointInfo.conn.Close()
   123  		res[endpoint] = endpointInfo.lastBlockSeq + 1
   124  	}
   125  	p.Logger.Info("Returning the heights of OSNs mapped by endpoints", res)
   126  	return res, endpointsInfo.err
   127  }
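
// Illustrative sketch (not part of the original file): the returned map holds
// heights (last block sequence + 1) keyed by endpoint, so the most advanced
// orderer can be found with a simple scan.
//
//	heights, err := puller.HeightsByEndpoints()
//	if err != nil {
//		// no endpoint could be probed (at least one was forbidden or unavailable)
//	}
//	var maxHeight uint64
//	for _, height := range heights {
//		if height > maxHeight {
//			maxHeight = height
//		}
//	}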
   128  
   129  // UpdateEndpoints assigns the new endpoints and disconnects from the current one.
   130  func (p *BlockPuller) UpdateEndpoints(endpoints []EndpointCriteria) {
   131  	p.Logger.Debugf("Updating endpoints: %v", endpoints)
   132  	p.Endpoints = endpoints
   133  	// TODO FAB-18121 Disconnect only if the currently connected endpoint was dropped or has changes in its TLSRootCAs
   134  	p.disconnect()
   135  }
   136  
   137  // waitOnStop waits for the given duration, but returns immediately with true if the stop channel fires first.
   138  func waitOnStop(duration time.Duration, stop <-chan struct{}) bool {
   139  	select {
   140  	case <-stop:
   141  		return true
   142  	case <-time.After(duration):
   143  		return false
   144  	}
   145  }
   146  
   147  func (p *BlockPuller) tryFetchBlock(seq uint64) *common.Block {
   148  	block := p.popBlock(seq)
   149  	if block != nil {
   150  		return block
   151  	}
   152  
   153  	var reConnected bool
   154  
   155  	for retriesLeft := p.MaxPullBlockRetries; p.isDisconnected(); retriesLeft-- {
   156  		reConnected = true
   157  		p.connectToSomeEndpoint(seq)
   158  		if p.isDisconnected() {
   159  			p.Logger.Debugf("Failed to connect to some endpoint, going to try again in %v", p.RetryTimeout)
   160  
   161  			if waitOnStop(p.RetryTimeout, p.StopChannel) {
   162  				p.Logger.Info("Received a stop signal")
   163  				return nil
   164  			}
   165  		}
   166  		if retriesLeft == 0 && p.MaxPullBlockRetries > 0 {
   167  			p.Logger.Errorf("Failed to connect to some endpoint, attempts exhausted (%d), seq: %d, endpoints: %v",
   168  				p.MaxPullBlockRetries, seq, p.Endpoints)
   169  			return nil
   170  		}
   171  	}
   172  
   173  	// At this point the buffer is empty, so we need to pull blocks
   174  	// to re-fill it.
   175  	if err := p.pullBlocks(seq, reConnected); err != nil {
   176  		p.Logger.Errorf("Failed pulling blocks: %v", err)
   177  		// Something went wrong; disconnect.
   178  		p.Close()
   179  		// If we have a block in the buffer, return it.
   180  		if len(p.blockBuff) > 0 {
   181  			return p.blockBuff[0]
   182  		}
   183  		return nil
   184  	}
   185  
   186  	if err := p.VerifyBlockSequence(p.blockBuff, p.Channel); err != nil {
   187  		p.Close()
   188  		p.Logger.Errorf("Failed verifying received blocks: %v", err)
   189  		return nil
   190  	}
   191  
   192  	// At this point, the buffer is full, so shift it and return the first block.
   193  	return p.popBlock(seq)
   194  }
   195  
   196  func (p *BlockPuller) setCancelStreamFunc(f func()) {
   197  	p.cancelStream = f
   198  }
   199  
   200  func (p *BlockPuller) pullBlocks(seq uint64, reConnected bool) error {
   201  	env, err := p.seekNextEnvelope(seq)
   202  	if err != nil {
   203  		p.Logger.Errorf("Failed creating seek envelope: %v", err)
   204  		return err
   205  	}
   206  
   207  	stream, err := p.obtainStream(reConnected, env, seq)
   208  	if err != nil {
   209  		return err
   210  	}
   211  
   212  	var totalSize int
   213  	p.blockBuff = nil
   214  	nextExpectedSequence := seq
   215  	for totalSize < p.MaxTotalBufferBytes && nextExpectedSequence <= p.latestSeq {
   216  		resp, err := stream.Recv()
   217  		if err != nil {
   218  			p.Logger.Errorf("Failed receiving next block from %s: %v", p.endpoint, err)
   219  			return err
   220  		}
   221  
   222  		block, err := extractBlockFromResponse(resp)
   223  		if err != nil {
   224  			p.Logger.Errorf("Received a bad block from %s: %v", p.endpoint, err)
   225  			return err
   226  		}
   227  		seq := block.Header.Number
   228  		if seq != nextExpectedSequence {
   229  			p.Logger.Errorf("Expected to receive sequence %d but got %d instead", nextExpectedSequence, seq)
   230  			return errors.Errorf("got unexpected sequence from %s - (%d) instead of (%d)", p.endpoint, seq, nextExpectedSequence)
   231  		}
   232  		size := blockSize(block)
   233  		totalSize += size
   234  		p.blockBuff = append(p.blockBuff, block)
   235  		nextExpectedSequence++
   236  		p.Logger.Infof("Got block [%d] of size %d KB from %s", seq, size/1024, p.endpoint)
   237  	}
   238  	return nil
   239  }
   240  
   241  func (p *BlockPuller) obtainStream(reConnected bool, env *common.Envelope, seq uint64) (*ImpatientStream, error) {
   242  	var stream *ImpatientStream
   243  	var err error
   244  	if reConnected {
   245  		p.Logger.Infof("Sending request for block [%d] to %s", seq, p.endpoint)
   246  		stream, err = p.requestBlocks(p.endpoint, NewImpatientStream(p.conn, p.FetchTimeout), env)
   247  		if err != nil {
   248  			return nil, err
   249  		}
   250  		// Stream established successfully.
   251  		// In next iterations of this function, reuse it.
   252  		p.stream = stream
   253  	} else {
   254  		// Reuse previous stream
   255  		stream = p.stream
   256  	}
   257  
   258  	p.setCancelStreamFunc(stream.cancelFunc)
   259  	return stream, nil
   260  }
   261  
   262  // popBlock pops a block from the in-memory buffer and returns it,
   263  // or returns nil if the buffer is empty or the block doesn't match
   264  // the requested sequence.
   265  func (p *BlockPuller) popBlock(seq uint64) *common.Block {
   266  	if len(p.blockBuff) == 0 {
   267  		return nil
   268  	}
   269  	block, rest := p.blockBuff[0], p.blockBuff[1:]
   270  	p.blockBuff = rest
   271  	// If the requested block sequence is the wrong one, discard the buffer
   272  	// to start fetching blocks all over again.
   273  	if seq != block.Header.Number {
   274  		p.blockBuff = nil
   275  		return nil
   276  	}
   277  	return block
   278  }
   279  
   280  func (p *BlockPuller) isDisconnected() bool {
   281  	return p.conn == nil
   282  }
   283  
   284  // connectToSomeEndpoint makes the BlockPuller connect to some endpoint that has
   285  // the given minimum block sequence.
   286  func (p *BlockPuller) connectToSomeEndpoint(minRequestedSequence uint64) {
   287  	// Probe all endpoints in parallel, searching for endpoints with the given minimum block sequence,
   288  	// and then map the results by endpoint.
   289  	endpointsInfo := p.probeEndpoints(minRequestedSequence).byEndpoints()
   290  	if len(endpointsInfo) == 0 {
   291  		p.Logger.Warningf("Could not connect to any endpoint of %v", p.Endpoints)
   292  		return
   293  	}
   294  
   295  	// Choose a random endpoint out of the available endpoints
   296  	chosenEndpoint := randomEndpoint(endpointsInfo)
   297  	// Disconnect all connections but this endpoint
   298  	for endpoint, endpointInfo := range endpointsInfo {
   299  		if endpoint == chosenEndpoint {
   300  			continue
   301  		}
   302  		endpointInfo.conn.Close()
   303  	}
   304  
   305  	p.conn = endpointsInfo[chosenEndpoint].conn
   306  	p.endpoint = chosenEndpoint
   307  	p.latestSeq = endpointsInfo[chosenEndpoint].lastBlockSeq
   308  
   309  	p.Logger.Infof("Connected to %s with last block seq of %d", p.endpoint, p.latestSeq)
   310  }
   311  
   312  // probeEndpoints reaches out to all known endpoints and returns their latest block sequences,
   313  // as well as gRPC connections to them.
   314  func (p *BlockPuller) probeEndpoints(minRequestedSequence uint64) *endpointInfoBucket {
   315  	endpointsInfo := make(chan *endpointInfo, len(p.Endpoints))
   316  
   317  	var wg sync.WaitGroup
   318  	wg.Add(len(p.Endpoints))
   319  
   320  	var forbiddenErr uint32
   321  	var unavailableErr uint32
   322  
   323  	for _, endpoint := range p.Endpoints {
   324  		go func(endpoint EndpointCriteria) {
   325  			defer wg.Done()
   326  			ei, err := p.probeEndpoint(endpoint, minRequestedSequence)
   327  			if err != nil {
   328  				p.Logger.Warningf("Received error of type '%v' from %s", err, endpoint.Endpoint)
   329  				p.Logger.Debugf("%s's TLSRootCAs are %s", endpoint.Endpoint, endpoint.TLSRootCAs)
   330  				if err == ErrForbidden {
   331  					atomic.StoreUint32(&forbiddenErr, 1)
   332  				}
   333  				if err == ErrServiceUnavailable {
   334  					atomic.StoreUint32(&unavailableErr, 1)
   335  				}
   336  				return
   337  			}
   338  			endpointsInfo <- ei
   339  		}(endpoint)
   340  	}
   341  	wg.Wait()
   342  
   343  	close(endpointsInfo)
   344  	eib := &endpointInfoBucket{
   345  		bucket: endpointsInfo,
   346  		logger: p.Logger,
   347  	}
   348  
   349  	if unavailableErr == 1 && len(endpointsInfo) == 0 {
   350  		eib.err = ErrServiceUnavailable
   351  	}
   352  	if forbiddenErr == 1 && len(endpointsInfo) == 0 {
   353  		eib.err = ErrForbidden
   354  	}
   355  	return eib
   356  }
   357  
   358  // probeEndpoint returns a gRPC connection and the latest block sequence of an endpoint that satisfies
   359  // the given minimum sequence, or an error if something goes wrong.
   360  func (p *BlockPuller) probeEndpoint(endpoint EndpointCriteria, minRequestedSequence uint64) (*endpointInfo, error) {
   361  	conn, err := p.Dialer.Dial(endpoint)
   362  	if err != nil {
   363  		p.Logger.Warningf("Failed connecting to %s: %v", endpoint, err)
   364  		return nil, err
   365  	}
   366  
   367  	lastBlockSeq, err := p.fetchLastBlockSeq(minRequestedSequence, endpoint.Endpoint, conn)
   368  	if err != nil {
   369  		conn.Close()
   370  		return nil, err
   371  	}
   372  
   373  	return &endpointInfo{conn: conn, lastBlockSeq: lastBlockSeq, endpoint: endpoint.Endpoint}, nil
   374  }
   375  
   376  // randomEndpoint returns a random endpoint from the given map of endpointInfo.
   377  func randomEndpoint(endpointsToHeight map[string]*endpointInfo) string {
   378  	var candidates []string
   379  	for endpoint := range endpointsToHeight {
   380  		candidates = append(candidates, endpoint)
   381  	}
   382  
   383  	rand.Seed(time.Now().UnixNano())
   384  	return candidates[rand.Intn(len(candidates))]
   385  }
   386  
   387  // fetchLastBlockSeq returns the last block sequence of an endpoint with the given gRPC connection.
   388  func (p *BlockPuller) fetchLastBlockSeq(minRequestedSequence uint64, endpoint string, conn *grpc.ClientConn) (uint64, error) {
   389  	env, err := p.seekLastEnvelope()
   390  	if err != nil {
   391  		p.Logger.Errorf("Failed creating seek envelope for %s: %v", endpoint, err)
   392  		return 0, err
   393  	}
   394  
   395  	stream, err := p.requestBlocks(endpoint, NewImpatientStream(conn, p.FetchTimeout), env)
   396  	if err != nil {
   397  		return 0, err
   398  	}
   399  	defer stream.abort()
   400  
   401  	resp, err := stream.Recv()
   402  	if err != nil {
   403  		p.Logger.Errorf("Failed receiving the latest block from %s: %v", endpoint, err)
   404  		return 0, err
   405  	}
   406  
   407  	block, err := extractBlockFromResponse(resp)
   408  	if err != nil {
   409  		p.Logger.Warningf("Received %v from %s: %v", resp, endpoint, err)
   410  		return 0, err
   411  	}
   412  	stream.CloseSend()
   413  
   414  	seq := block.Header.Number
   415  	if seq < minRequestedSequence {
   416  		err := errors.Errorf("minimum requested sequence is %d but %s is at sequence %d", minRequestedSequence, endpoint, seq)
   417  		p.Logger.Infof("Skipping pulling from %s: %v", endpoint, err)
   418  		return 0, err
   419  	}
   420  
   421  	p.Logger.Infof("%s is at block sequence of %d", endpoint, seq)
   422  	return block.Header.Number, nil
   423  }
   424  
   425  // requestBlocks starts requesting blocks from the given endpoint, using the given ImpatientStreamCreator
   426  // and sending the given envelope.
   427  // It returns a stream that is used to pull blocks, or an error if something goes wrong.
   428  func (p *BlockPuller) requestBlocks(endpoint string, newStream ImpatientStreamCreator, env *common.Envelope) (*ImpatientStream, error) {
   429  	stream, err := newStream()
   430  	if err != nil {
   431  		p.Logger.Warningf("Failed establishing deliver stream with %s", endpoint)
   432  		return nil, err
   433  	}
   434  
   435  	if err := stream.Send(env); err != nil {
   436  		p.Logger.Errorf("Failed sending seek envelope to %s: %v", endpoint, err)
   437  		stream.abort()
   438  		return nil, err
   439  	}
   440  	return stream, nil
   441  }
   442  
   443  func extractBlockFromResponse(resp *orderer.DeliverResponse) (*common.Block, error) {
   444  	switch t := resp.Type.(type) {
   445  	case *orderer.DeliverResponse_Block:
   446  		block := t.Block
   447  		if block == nil {
   448  			return nil, errors.New("block is nil")
   449  		}
   450  		if block.Data == nil {
   451  			return nil, errors.New("block data is nil")
   452  		}
   453  		if block.Header == nil {
   454  			return nil, errors.New("block header is nil")
   455  		}
   456  		if block.Metadata == nil || len(block.Metadata.Metadata) == 0 {
   457  			return nil, errors.New("block metadata is empty")
   458  		}
   459  		return block, nil
   460  	case *orderer.DeliverResponse_Status:
   461  		if t.Status == common.Status_FORBIDDEN {
   462  			return nil, ErrForbidden
   463  		}
   464  		if t.Status == common.Status_SERVICE_UNAVAILABLE {
   465  			return nil, ErrServiceUnavailable
   466  		}
   467  		return nil, errors.Errorf("faulty node, received: %v", resp)
   468  	default:
   469  		return nil, errors.Errorf("response is of type %v, but expected a block", reflect.TypeOf(resp.Type))
   470  	}
   471  }
   472  
   473  func (p *BlockPuller) seekLastEnvelope() (*common.Envelope, error) {
   474  	return protoutil.CreateSignedEnvelopeWithTLSBinding(
   475  		common.HeaderType_DELIVER_SEEK_INFO,
   476  		p.Channel,
   477  		p.Signer,
   478  		last(),
   479  		int32(0),
   480  		uint64(0),
   481  		util.ComputeSHA256(p.TLSCert),
   482  	)
   483  }
   484  
   485  func (p *BlockPuller) seekNextEnvelope(startSeq uint64) (*common.Envelope, error) {
   486  	return protoutil.CreateSignedEnvelopeWithTLSBinding(
   487  		common.HeaderType_DELIVER_SEEK_INFO,
   488  		p.Channel,
   489  		p.Signer,
   490  		nextSeekInfo(startSeq),
   491  		int32(0),
   492  		uint64(0),
   493  		util.ComputeSHA256(p.TLSCert),
   494  	)
   495  }
   496  
   497  func last() *orderer.SeekInfo {
   498  	return &orderer.SeekInfo{
   499  		Start:         &orderer.SeekPosition{Type: &orderer.SeekPosition_Newest{Newest: &orderer.SeekNewest{}}},
   500  		Stop:          &orderer.SeekPosition{Type: &orderer.SeekPosition_Specified{Specified: &orderer.SeekSpecified{Number: math.MaxUint64}}},
   501  		Behavior:      orderer.SeekInfo_BLOCK_UNTIL_READY,
   502  		ErrorResponse: orderer.SeekInfo_BEST_EFFORT,
   503  	}
   504  }
   505  
   506  func nextSeekInfo(startSeq uint64) *orderer.SeekInfo {
   507  	return &orderer.SeekInfo{
   508  		Start:         &orderer.SeekPosition{Type: &orderer.SeekPosition_Specified{Specified: &orderer.SeekSpecified{Number: startSeq}}},
   509  		Stop:          &orderer.SeekPosition{Type: &orderer.SeekPosition_Specified{Specified: &orderer.SeekSpecified{Number: math.MaxUint64}}},
   510  		Behavior:      orderer.SeekInfo_BLOCK_UNTIL_READY,
   511  		ErrorResponse: orderer.SeekInfo_BEST_EFFORT,
   512  	}
   513  }
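
// Illustrative note (not part of the original file): nextSeekInfo(7), for
// example, requests blocks 7, 8, 9, ... with no upper bound (the stop position
// is math.MaxUint64), and BLOCK_UNTIL_READY makes the orderer hold the stream
// open until each next block is committed.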
   514  
   515  func blockSize(block *common.Block) int {
   516  	return len(protoutil.MarshalOrPanic(block))
   517  }
   518  
   519  type endpointInfo struct {
   520  	endpoint     string
   521  	conn         *grpc.ClientConn
   522  	lastBlockSeq uint64
   523  }
   524  
   525  type endpointInfoBucket struct {
   526  	bucket <-chan *endpointInfo
   527  	logger *flogging.FabricLogger
   528  	err    error
   529  }
   530  
   531  func (eib endpointInfoBucket) byEndpoints() map[string]*endpointInfo {
   532  	infoByEndpoints := make(map[string]*endpointInfo)
   533  	for endpointInfo := range eib.bucket {
   534  		if _, exists := infoByEndpoints[endpointInfo.endpoint]; exists {
   535  			eib.logger.Warningf("Duplicate endpoint found(%s), skipping it", endpointInfo.endpoint)
   536  			endpointInfo.conn.Close()
   537  			continue
   538  		}
   539  		infoByEndpoints[endpointInfo.endpoint] = endpointInfo
   540  	}
   541  	return infoByEndpoints
   542  }
   543  
   544  // ImpatientStreamCreator creates an ImpatientStream
   545  type ImpatientStreamCreator func() (*ImpatientStream, error)
   546  
   547  // ImpatientStream aborts the stream if it waits too long for a message.
   548  type ImpatientStream struct {
   549  	waitTimeout time.Duration
   550  	orderer.AtomicBroadcast_DeliverClient
   551  	cancelFunc func()
   552  }
   553  
   554  func (stream *ImpatientStream) abort() {
   555  	stream.cancelFunc()
   556  }
   557  
   558  // Recv blocks until a response is received from the stream or the
   559  // timeout expires.
   560  func (stream *ImpatientStream) Recv() (*orderer.DeliverResponse, error) {
   561  	// Initialize a timeout to cancel the stream when it expires
   562  	timeout := time.NewTimer(stream.waitTimeout)
   563  	defer timeout.Stop()
   564  
   565  	responseChan := make(chan errorAndResponse, 1)
   566  
   567  	// receive waitGroup ensures the goroutine below exits before
   568  	// this function exits.
   569  	var receive sync.WaitGroup
   570  	receive.Add(1)
   571  	defer receive.Wait()
   572  
   573  	go func() {
   574  		defer receive.Done()
   575  		resp, err := stream.AtomicBroadcast_DeliverClient.Recv()
   576  		responseChan <- errorAndResponse{err: err, resp: resp}
   577  	}()
   578  
   579  	select {
   580  	case <-timeout.C:
   581  		stream.cancelFunc()
   582  		return nil, errors.Errorf("didn't receive a response within %v", stream.waitTimeout)
   583  	case respAndErr := <-responseChan:
   584  		return respAndErr.resp, respAndErr.err
   585  	}
   586  }
   587  
   588  // NewImpatientStream returns an ImpatientStreamCreator that creates ImpatientStreams.
   589  func NewImpatientStream(conn *grpc.ClientConn, waitTimeout time.Duration) ImpatientStreamCreator {
   590  	return func() (*ImpatientStream, error) {
   591  		abc := orderer.NewAtomicBroadcastClient(conn)
   592  		ctx, cancel := context.WithCancel(context.Background())
   593  
   594  		stream, err := abc.Deliver(ctx)
   595  		if err != nil {
   596  			cancel()
   597  			return nil, err
   598  		}
   599  
   600  		once := &sync.Once{}
   601  		return &ImpatientStream{
   602  			waitTimeout: waitTimeout,
   603  			// The stream might be canceled while Close() is being called, but also
   604  			// when a timeout expires, so ensure cancel is only called once.
   605  			cancelFunc: func() {
   606  				once.Do(cancel)
   607  			},
   608  			AtomicBroadcast_DeliverClient: stream,
   609  		}, nil
   610  	}
   611  }
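
// Illustrative sketch (not part of the original file): an ImpatientStreamCreator
// is invoked lazily to open a deliver stream, and every Recv on the resulting
// stream gives up after the configured timeout; conn is a placeholder
// *grpc.ClientConn.
//
//	newStream := NewImpatientStream(conn, 10*time.Second)
//	stream, err := newStream()
//	if err != nil {
//		// handle stream establishment error
//	}
//	resp, err := stream.Recv() // errors out if no response arrives within 10s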
   612  
   613  type errorAndResponse struct {
   614  	err  error
   615  	resp *orderer.DeliverResponse
   616  }