github.com/darrenli6/fabric-sdk-example@v0.0.0-20220109053535-94b13b56df8c/orderer/common/blockcutter/blockcutter.go (about)

     1  /*
     2  Copyright IBM Corp. 2016 All Rights Reserved.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8                   http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package blockcutter
    18  
    19  import (
    20  	"github.com/hyperledger/fabric/common/config"
    21  	"github.com/hyperledger/fabric/orderer/common/filter"
    22  	cb "github.com/hyperledger/fabric/protos/common"
    23  
    24  	"github.com/op/go-logging"
    25  )
    26  
// logger is the package-level logger for the blockcutter component.
var logger = logging.MustGetLogger("orderer/common/blockcutter")
    28  
// Receiver defines a sink for the ordered broadcast messages
type Receiver interface {
	// Ordered should be invoked sequentially as messages are ordered.
	// It returns four values:
	//   - messageBatches: zero, one, or two batches of messages cut as a
	//     consequence of ordering this message.
	//   - committers: batches of committers corresponding 1:1 with messageBatches.
	//   - validTx: true if the message passed filtering, false otherwise.
	//   - pending: true if messages are still enqueued in the receiver after
	//     any cutting has occurred.
	//
	// If the current message is valid and no batches need to be cut:
	//   - Ordered returns nil batches, nil committers, and validTx == true.
	// If the current message is invalid (rejected by the filters):
	//   - Ordered returns nil batches, nil committers, and validTx == false.
	//
	// Given a valid message that must be isolated (as determined during filtering):
	//   - Ordered returns:
	//     * The pending batch (if not empty), followed by a second batch
	//       containing only the isolated message.
	//     * The corresponding batches of committers.
	//     * validTx == true.
	// Otherwise, given a valid message, the pending batch (if not empty) is cut
	// and returned when any of the following hold:
	//   - The current message by itself exceeds BatchSize.PreferredMaxBytes.
	//   - Adding the current message would push the pending batch size in bytes
	//     past BatchSize.PreferredMaxBytes.
	//   - After adding the current message, the message count reaches
	//     BatchSize.MaxMessageCount.
	Ordered(msg *cb.Envelope) (messageBatches [][]*cb.Envelope, committers [][]filter.Committer, validTx bool, pending bool)

	// Cut returns the current batch and starts a new one
	Cut() ([]*cb.Envelope, []filter.Committer)
}
    55  
// receiver is the concrete Receiver implementation. It accumulates ordered
// messages (and their committers) into a pending batch until a size, count,
// or isolation rule forces the batch to be cut.
type receiver struct {
	sharedConfigManager   config.Orderer    // source of BatchSize limits (MaxMessageCount, PreferredMaxBytes)
	filters               *filter.RuleSet   // filters re-applied to each message on arrival
	pendingBatch          []*cb.Envelope    // messages accumulated since the last cut
	pendingBatchSizeBytes uint32            // running payload+signature byte total of pendingBatch
	pendingCommitters     []filter.Committer // committers paired 1:1 with pendingBatch entries
}
    63  
    64  // NewReceiverImpl creates a Receiver implementation based on the given configtxorderer manager and filters
    65  func NewReceiverImpl(sharedConfigManager config.Orderer, filters *filter.RuleSet) Receiver {
    66  	return &receiver{
    67  		sharedConfigManager: sharedConfigManager,
    68  		filters:             filters,
    69  	}
    70  }
    71  
// Ordered should be invoked sequentially as messages are ordered.
//
// Return values:
//   - messageBatches: zero, one, or two batches cut as a result of this call
//     (two when a non-empty pending batch is cut AND this message is isolated
//     into its own batch).
//   - committerBatches: committers corresponding 1:1 with messageBatches.
//   - validTx: true if the message passed filtering, false otherwise.
//   - pending: true if messages remain enqueued in the receiver afterwards.
//
// A batch is cut when any of the following hold:
//   - The current message must be isolated (as determined during filtering).
//   - The current message by itself exceeds BatchSize.PreferredMaxBytes
//     (it is placed alone in its own batch).
//   - Adding the current message would push the pending batch size in bytes
//     past BatchSize.PreferredMaxBytes.
//   - After adding the current message, the pending batch reaches
//     BatchSize.MaxMessageCount.
func (r *receiver) Ordered(msg *cb.Envelope) (messageBatches [][]*cb.Envelope, committerBatches [][]filter.Committer, validTx bool, pending bool) {
	// The messages must be filtered a second time in case configuration has changed since the message was received
	committer, err := r.filters.Apply(msg)
	if err != nil {
		logger.Debugf("Rejecting message: %s", err)
		return // `pending` is not meaningful for a rejected message; all results stay at their zero values
	}

	// message is valid
	validTx = true

	messageSizeBytes := messageSizeBytes(msg)

	// Isolation path: the message goes into a batch of its own, either because
	// filtering demanded it or because it is individually oversized.
	if committer.Isolated() || messageSizeBytes > r.sharedConfigManager.BatchSize().PreferredMaxBytes {

		if committer.Isolated() {
			logger.Debugf("Found message which requested to be isolated, cutting into its own batch")
		} else {
			logger.Debugf("The current message, with %v bytes, is larger than the preferred batch size of %v bytes and will be isolated.", messageSizeBytes, r.sharedConfigManager.BatchSize().PreferredMaxBytes)
		}

		// cut pending batch, if it has any messages
		if len(r.pendingBatch) > 0 {
			messageBatch, committerBatch := r.Cut()
			messageBatches = append(messageBatches, messageBatch)
			committerBatches = append(committerBatches, committerBatch)
		}

		// create new batch with single message
		messageBatches = append(messageBatches, []*cb.Envelope{msg})
		committerBatches = append(committerBatches, []filter.Committer{committer})

		// `pending` stays false: nothing remains enqueued after an isolation cut.
		return
	}

	// Non-isolated path: check whether appending would overflow the preferred
	// byte limit; if so, cut the pending batch BEFORE enqueuing the message.
	messageWillOverflowBatchSizeBytes := r.pendingBatchSizeBytes+messageSizeBytes > r.sharedConfigManager.BatchSize().PreferredMaxBytes

	if messageWillOverflowBatchSizeBytes {
		logger.Debugf("The current message, with %v bytes, will overflow the pending batch of %v bytes.", messageSizeBytes, r.pendingBatchSizeBytes)
		logger.Debugf("Pending batch would overflow if current message is added, cutting batch now.")
		messageBatch, committerBatch := r.Cut()
		messageBatches = append(messageBatches, messageBatch)
		committerBatches = append(committerBatches, committerBatch)
	}

	// Enqueue the message into the (possibly freshly reset) pending batch.
	logger.Debugf("Enqueuing message into batch")
	r.pendingBatch = append(r.pendingBatch, msg)
	r.pendingBatchSizeBytes += messageSizeBytes
	r.pendingCommitters = append(r.pendingCommitters, committer)
	pending = true

	// Count-based cut happens AFTER enqueuing, so the triggering message is
	// included in the cut batch and nothing remains pending.
	if uint32(len(r.pendingBatch)) >= r.sharedConfigManager.BatchSize().MaxMessageCount {
		logger.Debugf("Batch size met, cutting batch")
		messageBatch, committerBatch := r.Cut()
		messageBatches = append(messageBatches, messageBatch)
		committerBatches = append(committerBatches, committerBatch)
		pending = false
	}

	return
}
   152  
   153  // Cut returns the current batch and starts a new one
   154  func (r *receiver) Cut() ([]*cb.Envelope, []filter.Committer) {
   155  	batch := r.pendingBatch
   156  	r.pendingBatch = nil
   157  	committers := r.pendingCommitters
   158  	r.pendingCommitters = nil
   159  	r.pendingBatchSizeBytes = 0
   160  	return batch, committers
   161  }
   162  
   163  func messageSizeBytes(message *cb.Envelope) uint32 {
   164  	return uint32(len(message.Payload) + len(message.Signature))
   165  }