github.com/hechain20/hechain@v0.0.0-20220316014945-b544036ba106/orderer/common/blockcutter/blockcutter_test.go

/*
Copyright hechain. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package blockcutter_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/hechain20/hechain/orderer/common/blockcutter"
	"github.com/hechain20/hechain/orderer/common/blockcutter/mock"
	cb "github.com/hyperledger/fabric-protos-go/common"
	ab "github.com/hyperledger/fabric-protos-go/orderer"
)

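// These specs exercise the blockcutter.Receiver returned by NewReceiverImpl,
// driving it through Ordered and Cut with fakes from the mock package standing
// in for the orderer config fetcher and the block-fill-duration histogram.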
var _ = Describe("Blockcutter", func() {
	var (
		bc                blockcutter.Receiver
		fakeConfig        *mock.OrdererConfig
		fakeConfigFetcher *mock.OrdererConfigFetcher

		metrics               *blockcutter.Metrics
		fakeBlockFillDuration *mock.MetricsHistogram
	)

	BeforeEach(func() {
		fakeConfig = &mock.OrdererConfig{}
		fakeConfigFetcher = &mock.OrdererConfigFetcher{}
		fakeConfigFetcher.OrdererConfigReturns(fakeConfig, true)

		fakeBlockFillDuration = &mock.MetricsHistogram{}
		fakeBlockFillDuration.WithReturns(fakeBlockFillDuration)
		metrics = &blockcutter.Metrics{
			BlockFillDuration: fakeBlockFillDuration,
		}

		bc = blockcutter.NewReceiverImpl("mychannel", fakeConfigFetcher, metrics)
	})

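	// Ordered is the receiver's main entry point: each call either leaves the
	// message pending or returns one or more cut batches. The default batch size
	// below (MaxMessageCount 2, PreferredMaxBytes 100) is overridden per Context.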
	Describe("Ordered", func() {
		var message *cb.Envelope

		BeforeEach(func() {
			fakeConfig.BatchSizeReturns(&ab.BatchSize{
				MaxMessageCount:   2,
				PreferredMaxBytes: 100,
			})

			message = &cb.Envelope{Payload: []byte("Twenty Bytes of Data"), Signature: []byte("Twenty Bytes of Data")}
		})

		It("adds the message to the pending batches", func() {
			batches, pending := bc.Ordered(message)
			Expect(batches).To(BeEmpty())
			Expect(pending).To(BeTrue())
			Expect(fakeBlockFillDuration.ObserveCallCount()).To(Equal(0))
		})

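		// Two messages reach MaxMessageCount (2), so the second call should cut a
		// single batch containing both and record one block-fill-duration sample.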
		Context("when enough messages to fill the max message count are enqueued", func() {
			It("cuts the batch", func() {
				batches, pending := bc.Ordered(message)
				Expect(batches).To(BeEmpty())
				Expect(pending).To(BeTrue())
				batches, pending = bc.Ordered(message)
				Expect(len(batches)).To(Equal(1))
				Expect(len(batches[0])).To(Equal(2))
				Expect(pending).To(BeFalse())

				Expect(fakeBlockFillDuration.ObserveCallCount()).To(Equal(1))
				Expect(fakeBlockFillDuration.ObserveArgsForCall(0)).To(BeNumerically(">", 0))
				Expect(fakeBlockFillDuration.ObserveArgsForCall(0)).To(BeNumerically("<", 1))
				Expect(fakeBlockFillDuration.WithCallCount()).To(Equal(1))
				Expect(fakeBlockFillDuration.WithArgsForCall(0)).To(Equal([]string{"channel", "mychannel"}))
			})
		})

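		// With MaxMessageCount raised to 3, two messages stay below both limits,
		// so nothing is cut and no fill duration is observed.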
		Context("when the message does not exceed max message count or preferred size", func() {
			BeforeEach(func() {
				fakeConfig.BatchSizeReturns(&ab.BatchSize{
					MaxMessageCount:   3,
					PreferredMaxBytes: 100,
				})
			})

			It("adds the message to the pending batches", func() {
				batches, pending := bc.Ordered(message)
				Expect(batches).To(BeEmpty())
				Expect(pending).To(BeTrue())
				batches, pending = bc.Ordered(message)
				Expect(batches).To(BeEmpty())
				Expect(pending).To(BeTrue())
				Expect(fakeBlockFillDuration.ObserveCallCount()).To(Equal(0))
			})
		})

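		// PreferredMaxBytes (30) is smaller than the message itself (payload and
		// signature together are 40 bytes), so the message should be cut into its
		// own batch immediately, with a recorded fill duration of zero.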
		Context("when the message is larger than the preferred max bytes", func() {
			BeforeEach(func() {
				fakeConfig.BatchSizeReturns(&ab.BatchSize{
					MaxMessageCount:   3,
					PreferredMaxBytes: 30,
				})
			})

			It("cuts the batch immediately", func() {
				batches, pending := bc.Ordered(message)
				Expect(len(batches)).To(Equal(1))
				Expect(pending).To(BeFalse())
				Expect(fakeBlockFillDuration.ObserveCallCount()).To(Equal(1))
				Expect(fakeBlockFillDuration.ObserveArgsForCall(0)).To(Equal(float64(0)))
				Expect(fakeBlockFillDuration.WithCallCount()).To(Equal(1))
				Expect(fakeBlockFillDuration.WithArgsForCall(0)).To(Equal([]string{"channel", "mychannel"}))
			})
		})

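		// With PreferredMaxBytes of 50, a single 40-byte message fits but a second
		// one would push the batch past the limit, so the pending batch is cut and
		// the new message starts the next one.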
		Context("when the message causes the batch to exceed the preferred max bytes", func() {
			BeforeEach(func() {
				fakeConfig.BatchSizeReturns(&ab.BatchSize{
					MaxMessageCount:   3,
					PreferredMaxBytes: 50,
				})
			})

			It("cuts the previous batch immediately, enqueueing the second", func() {
				batches, pending := bc.Ordered(message)
				Expect(batches).To(BeEmpty())
				Expect(pending).To(BeTrue())

				batches, pending = bc.Ordered(message)
				Expect(len(batches)).To(Equal(1))
				Expect(len(batches[0])).To(Equal(1))
				Expect(pending).To(BeTrue())

				Expect(fakeBlockFillDuration.ObserveCallCount()).To(Equal(1))
				Expect(fakeBlockFillDuration.ObserveArgsForCall(0)).To(BeNumerically(">", 0))
				Expect(fakeBlockFillDuration.ObserveArgsForCall(0)).To(BeNumerically("<", 1))
				Expect(fakeBlockFillDuration.WithCallCount()).To(Equal(1))
				Expect(fakeBlockFillDuration.WithArgsForCall(0)).To(Equal([]string{"channel", "mychannel"}))
			})

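			// The 1000-byte payload exceeds PreferredMaxBytes on its own, so the
			// pending batch and the oversized message are each cut into separate
			// batches within a single Ordered call.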
			Context("when the new message is larger than the preferred max bytes", func() {
				var bigMessage *cb.Envelope

				BeforeEach(func() {
					bigMessage = &cb.Envelope{Payload: make([]byte, 1000)}
				})

				It("cuts both the previous batch and the next batch immediately", func() {
					batches, pending := bc.Ordered(message)
					Expect(batches).To(BeEmpty())
					Expect(pending).To(BeTrue())

					batches, pending = bc.Ordered(bigMessage)
					Expect(len(batches)).To(Equal(2))
					Expect(len(batches[0])).To(Equal(1))
					Expect(len(batches[1])).To(Equal(1))
					Expect(pending).To(BeFalse())

					Expect(fakeBlockFillDuration.ObserveCallCount()).To(Equal(2))
					Expect(fakeBlockFillDuration.ObserveArgsForCall(0)).To(BeNumerically(">", 0))
					Expect(fakeBlockFillDuration.ObserveArgsForCall(0)).To(BeNumerically("<", 1))
					Expect(fakeBlockFillDuration.ObserveArgsForCall(1)).To(Equal(float64(0)))
					Expect(fakeBlockFillDuration.WithCallCount()).To(Equal(2))
					Expect(fakeBlockFillDuration.WithArgsForCall(0)).To(Equal([]string{"channel", "mychannel"}))
					Expect(fakeBlockFillDuration.WithArgsForCall(1)).To(Equal([]string{"channel", "mychannel"}))
				})
			})
		})

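		// If the channel's orderer config cannot be fetched, Ordered has no batch
		// size to work with and is expected to panic.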
		Context("when the orderer config cannot be retrieved", func() {
			BeforeEach(func() {
				fakeConfigFetcher.OrdererConfigReturns(nil, false)
			})

			It("panics", func() {
				Expect(func() { bc.Ordered(message) }).To(Panic())
			})
		})
	})

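	// Cut with nothing pending returns a nil batch and records no metrics.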
	Describe("Cut", func() {
		It("cuts an empty batch", func() {
			batch := bc.Cut()
			Expect(batch).To(BeNil())
			Expect(fakeBlockFillDuration.ObserveCallCount()).To(Equal(0))
		})
	})
})