github.com/anjalikarhana/fabric@v2.1.1+incompatible/orderer/common/blockcutter/blockcutter_test.go

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package blockcutter_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	cb "github.com/hyperledger/fabric-protos-go/common"
	ab "github.com/hyperledger/fabric-protos-go/orderer"
	"github.com/hyperledger/fabric/orderer/common/blockcutter"
	"github.com/hyperledger/fabric/orderer/common/blockcutter/mock"
)

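// These specs exercise blockcutter.Receiver, which accumulates ordered
// envelopes into a pending batch. Summarizing the expectations below: a batch
// is cut once the pending batch reaches the configured MaxMessageCount, when
// adding a message would push the pending batch past PreferredMaxBytes, or
// immediately when a single message is itself larger than PreferredMaxBytes;
// each cut observes the BlockFillDuration histogram with a "channel" label.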
var _ = Describe("Blockcutter", func() {
	var (
		bc                blockcutter.Receiver
		fakeConfig        *mock.OrdererConfig
		fakeConfigFetcher *mock.OrdererConfigFetcher

		metrics               *blockcutter.Metrics
		fakeBlockFillDuration *mock.MetricsHistogram
	)

	BeforeEach(func() {
		fakeConfig = &mock.OrdererConfig{}
		fakeConfigFetcher = &mock.OrdererConfigFetcher{}
		fakeConfigFetcher.OrdererConfigReturns(fakeConfig, true)

		// The fake histogram returns itself from With so that label arguments
		// and Observe calls can be asserted on the same fake.
		fakeBlockFillDuration = &mock.MetricsHistogram{}
		fakeBlockFillDuration.WithReturns(fakeBlockFillDuration)
		metrics = &blockcutter.Metrics{
			BlockFillDuration: fakeBlockFillDuration,
		}

		bc = blockcutter.NewReceiverImpl("mychannel", fakeConfigFetcher, metrics)
	})

	Describe("Ordered", func() {
		var (
			message *cb.Envelope
		)

		BeforeEach(func() {
			fakeConfig.BatchSizeReturns(&ab.BatchSize{
				MaxMessageCount:   2,
				PreferredMaxBytes: 100,
			})

			// Payload and signature are 20 bytes each, so the cutter sizes
			// this envelope at about 40 bytes.
			message = &cb.Envelope{Payload: []byte("Twenty Bytes of Data"), Signature: []byte("Twenty Bytes of Data")}
		})

		It("adds the message to the pending batches", func() {
			batches, pending := bc.Ordered(message)
			Expect(batches).To(BeEmpty())
			Expect(pending).To(BeTrue())
			Expect(fakeBlockFillDuration.ObserveCallCount()).To(Equal(0))
		})

		Context("when enough messages are enqueued to fill the max message count", func() {
			It("cuts the batch", func() {
				batches, pending := bc.Ordered(message)
				Expect(batches).To(BeEmpty())
				Expect(pending).To(BeTrue())
				batches, pending = bc.Ordered(message)
				Expect(len(batches)).To(Equal(1))
				Expect(len(batches[0])).To(Equal(2))
				Expect(pending).To(BeFalse())

				Expect(fakeBlockFillDuration.ObserveCallCount()).To(Equal(1))
				Expect(fakeBlockFillDuration.ObserveArgsForCall(0)).To(BeNumerically(">", 0))
				Expect(fakeBlockFillDuration.ObserveArgsForCall(0)).To(BeNumerically("<", 1))
				Expect(fakeBlockFillDuration.WithCallCount()).To(Equal(1))
				Expect(fakeBlockFillDuration.WithArgsForCall(0)).To(Equal([]string{"channel", "mychannel"}))
			})
		})

		Context("when the message does not exceed max message count or preferred size", func() {
			BeforeEach(func() {
				fakeConfig.BatchSizeReturns(&ab.BatchSize{
					MaxMessageCount:   3,
					PreferredMaxBytes: 100,
				})
			})

			It("adds the message to the pending batches", func() {
				batches, pending := bc.Ordered(message)
				Expect(batches).To(BeEmpty())
				Expect(pending).To(BeTrue())
				batches, pending = bc.Ordered(message)
				Expect(batches).To(BeEmpty())
				Expect(pending).To(BeTrue())
				Expect(fakeBlockFillDuration.ObserveCallCount()).To(Equal(0))
			})
		})

		Context("when the message is larger than the preferred max bytes", func() {
			BeforeEach(func() {
				fakeConfig.BatchSizeReturns(&ab.BatchSize{
					MaxMessageCount:   3,
					PreferredMaxBytes: 30,
				})
			})

			It("cuts the batch immediately", func() {
				batches, pending := bc.Ordered(message)
				Expect(len(batches)).To(Equal(1))
				Expect(pending).To(BeFalse())
				Expect(fakeBlockFillDuration.ObserveCallCount()).To(Equal(1))
				Expect(fakeBlockFillDuration.ObserveArgsForCall(0)).To(Equal(float64(0)))
				Expect(fakeBlockFillDuration.WithCallCount()).To(Equal(1))
				Expect(fakeBlockFillDuration.WithArgsForCall(0)).To(Equal([]string{"channel", "mychannel"}))
			})
		})

		Context("when the message causes the batch to exceed the preferred max bytes", func() {
			BeforeEach(func() {
				fakeConfig.BatchSizeReturns(&ab.BatchSize{
					MaxMessageCount:   3,
					PreferredMaxBytes: 50,
				})
			})

			It("cuts the previous batch immediately, enqueueing the second", func() {
				batches, pending := bc.Ordered(message)
				Expect(batches).To(BeEmpty())
				Expect(pending).To(BeTrue())

				batches, pending = bc.Ordered(message)
				Expect(len(batches)).To(Equal(1))
				Expect(len(batches[0])).To(Equal(1))
				Expect(pending).To(BeTrue())

				Expect(fakeBlockFillDuration.ObserveCallCount()).To(Equal(1))
				Expect(fakeBlockFillDuration.ObserveArgsForCall(0)).To(BeNumerically(">", 0))
				Expect(fakeBlockFillDuration.ObserveArgsForCall(0)).To(BeNumerically("<", 1))
				Expect(fakeBlockFillDuration.WithCallCount()).To(Equal(1))
				Expect(fakeBlockFillDuration.WithArgsForCall(0)).To(Equal([]string{"channel", "mychannel"}))
			})

			Context("when the new message is larger than the preferred max bytes", func() {
				var (
					bigMessage *cb.Envelope
				)

				BeforeEach(func() {
					bigMessage = &cb.Envelope{Payload: make([]byte, 1000)}
				})

				It("cuts both the previous batch and the next batch immediately", func() {
					batches, pending := bc.Ordered(message)
					Expect(batches).To(BeEmpty())
					Expect(pending).To(BeTrue())

					batches, pending = bc.Ordered(bigMessage)
					Expect(len(batches)).To(Equal(2))
					Expect(len(batches[0])).To(Equal(1))
					Expect(len(batches[1])).To(Equal(1))
					Expect(pending).To(BeFalse())

					Expect(fakeBlockFillDuration.ObserveCallCount()).To(Equal(2))
					Expect(fakeBlockFillDuration.ObserveArgsForCall(0)).To(BeNumerically(">", 0))
					Expect(fakeBlockFillDuration.ObserveArgsForCall(0)).To(BeNumerically("<", 1))
					Expect(fakeBlockFillDuration.ObserveArgsForCall(1)).To(Equal(float64(0)))
					Expect(fakeBlockFillDuration.WithCallCount()).To(Equal(2))
					Expect(fakeBlockFillDuration.WithArgsForCall(0)).To(Equal([]string{"channel", "mychannel"}))
					Expect(fakeBlockFillDuration.WithArgsForCall(1)).To(Equal([]string{"channel", "mychannel"}))
				})
			})
		})

		Context("when the orderer config cannot be retrieved", func() {
			BeforeEach(func() {
				fakeConfigFetcher.OrdererConfigReturns(nil, false)
			})

			It("panics", func() {
				Expect(func() { bc.Ordered(message) }).To(Panic())
			})
		})
	})

	Describe("Cut", func() {
		It("cuts an empty batch", func() {
			batch := bc.Cut()
			Expect(batch).To(BeNil())
			Expect(fakeBlockFillDuration.ObserveCallCount()).To(Equal(0))
		})
	})
})