github.com/kaituanwang/hyperledger@v2.0.1+incompatible/orderer/consensus/kafka/chain_test.go (about)

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package kafka
     8  
     9  import (
    10  	"context"
    11  	"errors"
    12  	"fmt"
    13  	"testing"
    14  	"time"
    15  
    16  	"github.com/Shopify/sarama"
    17  	"github.com/Shopify/sarama/mocks"
    18  	"github.com/golang/protobuf/proto"
    19  	cb "github.com/hyperledger/fabric-protos-go/common"
    20  	ab "github.com/hyperledger/fabric-protos-go/orderer"
    21  	"github.com/hyperledger/fabric/common/channelconfig"
    22  	"github.com/hyperledger/fabric/common/metrics/disabled"
    23  	"github.com/hyperledger/fabric/orderer/common/blockcutter"
    24  	"github.com/hyperledger/fabric/orderer/common/msgprocessor"
    25  	mockkafka "github.com/hyperledger/fabric/orderer/consensus/kafka/mock"
    26  	mockblockcutter "github.com/hyperledger/fabric/orderer/mocks/common/blockcutter"
    27  	mockmultichannel "github.com/hyperledger/fabric/orderer/mocks/common/multichannel"
    28  	"github.com/hyperledger/fabric/protoutil"
    29  	. "github.com/onsi/gomega"
    30  	"github.com/stretchr/testify/assert"
    31  	"github.com/stretchr/testify/mock"
    32  	"github.com/stretchr/testify/require"
    33  )
    34  
//go:generate counterfeiter -o mock/orderer_capabilities.go --fake-name OrdererCapabilities . ordererCapabilities

// ordererCapabilities embeds channelconfig.OrdererCapabilities solely so that
// counterfeiter can generate a fake for it in the local mock package.
type ordererCapabilities interface {
	channelconfig.OrdererCapabilities
}
    40  
//go:generate counterfeiter -o mock/channel_capabilities.go --fake-name ChannelCapabilities . channelCapabilities

// channelCapabilities embeds channelconfig.ChannelCapabilities solely so that
// counterfeiter can generate a fake for it in the local mock package.
type channelCapabilities interface {
	channelconfig.ChannelCapabilities
}
    46  
//go:generate counterfeiter -o mock/channel_config.go --fake-name ChannelConfig . channelConfig

// channelConfig embeds channelconfig.Channel solely so that counterfeiter can
// generate a fake for it in the local mock package.
type channelConfig interface {
	channelconfig.Channel
}
    52  
    53  func newMockOrderer(batchTimeout time.Duration, brokers []string, resubmission bool) *mockkafka.OrdererConfig {
    54  	mockCapabilities := &mockkafka.OrdererCapabilities{}
    55  	mockCapabilities.ResubmissionReturns(resubmission)
    56  	mockOrderer := &mockkafka.OrdererConfig{}
    57  	mockOrderer.CapabilitiesReturns(mockCapabilities)
    58  	mockOrderer.BatchTimeoutReturns(batchTimeout)
    59  	mockOrderer.KafkaBrokersReturns(brokers)
    60  	return mockOrderer
    61  }
    62  
    63  func newMockChannel() *mockkafka.ChannelConfig {
    64  	mockCapabilities := &mockkafka.ChannelCapabilities{}
    65  	mockCapabilities.ConsensusTypeMigrationReturns(false)
    66  	mockChannel := &mockkafka.ChannelConfig{}
    67  	mockChannel.CapabilitiesReturns(mockCapabilities)
    68  	return mockChannel
    69  }
    70  
var (
	extraShortTimeout = 1 * time.Millisecond // expected to fire almost immediately
	shortTimeout      = 1 * time.Second      // generous upper bound for events that should happen quickly
	longTimeout       = 1 * time.Hour        // effectively "never fires" within a test run

	// hitBranch is how long a test waits before concluding that a particular
	// select branch in the code under test has (or has not) been taken.
	hitBranch = 50 * time.Millisecond
)
    78  
// TestChain covers the Kafka chain lifecycle (construction, Start, Halt) and
// the message-submission paths (enqueue, Order, Configure) against a mock
// broker, under both healthy and deliberately failing broker responses.
func TestChain(t *testing.T) {

	oldestOffset := int64(0)
	newestOffset := int64(5)
	lastOriginalOffsetProcessed := int64(0)
	lastResubmittedConfigOffset := int64(0)

	message := sarama.StringEncoder("messageFoo")

	// newMocks returns the channel identity, a mock broker that answers
	// metadata/produce/offset/fetch requests successfully, and a consenter
	// support mock wired to that broker.
	newMocks := func(t *testing.T) (mockChannel channel, mockBroker *sarama.MockBroker, mockSupport *mockmultichannel.ConsenterSupport) {
		mockChannel = newChannel(channelNameForTest(t), defaultPartition)
		mockBroker = sarama.NewMockBroker(t, 0)
		mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{
			"MetadataRequest": sarama.NewMockMetadataResponse(t).
				SetBroker(mockBroker.Addr(), mockBroker.BrokerID()).
				SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()),
			"ProduceRequest": sarama.NewMockProduceResponse(t).
				SetError(mockChannel.topic(), mockChannel.partition(), sarama.ErrNoError),
			"OffsetRequest": sarama.NewMockOffsetResponse(t).
				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset).
				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset),
			"FetchRequest": sarama.NewMockFetchResponse(t, 1).
				SetMessage(mockChannel.topic(), mockChannel.partition(), newestOffset, message),
		})
		mockSupport = &mockmultichannel.ConsenterSupport{
			ChannelIDVal:     mockChannel.topic(),
			HeightVal:        uint64(3),
			SharedConfigVal:  newMockOrderer(0, []string{mockBroker.Addr()}, false),
			ChannelConfigVal: newMockChannel(),
		}
		return
	}

	t.Run("New", func(t *testing.T) {
		_, mockBroker, mockSupport := newMocks(t)
		defer func() { mockBroker.Close() }()
		fakeLastOffsetPersisted := &mockkafka.MetricsGauge{}
		fakeLastOffsetPersisted.WithReturns(fakeLastOffsetPersisted)
		mockConsenter.(*consenterImpl).metrics.LastOffsetPersisted = fakeLastOffsetPersisted
		chain, err := newChain(mockConsenter, mockSupport, newestOffset-1, lastOriginalOffsetProcessed, lastResubmittedConfigOffset)

		assert.NoError(t, err, "Expected newChain to return without errors")
		// A freshly constructed (not yet started) chain reports itself as
		// errored: Errored() must return an already-closed channel.
		select {
		case <-chain.Errored():
			logger.Debug("Errored() returned a closed channel as expected")
		default:
			t.Fatal("Errored() should have returned a closed channel")
		}

		// haltChan and startChan must both still be open before Start/Halt.
		select {
		case <-chain.haltChan:
			t.Fatal("haltChan should have been open")
		default:
			logger.Debug("haltChan is open as it should be")
		}

		select {
		case <-chain.startChan:
			t.Fatal("startChan should have been open")
		default:
			logger.Debug("startChan is open as it should be")
		}

		// The last-offset-persisted gauge must be labeled once with the
		// channel name and set once to the offset handed to newChain.
		require.Equal(t, fakeLastOffsetPersisted.WithCallCount(), 1)
		assert.Equal(t, fakeLastOffsetPersisted.WithArgsForCall(0), []string{"channel", channelNameForTest(t)})
		require.Equal(t, fakeLastOffsetPersisted.SetCallCount(), 1)
		assert.Equal(t, fakeLastOffsetPersisted.SetArgsForCall(0), float64(newestOffset-1))
	})

	t.Run("Start", func(t *testing.T) {
		_, mockBroker, mockSupport := newMocks(t)
		defer func() { mockBroker.Close() }()
		// Set to -1 because we haven't sent the CONNECT message yet
		chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1, lastOriginalOffsetProcessed, lastResubmittedConfigOffset)

		chain.Start()
		select {
		case <-chain.startChan:
			logger.Debug("startChan is closed as it should be")
		case <-time.After(shortTimeout):
			t.Fatal("startChan should have been closed by now")
		}

		// Trigger the haltChan clause in the processMessagesToBlocks goroutine
		close(chain.haltChan)
	})

	t.Run("Halt", func(t *testing.T) {
		_, mockBroker, mockSupport := newMocks(t)
		defer func() { mockBroker.Close() }()
		chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1, lastOriginalOffsetProcessed, lastResubmittedConfigOffset)

		chain.Start()
		select {
		case <-chain.startChan:
			logger.Debug("startChan is closed as it should be")
		case <-time.After(shortTimeout):
			t.Fatal("startChan should have been closed by now")
		}

		// Wait till the start phase has completed, then:
		chain.Halt()

		// Halting must close both haltChan and errorChan.
		select {
		case <-chain.haltChan:
			logger.Debug("haltChan is closed as it should be")
		case <-time.After(shortTimeout):
			t.Fatal("haltChan should have been closed")
		}

		select {
		case <-chain.errorChan:
			logger.Debug("errorChan is closed as it should be")
		case <-time.After(shortTimeout):
			t.Fatal("errorChan should have been closed")
		}
	})

	t.Run("DoubleHalt", func(t *testing.T) {
		_, mockBroker, mockSupport := newMocks(t)
		defer func() { mockBroker.Close() }()
		chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1, lastOriginalOffsetProcessed, lastResubmittedConfigOffset)

		chain.Start()
		select {
		case <-chain.startChan:
			logger.Debug("startChan is closed as it should be")
		case <-time.After(shortTimeout):
			t.Fatal("startChan should have been closed by now")
		}

		chain.Halt()

		// A second Halt must be a harmless no-op, not a double-close panic.
		assert.NotPanics(t, func() { chain.Halt() }, "Calling Halt() more than once shouldn't panic")
	})

	t.Run("StartWithProducerForChannelError", func(t *testing.T) {
		_, mockBroker, mockSupport := newMocks(t)
		defer func() { mockBroker.Close() }()
		// Point to an empty brokers list
		mockSupportCopy := *mockSupport
		mockSupportCopy.SharedConfigVal = newMockOrderer(longTimeout, []string{}, false)

		chain, _ := newChain(mockConsenter, &mockSupportCopy, newestOffset-1, lastOriginalOffsetProcessed, lastResubmittedConfigOffset)

		// The production path will actually call chain.Start(). This is
		// functionally equivalent and allows us to run assertions on it.
		assert.Panics(t, func() { startThread(chain) }, "Expected the Start() call to panic")
	})

	t.Run("StartWithConnectMessageError", func(t *testing.T) {
		// Note that this test is affected by the following parameters:
		// - Net.ReadTimeout
		// - Consumer.Retry.Backoff
		// - Metadata.Retry.Max
		mockChannel, mockBroker, mockSupport := newMocks(t)
		defer func() { mockBroker.Close() }()
		chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1, lastOriginalOffsetProcessed, lastResubmittedConfigOffset)

		// Have the broker return an ErrNotLeaderForPartition error
		mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{
			"MetadataRequest": sarama.NewMockMetadataResponse(t).
				SetBroker(mockBroker.Addr(), mockBroker.BrokerID()).
				SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()),
			"ProduceRequest": sarama.NewMockProduceResponse(t).
				SetError(mockChannel.topic(), mockChannel.partition(), sarama.ErrNotLeaderForPartition),
			"OffsetRequest": sarama.NewMockOffsetResponse(t).
				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset).
				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset),
			"FetchRequest": sarama.NewMockFetchResponse(t, 1).
				SetMessage(mockChannel.topic(), mockChannel.partition(), newestOffset, message),
		})

		assert.Panics(t, func() { startThread(chain) }, "Expected the Start() call to panic")
	})

	t.Run("enqueueIfNotStarted", func(t *testing.T) {
		mockChannel, mockBroker, mockSupport := newMocks(t)
		defer func() { mockBroker.Close() }()
		chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1, lastOriginalOffsetProcessed, lastResubmittedConfigOffset)

		// As in StartWithConnectMessageError, have the broker return an
		// ErrNotLeaderForPartition error, i.e. cause an error in the
		// 'post connect message' step.
		mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{
			"MetadataRequest": sarama.NewMockMetadataResponse(t).
				SetBroker(mockBroker.Addr(), mockBroker.BrokerID()).
				SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()),
			"ProduceRequest": sarama.NewMockProduceResponse(t).
				SetError(mockChannel.topic(), mockChannel.partition(), sarama.ErrNotLeaderForPartition),
			"OffsetRequest": sarama.NewMockOffsetResponse(t).
				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset).
				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset),
			"FetchRequest": sarama.NewMockFetchResponse(t, 1).
				SetMessage(mockChannel.topic(), mockChannel.partition(), newestOffset, message),
		})

		// We don't need to create a legit envelope here as it's not inspected during this test
		assert.False(t, chain.enqueue(newRegularMessage([]byte("fooMessage"))), "Expected enqueue call to return false")
	})

	t.Run("StartWithConsumerForChannelError", func(t *testing.T) {
		// Note that this test is affected by the following parameters:
		// - Net.ReadTimeout
		// - Consumer.Retry.Backoff
		// - Metadata.Retry.Max

		mockChannel, mockBroker, mockSupport := newMocks(t)
		defer func() { mockBroker.Close() }()

		// Provide an out-of-range offset
		chain, _ := newChain(mockConsenter, mockSupport, newestOffset, lastOriginalOffsetProcessed, lastResubmittedConfigOffset)

		mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{
			"MetadataRequest": sarama.NewMockMetadataResponse(t).
				SetBroker(mockBroker.Addr(), mockBroker.BrokerID()).
				SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()),
			"ProduceRequest": sarama.NewMockProduceResponse(t).
				SetError(mockChannel.topic(), mockChannel.partition(), sarama.ErrNoError),
			"OffsetRequest": sarama.NewMockOffsetResponse(t).
				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset).
				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset),
			"FetchRequest": sarama.NewMockFetchResponse(t, 1).
				SetMessage(mockChannel.topic(), mockChannel.partition(), newestOffset, message),
		})

		assert.Panics(t, func() { startThread(chain) }, "Expected the Start() call to panic")
	})

	t.Run("enqueueProper", func(t *testing.T) {
		mockChannel, mockBroker, mockSupport := newMocks(t)
		defer func() { mockBroker.Close() }()
		chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1, lastOriginalOffsetProcessed, lastResubmittedConfigOffset)

		mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{
			"MetadataRequest": sarama.NewMockMetadataResponse(t).
				SetBroker(mockBroker.Addr(), mockBroker.BrokerID()).
				SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()),
			"ProduceRequest": sarama.NewMockProduceResponse(t).
				SetError(mockChannel.topic(), mockChannel.partition(), sarama.ErrNoError),
			"OffsetRequest": sarama.NewMockOffsetResponse(t).
				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset).
				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset),
			"FetchRequest": sarama.NewMockFetchResponse(t, 1).
				SetMessage(mockChannel.topic(), mockChannel.partition(), newestOffset, message),
		})

		chain.Start()
		select {
		case <-chain.startChan:
			logger.Debug("startChan is closed as it should be")
		case <-time.After(shortTimeout):
			t.Fatal("startChan should have been closed by now")
		}

		// enqueue should have access to the post path, and its ProduceRequest should go by without error.
		// We don't need to create a legit envelope here as it's not inspected during this test
		assert.True(t, chain.enqueue(newRegularMessage([]byte("fooMessage"))), "Expected enqueue call to return true")

		chain.Halt()
	})

	t.Run("enqueueIfHalted", func(t *testing.T) {
		mockChannel, mockBroker, mockSupport := newMocks(t)
		defer func() { mockBroker.Close() }()
		chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1, lastOriginalOffsetProcessed, lastResubmittedConfigOffset)

		mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{
			"MetadataRequest": sarama.NewMockMetadataResponse(t).
				SetBroker(mockBroker.Addr(), mockBroker.BrokerID()).
				SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()),
			"ProduceRequest": sarama.NewMockProduceResponse(t).
				SetError(mockChannel.topic(), mockChannel.partition(), sarama.ErrNoError),
			"OffsetRequest": sarama.NewMockOffsetResponse(t).
				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset).
				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset),
			"FetchRequest": sarama.NewMockFetchResponse(t, 1).
				SetMessage(mockChannel.topic(), mockChannel.partition(), newestOffset, message),
		})

		chain.Start()
		select {
		case <-chain.startChan:
			logger.Debug("startChan is closed as it should be")
		case <-time.After(shortTimeout):
			t.Fatal("startChan should have been closed by now")
		}
		chain.Halt()

		// haltChan should close access to the post path.
		// We don't need to create a legit envelope here as it's not inspected during this test
		assert.False(t, chain.enqueue(newRegularMessage([]byte("fooMessage"))), "Expected enqueue call to return false")
	})

	t.Run("enqueueError", func(t *testing.T) {
		mockChannel, mockBroker, mockSupport := newMocks(t)
		defer func() { mockBroker.Close() }()
		chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1, lastOriginalOffsetProcessed, lastResubmittedConfigOffset)

		// Use the "good" handler map that allows the Stage to complete without
		// issues
		mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{
			"MetadataRequest": sarama.NewMockMetadataResponse(t).
				SetBroker(mockBroker.Addr(), mockBroker.BrokerID()).
				SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()),
			"ProduceRequest": sarama.NewMockProduceResponse(t).
				SetError(mockChannel.topic(), mockChannel.partition(), sarama.ErrNoError),
			"OffsetRequest": sarama.NewMockOffsetResponse(t).
				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset).
				SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset),
			"FetchRequest": sarama.NewMockFetchResponse(t, 1).
				SetMessage(mockChannel.topic(), mockChannel.partition(), newestOffset, message),
		})

		chain.Start()
		select {
		case <-chain.startChan:
			logger.Debug("startChan is closed as it should be")
		case <-time.After(shortTimeout):
			t.Fatal("startChan should have been closed by now")
		}
		defer chain.Halt()

		// Now make it so that the next ProduceRequest is met with an error
		mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{
			"ProduceRequest": sarama.NewMockProduceResponse(t).
				SetError(mockChannel.topic(), mockChannel.partition(), sarama.ErrNotLeaderForPartition),
		})

		// We don't need to create a legit envelope here as it's not inspected during this test
		assert.False(t, chain.enqueue(newRegularMessage([]byte("fooMessage"))), "Expected enqueue call to return false")
	})

	t.Run("Order", func(t *testing.T) {
		// Order must fail on an unstarted chain and succeed on a started one.
		t.Run("ErrorIfNotStarted", func(t *testing.T) {
			_, mockBroker, mockSupport := newMocks(t)
			defer func() { mockBroker.Close() }()
			chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1, lastOriginalOffsetProcessed, lastResubmittedConfigOffset)

			// We don't need to create a legit envelope here as it's not inspected during this test
			assert.Error(t, chain.Order(&cb.Envelope{}, uint64(0)))
		})

		t.Run("Proper", func(t *testing.T) {
			mockChannel, mockBroker, mockSupport := newMocks(t)
			defer func() { mockBroker.Close() }()
			chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1, lastOriginalOffsetProcessed, lastResubmittedConfigOffset)

			mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{
				"MetadataRequest": sarama.NewMockMetadataResponse(t).
					SetBroker(mockBroker.Addr(), mockBroker.BrokerID()).
					SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()),
				"ProduceRequest": sarama.NewMockProduceResponse(t).
					SetError(mockChannel.topic(), mockChannel.partition(), sarama.ErrNoError),
				"OffsetRequest": sarama.NewMockOffsetResponse(t).
					SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset).
					SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset),
				"FetchRequest": sarama.NewMockFetchResponse(t, 1).
					SetMessage(mockChannel.topic(), mockChannel.partition(), newestOffset, message),
			})

			chain.Start()
			defer chain.Halt()

			select {
			case <-chain.startChan:
				logger.Debug("startChan is closed as it should be")
			case <-time.After(shortTimeout):
				t.Fatal("startChan should have been closed by now")
			}

			// We don't need to create a legit envelope here as it's not inspected during this test
			assert.NoError(t, chain.Order(&cb.Envelope{}, uint64(0)), "Expect Order successfully")
		})
	})

	t.Run("Configure", func(t *testing.T) {
		// Configure mirrors Order: error when unstarted, success when started.
		t.Run("ErrorIfNotStarted", func(t *testing.T) {
			_, mockBroker, mockSupport := newMocks(t)
			defer func() { mockBroker.Close() }()
			chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1, lastOriginalOffsetProcessed, lastResubmittedConfigOffset)

			// We don't need to create a legit envelope here as it's not inspected during this test
			assert.Error(t, chain.Configure(&cb.Envelope{}, uint64(0)))
		})

		t.Run("Proper", func(t *testing.T) {
			mockChannel, mockBroker, mockSupport := newMocks(t)
			defer func() { mockBroker.Close() }()
			chain, _ := newChain(mockConsenter, mockSupport, newestOffset-1, lastOriginalOffsetProcessed, lastResubmittedConfigOffset)

			mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{
				"MetadataRequest": sarama.NewMockMetadataResponse(t).
					SetBroker(mockBroker.Addr(), mockBroker.BrokerID()).
					SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()),
				"ProduceRequest": sarama.NewMockProduceResponse(t).
					SetError(mockChannel.topic(), mockChannel.partition(), sarama.ErrNoError),
				"OffsetRequest": sarama.NewMockOffsetResponse(t).
					SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset).
					SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset),
				"FetchRequest": sarama.NewMockFetchResponse(t, 1).
					SetMessage(mockChannel.topic(), mockChannel.partition(), newestOffset, message),
			})

			chain.Start()
			defer chain.Halt()

			select {
			case <-chain.startChan:
				logger.Debug("startChan is closed as it should be")
			case <-time.After(shortTimeout):
				t.Fatal("startChan should have been closed by now")
			}

			// We don't need to create a legit envelope here as it's not inspected during this test
			assert.NoError(t, chain.Configure(&cb.Envelope{}, uint64(0)), "Expect Configure successfully")
		})
	})
}
   498  
// TestSetupTopicForChannel verifies topic auto-creation behavior against
// several mock brokers, each scripted with a different CreateTopics outcome
// (success, topic-already-exists, invalid topic, unreachable broker).
func TestSetupTopicForChannel(t *testing.T) {

	mockChannel := newChannel(channelNameForTest(t), defaultPartition)
	haltChan := make(chan struct{})

	// Broker that accepts the CreateTopicsRequest without error.
	mockBrokerNoError := sarama.NewMockBroker(t, 0)
	defer mockBrokerNoError.Close()
	metadataResponse := sarama.NewMockMetadataResponse(t)
	metadataResponse.SetBroker(mockBrokerNoError.Addr(),
		mockBrokerNoError.BrokerID())
	metadataResponse.SetController(mockBrokerNoError.BrokerID())

	// Metadata reply reporting the topic as unknown, which forces the code
	// under test down the topic-creation path.
	mdrUnknownTopicOrPartition := &sarama.MetadataResponse{
		Version:      1,
		Brokers:      []*sarama.Broker{sarama.NewBroker(mockBrokerNoError.Addr())},
		ControllerID: -1,
		Topics: []*sarama.TopicMetadata{
			{
				Err:  sarama.ErrUnknownTopicOrPartition,
				Name: mockChannel.topic(),
			},
		},
	}

	mockBrokerNoError.SetHandlerByMap(map[string]sarama.MockResponse{
		"CreateTopicsRequest": sarama.NewMockWrapper(
			&sarama.CreateTopicsResponse{
				TopicErrors: map[string]*sarama.TopicError{
					mockChannel.topic(): {
						Err: sarama.ErrNoError}}}),
		"MetadataRequest": sarama.NewMockWrapper(mdrUnknownTopicOrPartition)})

	// Broker that reports the topic already exists; this must not be treated
	// as a failure by setupTopicForChannel.
	mockBrokerTopicExists := sarama.NewMockBroker(t, 1)
	defer mockBrokerTopicExists.Close()
	mockBrokerTopicExists.SetHandlerByMap(map[string]sarama.MockResponse{
		"CreateTopicsRequest": sarama.NewMockWrapper(
			&sarama.CreateTopicsResponse{
				TopicErrors: map[string]*sarama.TopicError{
					mockChannel.topic(): {
						Err: sarama.ErrTopicAlreadyExists}}}),
		"MetadataRequest": sarama.NewMockWrapper(&sarama.MetadataResponse{
			Version: 1,
			Topics: []*sarama.TopicMetadata{
				{
					Name: channelNameForTest(t),
					Err:  sarama.ErrNoError}}})})

	// Broker that rejects creation with ErrInvalidTopic (metadata served via
	// the mock metadata-response helper).
	mockBrokerInvalidTopic := sarama.NewMockBroker(t, 2)
	defer mockBrokerInvalidTopic.Close()
	metadataResponse = sarama.NewMockMetadataResponse(t)
	metadataResponse.SetBroker(mockBrokerInvalidTopic.Addr(),
		mockBrokerInvalidTopic.BrokerID())
	metadataResponse.SetController(mockBrokerInvalidTopic.BrokerID())
	mockBrokerInvalidTopic.SetHandlerByMap(map[string]sarama.MockResponse{
		"CreateTopicsRequest": sarama.NewMockWrapper(
			&sarama.CreateTopicsResponse{
				TopicErrors: map[string]*sarama.TopicError{
					mockChannel.topic(): {
						Err: sarama.ErrInvalidTopic}}}),
		"MetadataRequest": metadataResponse})

	// Variant of the invalid-topic broker that serves a hand-built metadata
	// response naming itself as controller.
	mockBrokerInvalidTopic2 := sarama.NewMockBroker(t, 3)
	defer mockBrokerInvalidTopic2.Close()
	mockBrokerInvalidTopic2.SetHandlerByMap(map[string]sarama.MockResponse{
		"CreateTopicsRequest": sarama.NewMockWrapper(
			&sarama.CreateTopicsResponse{
				TopicErrors: map[string]*sarama.TopicError{
					mockChannel.topic(): {
						Err: sarama.ErrInvalidTopic}}}),
		"MetadataRequest": sarama.NewMockWrapper(&sarama.MetadataResponse{
			Version:      1,
			Brokers:      []*sarama.Broker{sarama.NewBroker(mockBrokerInvalidTopic2.Addr())},
			ControllerID: mockBrokerInvalidTopic2.BrokerID()})})

	// Grab an address that is guaranteed to refuse connections.
	closedBroker := sarama.NewMockBroker(t, 99)
	badAddress := closedBroker.Addr()
	closedBroker.Close()

	var tests = []struct {
		name         string
		brokers      []string
		brokerConfig *sarama.Config
		version      sarama.KafkaVersion
		expectErr    bool
		errorMsg     string
	}{
		{
			// Kafka < 0.10.1 has no CreateTopics API; setup should no-op.
			name:         "Unsupported Version",
			brokers:      []string{mockBrokerNoError.Addr()},
			brokerConfig: sarama.NewConfig(),
			version:      sarama.V0_9_0_0,
			expectErr:    false,
		},
		{
			name:         "No Error",
			brokers:      []string{mockBrokerNoError.Addr()},
			brokerConfig: sarama.NewConfig(),
			version:      sarama.V0_10_2_0,
			expectErr:    false,
		},
		{
			name:         "Topic Exists",
			brokers:      []string{mockBrokerTopicExists.Addr()},
			brokerConfig: sarama.NewConfig(),
			version:      sarama.V0_10_2_0,
			expectErr:    false,
		},
		{
			name:         "Invalid Topic",
			brokers:      []string{mockBrokerInvalidTopic.Addr()},
			brokerConfig: sarama.NewConfig(),
			version:      sarama.V0_10_2_0,
			expectErr:    true,
			errorMsg:     "process asked to exit",
		},
		{
			// One dead broker plus one healthy one: the retry loop should
			// eventually succeed through the healthy broker.
			name:         "Multiple Brokers - One No Error",
			brokers:      []string{badAddress, mockBrokerNoError.Addr()},
			brokerConfig: sarama.NewConfig(),
			version:      sarama.V0_10_2_0,
			expectErr:    false,
		},
		{
			name:         "Multiple Brokers - All Errors",
			brokers:      []string{badAddress, badAddress},
			brokerConfig: sarama.NewConfig(),
			version:      sarama.V0_10_2_0,
			expectErr:    true,
			errorMsg:     "failed to retrieve metadata",
		},
	}

	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			test.brokerConfig.Version = test.version
			err := setupTopicForChannel(
				mockRetryOptions,
				haltChan,
				test.brokers,
				test.brokerConfig,
				&sarama.TopicDetail{
					NumPartitions:     1,
					ReplicationFactor: 2},
				mockChannel)
			if test.expectErr {
				assert.Contains(t, err.Error(), test.errorMsg)
			} else {
				assert.NoError(t, err)
			}
		})
	}

}
   653  
   654  func TestSetupProducerForChannel(t *testing.T) {
   655  	if testing.Short() {
   656  		t.Skip("Skipping test in short mode")
   657  	}
   658  
   659  	mockBroker := sarama.NewMockBroker(t, 0)
   660  	defer mockBroker.Close()
   661  
   662  	mockChannel := newChannel(channelNameForTest(t), defaultPartition)
   663  
   664  	haltChan := make(chan struct{})
   665  
   666  	t.Run("Proper", func(t *testing.T) {
   667  		metadataResponse := new(sarama.MetadataResponse)
   668  		metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID())
   669  		metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError)
   670  		mockBroker.Returns(metadataResponse)
   671  
   672  		producer, err := setupProducerForChannel(mockConsenter.retryOptions(), haltChan, []string{mockBroker.Addr()}, mockBrokerConfig, mockChannel)
   673  		assert.NoError(t, err, "Expected the setupProducerForChannel call to return without errors")
   674  		assert.NoError(t, producer.Close(), "Expected to close the producer without errors")
   675  	})
   676  
   677  	t.Run("WithError", func(t *testing.T) {
   678  		_, err := setupProducerForChannel(mockConsenter.retryOptions(), haltChan, []string{}, mockBrokerConfig, mockChannel)
   679  		assert.Error(t, err, "Expected the setupProducerForChannel call to return an error")
   680  	})
   681  }
   682  
   683  func TestGetHealthyClusterReplicaInfo(t *testing.T) {
   684  	mockBroker := sarama.NewMockBroker(t, 0)
   685  	defer mockBroker.Close()
   686  
   687  	mockChannel := newChannel(channelNameForTest(t), defaultPartition)
   688  
   689  	haltChan := make(chan struct{})
   690  
   691  	t.Run("Proper", func(t *testing.T) {
   692  		ids := []int32{int32(1), int32(2)}
   693  		metadataResponse := new(sarama.MetadataResponse)
   694  		metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID())
   695  		metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), ids, nil, sarama.ErrNoError)
   696  		mockBroker.Returns(metadataResponse)
   697  
   698  		replicaIDs, err := getHealthyClusterReplicaInfo(mockConsenter.retryOptions(), haltChan, []string{mockBroker.Addr()}, mockBrokerConfig, mockChannel)
   699  		assert.NoError(t, err, "Expected the getHealthyClusterReplicaInfo call to return without errors")
   700  		assert.Equal(t, replicaIDs, ids)
   701  	})
   702  
   703  	t.Run("WithError", func(t *testing.T) {
   704  		_, err := getHealthyClusterReplicaInfo(mockConsenter.retryOptions(), haltChan, []string{}, mockBrokerConfig, mockChannel)
   705  		assert.Error(t, err, "Expected the getHealthyClusterReplicaInfo call to return an error")
   706  	})
   707  }
   708  
// TestSetupConsumerForChannel verifies creation of the parent (broker-level)
// and channel (partition-level) consumers, including the failure paths of an
// empty broker list and an out-of-range starting offset.
func TestSetupConsumerForChannel(t *testing.T) {
	mockBroker := sarama.NewMockBroker(t, 0)
	defer func() { mockBroker.Close() }()

	mockChannel := newChannel(channelNameForTest(t), defaultPartition)

	oldestOffset := int64(0)
	newestOffset := int64(5)

	startFrom := int64(3)
	message := sarama.StringEncoder("messageFoo")

	// Canned broker responses shared by all subtests: this broker leads the
	// channel's partition, the log spans [oldestOffset, newestOffset], and a
	// single message is available at startFrom.
	mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(mockBroker.Addr(), mockBroker.BrokerID()).
			SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()),
		"OffsetRequest": sarama.NewMockOffsetResponse(t).
			SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset).
			SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset),
		"FetchRequest": sarama.NewMockFetchResponse(t, 1).
			SetMessage(mockChannel.topic(), mockChannel.partition(), startFrom, message),
	})

	haltChan := make(chan struct{})

	t.Run("ProperParent", func(t *testing.T) {
		parentConsumer, err := setupParentConsumerForChannel(mockConsenter.retryOptions(), haltChan, []string{mockBroker.Addr()}, mockBrokerConfig, mockChannel)
		assert.NoError(t, err, "Expected the setupParentConsumerForChannel call to return without errors")
		assert.NoError(t, parentConsumer.Close(), "Expected to close the parentConsumer without errors")
	})

	t.Run("ProperChannel", func(t *testing.T) {
		// The channel consumer is derived from a parent consumer; start it at
		// newestOffset, which is a valid (in-range) position.
		parentConsumer, _ := setupParentConsumerForChannel(mockConsenter.retryOptions(), haltChan, []string{mockBroker.Addr()}, mockBrokerConfig, mockChannel)
		defer func() { parentConsumer.Close() }()
		channelConsumer, err := setupChannelConsumerForChannel(mockConsenter.retryOptions(), haltChan, parentConsumer, mockChannel, newestOffset)
		assert.NoError(t, err, "Expected the setupChannelConsumerForChannel call to return without errors")
		assert.NoError(t, channelConsumer.Close(), "Expected to close the channelConsumer without errors")
	})

	t.Run("WithParentConsumerError", func(t *testing.T) {
		// Provide an empty brokers list
		_, err := setupParentConsumerForChannel(mockConsenter.retryOptions(), haltChan, []string{}, mockBrokerConfig, mockChannel)
		assert.Error(t, err, "Expected the setupParentConsumerForChannel call to return an error")
	})

	t.Run("WithChannelConsumerError", func(t *testing.T) {
		// Provide an out-of-range offset
		// newestOffset+1 lies beyond the mock log's advertised range, so the
		// partition consumer cannot be created.
		parentConsumer, _ := setupParentConsumerForChannel(mockConsenter.retryOptions(), haltChan, []string{mockBroker.Addr()}, mockBrokerConfig, mockChannel)
		_, err := setupChannelConsumerForChannel(mockConsenter.retryOptions(), haltChan, parentConsumer, mockChannel, newestOffset+1)
		defer func() { parentConsumer.Close() }()
		assert.Error(t, err, "Expected the setupChannelConsumerForChannel call to return an error")
	})
}
   762  
// TestCloseKafkaObjects checks that chainImpl.closeKafkaObjects shuts down
// the channel consumer, parent consumer, and producer, returning any errors
// it collected, and that closing the consumers a second time does not panic.
func TestCloseKafkaObjects(t *testing.T) {
	mockChannel := newChannel(channelNameForTest(t), defaultPartition)

	mockSupport := &mockmultichannel.ConsenterSupport{
		ChannelIDVal: mockChannel.topic(),
	}

	oldestOffset := int64(0)
	newestOffset := int64(5)

	startFrom := int64(3)
	message := sarama.StringEncoder("messageFoo")

	mockBroker := sarama.NewMockBroker(t, 0)
	defer func() { mockBroker.Close() }()

	// Canned broker responses so both producer and consumer setup can
	// complete: this broker leads the partition, the log spans
	// [oldestOffset, newestOffset], and one message is available at startFrom.
	mockBroker.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(mockBroker.Addr(), mockBroker.BrokerID()).
			SetLeader(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID()),
		"OffsetRequest": sarama.NewMockOffsetResponse(t).
			SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetOldest, oldestOffset).
			SetOffset(mockChannel.topic(), mockChannel.partition(), sarama.OffsetNewest, newestOffset),
		"FetchRequest": sarama.NewMockFetchResponse(t, 1).
			SetMessage(mockChannel.topic(), mockChannel.partition(), startFrom, message),
	})

	haltChan := make(chan struct{})

	t.Run("Proper", func(t *testing.T) {
		producer, _ := setupProducerForChannel(mockConsenter.retryOptions(), haltChan, []string{mockBroker.Addr()}, mockBrokerConfig, mockChannel)
		parentConsumer, _ := setupParentConsumerForChannel(mockConsenter.retryOptions(), haltChan, []string{mockBroker.Addr()}, mockBrokerConfig, mockChannel)
		channelConsumer, _ := setupChannelConsumerForChannel(mockConsenter.retryOptions(), haltChan, parentConsumer, mockChannel, startFrom)

		// Set up a chain with just the minimum necessary fields instantiated so
		// as to test the function
		bareMinimumChain := &chainImpl{
			ConsenterSupport: mockSupport,
			producer:         producer,
			parentConsumer:   parentConsumer,
			channelConsumer:  channelConsumer,
		}

		errs := bareMinimumChain.closeKafkaObjects()

		assert.Len(t, errs, 0, "Expected zero errors")

		// Closing an already-closed consumer should be a no-op, not a panic.
		assert.NotPanics(t, func() {
			channelConsumer.Close()
		})

		assert.NotPanics(t, func() {
			parentConsumer.Close()
		})

		// TODO For some reason this panic cannot be captured by the `assert`
		// test framework. Not a dealbreaker but need to investigate further.
		/* assert.Panics(t, func() {
			producer.Close()
		}) */
	})

	t.Run("ChannelConsumerError", func(t *testing.T) {
		producer, _ := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig)

		// Unlike all other tests in this file, forcing an error on the
		// channelConsumer.Close() call is more easily achieved using the mock
		// Consumer. Thus we bypass the call to `setup*Consumer`.

		// Have the consumer receive an ErrOutOfBrokers error.
		mockParentConsumer := mocks.NewConsumer(t, nil)
		mockParentConsumer.ExpectConsumePartition(mockChannel.topic(), mockChannel.partition(), startFrom).YieldError(sarama.ErrOutOfBrokers)
		mockChannelConsumer, err := mockParentConsumer.ConsumePartition(mockChannel.topic(), mockChannel.partition(), startFrom)
		assert.NoError(t, err, "Expected no error when setting up the mock partition consumer")

		bareMinimumChain := &chainImpl{
			ConsenterSupport: mockSupport,
			producer:         producer,
			parentConsumer:   mockParentConsumer,
			channelConsumer:  mockChannelConsumer,
		}

		errs := bareMinimumChain.closeKafkaObjects()

		// The yielded ErrOutOfBrokers should surface as exactly one error.
		assert.Len(t, errs, 1, "Expected 1 error returned")

		assert.NotPanics(t, func() {
			mockChannelConsumer.Close()
		})

		assert.NotPanics(t, func() {
			mockParentConsumer.Close()
		})
	})
}
   858  
   859  func TestGetLastCutBlockNumber(t *testing.T) {
   860  	testCases := []struct {
   861  		name     string
   862  		input    uint64
   863  		expected uint64
   864  	}{
   865  		{"Proper", uint64(2), uint64(1)},
   866  		{"Zero", uint64(1), uint64(0)},
   867  	}
   868  	for _, tc := range testCases {
   869  		t.Run(tc.name, func(t *testing.T) {
   870  			assert.Equal(t, tc.expected, getLastCutBlockNumber(tc.input))
   871  		})
   872  	}
   873  }
   874  
   875  func TestGetLastOffsetPersisted(t *testing.T) {
   876  	mockChannel := newChannel(channelNameForTest(t), defaultPartition)
   877  	mockMetadata := &cb.Metadata{Value: protoutil.MarshalOrPanic(&ab.KafkaMetadata{
   878  		LastOffsetPersisted:         int64(5),
   879  		LastOriginalOffsetProcessed: int64(3),
   880  		LastResubmittedConfigOffset: int64(4),
   881  	})}
   882  
   883  	testCases := []struct {
   884  		name                string
   885  		md                  []byte
   886  		expectedPersisted   int64
   887  		expectedProcessed   int64
   888  		expectedResubmitted int64
   889  		panics              bool
   890  	}{
   891  		{"Proper", mockMetadata.Value, int64(5), int64(3), int64(4), false},
   892  		{"Empty", nil, sarama.OffsetOldest - 1, int64(0), int64(0), false},
   893  		{"Panics", tamperBytes(mockMetadata.Value), sarama.OffsetOldest - 1, int64(0), int64(0), true},
   894  	}
   895  
   896  	for _, tc := range testCases {
   897  		t.Run(tc.name, func(t *testing.T) {
   898  			if !tc.panics {
   899  				persisted, processed, resubmitted := getOffsets(tc.md, mockChannel.String())
   900  				assert.Equal(t, tc.expectedPersisted, persisted)
   901  				assert.Equal(t, tc.expectedProcessed, processed)
   902  				assert.Equal(t, tc.expectedResubmitted, resubmitted)
   903  			} else {
   904  				assert.Panics(t, func() {
   905  					getOffsets(tc.md, mockChannel.String())
   906  				}, "Expected getOffsets call to panic")
   907  			}
   908  		})
   909  	}
   910  }
   911  
// TestSendConnectMessage verifies that sendConnectMessage succeeds when the
// broker acknowledges the produce request, and fails (after the configured
// retries) when the broker keeps rejecting it.
func TestSendConnectMessage(t *testing.T) {
	mockBroker := sarama.NewMockBroker(t, 0)
	defer func() { mockBroker.Close() }()

	mockChannel := newChannel("mockChannelFoo", defaultPartition)

	// Queue the metadata response that serves the producer's bootstrap
	// request; each mockBroker.Returns call enqueues the next canned reply.
	metadataResponse := new(sarama.MetadataResponse)
	metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID())
	metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError)
	mockBroker.Returns(metadataResponse)

	producer, _ := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig)
	defer func() { producer.Close() }()

	haltChan := make(chan struct{})

	t.Run("Proper", func(t *testing.T) {
		// The broker acknowledges the produce request without error.
		successResponse := new(sarama.ProduceResponse)
		successResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), sarama.ErrNoError)
		mockBroker.Returns(successResponse)

		assert.NoError(t, sendConnectMessage(mockConsenter.retryOptions(), haltChan, producer, mockChannel), "Expected the sendConnectMessage call to return without errors")
	})

	t.Run("WithError", func(t *testing.T) {
		// Note that this test is affected by the following parameters:
		// - Net.ReadTimeout
		// - Consumer.Retry.Backoff
		// - Metadata.Retry.Max

		// Have the broker return an ErrNotEnoughReplicas error
		failureResponse := new(sarama.ProduceResponse)
		failureResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), sarama.ErrNotEnoughReplicas)
		mockBroker.Returns(failureResponse)

		assert.Error(t, sendConnectMessage(mockConsenter.retryOptions(), haltChan, producer, mockChannel), "Expected the sendConnectMessage call to return an error")
	})
}
   950  
// TestSendTimeToCut verifies that sendTimeToCut posts a time-to-cut message
// through the producer and, on both the success and failure paths, nils out
// the caller's timer reference.
func TestSendTimeToCut(t *testing.T) {
	mockBroker := sarama.NewMockBroker(t, 0)
	defer func() { mockBroker.Close() }()

	mockChannel := newChannel("mockChannelFoo", defaultPartition)

	// Queue the metadata response that serves the producer's bootstrap
	// request; each mockBroker.Returns call enqueues the next canned reply.
	metadataResponse := new(sarama.MetadataResponse)
	metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID())
	metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError)
	mockBroker.Returns(metadataResponse)

	producer, err := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig)
	assert.NoError(t, err, "Expected no error when setting up the sarama SyncProducer")
	defer func() { producer.Close() }()

	timeToCutBlockNumber := uint64(3)
	var timer <-chan time.Time

	t.Run("Proper", func(t *testing.T) {
		// The broker acknowledges the produce request without error.
		successResponse := new(sarama.ProduceResponse)
		successResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), sarama.ErrNoError)
		mockBroker.Returns(successResponse)

		timer = time.After(longTimeout)

		assert.NoError(t, sendTimeToCut(producer, mockChannel, timeToCutBlockNumber, &timer), "Expected the sendTimeToCut call to return without errors")
		assert.Nil(t, timer, "Expected the sendTimeToCut call to nil the timer")
	})

	t.Run("WithError", func(t *testing.T) {
		// Note that this test is affected by the following parameters:
		// - Net.ReadTimeout
		// - Consumer.Retry.Backoff
		// - Metadata.Retry.Max
		failureResponse := new(sarama.ProduceResponse)
		failureResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), sarama.ErrNotEnoughReplicas)
		mockBroker.Returns(failureResponse)

		timer = time.After(longTimeout)

		assert.Error(t, sendTimeToCut(producer, mockChannel, timeToCutBlockNumber, &timer), "Expected the sendTimeToCut call to return an error")
		assert.Nil(t, timer, "Expected the sendTimeToCut call to nil the timer")
	})
}
   995  
   996  func TestProcessMessagesToBlocks(t *testing.T) {
   997  	mockBroker := sarama.NewMockBroker(t, 0)
   998  	defer func() { mockBroker.Close() }()
   999  
  1000  	mockChannel := newChannel("mockChannelFoo", defaultPartition)
  1001  
  1002  	metadataResponse := new(sarama.MetadataResponse)
  1003  	metadataResponse.AddBroker(mockBroker.Addr(), mockBroker.BrokerID())
  1004  	metadataResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), mockBroker.BrokerID(), nil, nil, sarama.ErrNoError)
  1005  	mockBroker.Returns(metadataResponse)
  1006  
  1007  	producer, _ := sarama.NewSyncProducer([]string{mockBroker.Addr()}, mockBrokerConfig)
  1008  
  1009  	mockBrokerConfigCopy := *mockBrokerConfig
  1010  	mockBrokerConfigCopy.ChannelBufferSize = 0
  1011  
  1012  	mockParentConsumer := mocks.NewConsumer(t, &mockBrokerConfigCopy)
  1013  	mpc := mockParentConsumer.ExpectConsumePartition(mockChannel.topic(), mockChannel.partition(), int64(0))
  1014  	mockChannelConsumer, err := mockParentConsumer.ConsumePartition(mockChannel.topic(), mockChannel.partition(), int64(0))
  1015  	assert.NoError(t, err, "Expected no error when setting up the mock partition consumer")
  1016  
  1017  	t.Run("TimeToCut", func(t *testing.T) {
  1018  		t.Run("PendingMsgToCutProper", func(t *testing.T) {
  1019  			errorChan := make(chan struct{})
  1020  			close(errorChan)
  1021  			haltChan := make(chan struct{})
  1022  
  1023  			lastCutBlockNumber := uint64(3)
  1024  
  1025  			mockSupport := &mockmultichannel.ConsenterSupport{
  1026  				Blocks:          make(chan *cb.Block), // WriteBlock will post here
  1027  				BlockCutterVal:  mockblockcutter.NewReceiver(),
  1028  				ChannelIDVal:    mockChannel.topic(),
  1029  				HeightVal:       lastCutBlockNumber, // Incremented during the WriteBlock call
  1030  				SharedConfigVal: newMockOrderer(shortTimeout/2, []string{mockBroker.Addr()}, false),
  1031  			}
  1032  			defer close(mockSupport.BlockCutterVal.Block)
  1033  
  1034  			bareMinimumChain := &chainImpl{
  1035  				producer:        producer,
  1036  				parentConsumer:  mockParentConsumer,
  1037  				channelConsumer: mockChannelConsumer,
  1038  
  1039  				consenter:          mockConsenter,
  1040  				channel:            mockChannel,
  1041  				ConsenterSupport:   mockSupport,
  1042  				lastCutBlockNumber: lastCutBlockNumber,
  1043  
  1044  				errorChan:                      errorChan,
  1045  				haltChan:                       haltChan,
  1046  				doneProcessingMessagesToBlocks: make(chan struct{}),
  1047  			}
  1048  
  1049  			// We need the mock blockcutter to deliver a non-empty batch
  1050  			go func() {
  1051  				mockSupport.BlockCutterVal.Block <- struct{}{} // Let the `mockblockcutter.Ordered` call below return
  1052  				logger.Debugf("Mock blockcutter's Ordered call has returned")
  1053  			}()
  1054  			// We are "planting" a message directly to the mock blockcutter
  1055  			mockSupport.BlockCutterVal.Ordered(newMockEnvelope("fooMessage"))
  1056  
  1057  			done := make(chan struct{})
  1058  
  1059  			go func() {
  1060  				bareMinimumChain.processMessagesToBlocks()
  1061  				done <- struct{}{}
  1062  			}()
  1063  
  1064  			// Cut ancestors
  1065  			mockSupport.BlockCutterVal.CutAncestors = true
  1066  
  1067  			// This envelope will be added into pending list, waiting to be cut when timer fires
  1068  			mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")))))
  1069  
  1070  			go func() {
  1071  				mockSupport.BlockCutterVal.Block <- struct{}{}
  1072  				logger.Debugf("Mock blockcutter's Ordered call has returned")
  1073  			}()
  1074  
  1075  			<-mockSupport.Blocks // Wait for the first block
  1076  
  1077  			logger.Debug("Closing haltChan to exit the infinite for-loop")
  1078  			close(haltChan) // Identical to chain.Halt()
  1079  			logger.Debug("haltChan closed")
  1080  			<-done
  1081  
  1082  			if bareMinimumChain.timer != nil {
  1083  				go func() {
  1084  					<-bareMinimumChain.timer // Fire the timer for garbage collection
  1085  				}()
  1086  			}
  1087  
  1088  			assert.NotEmpty(t, mockSupport.BlockCutterVal.CurBatch, "Expected the blockCutter to be non-empty")
  1089  			assert.NotNil(t, bareMinimumChain.timer, "Expected the cutTimer to be non-nil when there are pending envelopes")
  1090  
  1091  		})
  1092  
  1093  		t.Run("ReceiveTimeToCutProper", func(t *testing.T) {
  1094  			errorChan := make(chan struct{})
  1095  			close(errorChan)
  1096  			haltChan := make(chan struct{})
  1097  
  1098  			lastCutBlockNumber := uint64(3)
  1099  
  1100  			mockSupport := &mockmultichannel.ConsenterSupport{
  1101  				Blocks:         make(chan *cb.Block), // WriteBlock will post here
  1102  				BlockCutterVal: mockblockcutter.NewReceiver(),
  1103  				ChannelIDVal:   mockChannel.topic(),
  1104  				HeightVal:      lastCutBlockNumber, // Incremented during the WriteBlock call
  1105  			}
  1106  			defer close(mockSupport.BlockCutterVal.Block)
  1107  
  1108  			bareMinimumChain := &chainImpl{
  1109  				parentConsumer:  mockParentConsumer,
  1110  				channelConsumer: mockChannelConsumer,
  1111  
  1112  				consenter:          mockConsenter,
  1113  				channel:            mockChannel,
  1114  				ConsenterSupport:   mockSupport,
  1115  				lastCutBlockNumber: lastCutBlockNumber,
  1116  
  1117  				errorChan:                      errorChan,
  1118  				haltChan:                       haltChan,
  1119  				doneProcessingMessagesToBlocks: make(chan struct{}),
  1120  			}
  1121  
  1122  			// We need the mock blockcutter to deliver a non-empty batch
  1123  			go func() {
  1124  				mockSupport.BlockCutterVal.Block <- struct{}{} // Let the `mockblockcutter.Ordered` call below return
  1125  				logger.Debugf("Mock blockcutter's Ordered call has returned")
  1126  			}()
  1127  			// We are "planting" a message directly to the mock blockcutter
  1128  			mockSupport.BlockCutterVal.Ordered(newMockEnvelope("fooMessage"))
  1129  
  1130  			var counts []uint64
  1131  			done := make(chan struct{})
  1132  
  1133  			go func() {
  1134  				counts, err = bareMinimumChain.processMessagesToBlocks()
  1135  				done <- struct{}{}
  1136  			}()
  1137  
  1138  			// This is the wrappedMessage that the for-loop will process
  1139  			mpc.YieldMessage(newMockConsumerMessage(newTimeToCutMessage(lastCutBlockNumber + 1)))
  1140  
  1141  			<-mockSupport.Blocks // Let the `mockConsenterSupport.WriteBlock` proceed
  1142  
  1143  			logger.Debug("Closing haltChan to exit the infinite for-loop")
  1144  			close(haltChan) // Identical to chain.Halt()
  1145  			logger.Debug("haltChan closed")
  1146  			<-done
  1147  
  1148  			assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
  1149  			assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
  1150  			assert.Equal(t, uint64(1), counts[indexProcessTimeToCutPass], "Expected 1 TIMETOCUT message processed")
  1151  			assert.Equal(t, lastCutBlockNumber+1, bareMinimumChain.lastCutBlockNumber, "Expected lastCutBlockNumber to be bumped up by one")
  1152  		})
  1153  
  1154  		t.Run("ReceiveTimeToCutZeroBatch", func(t *testing.T) {
  1155  			errorChan := make(chan struct{})
  1156  			close(errorChan)
  1157  			haltChan := make(chan struct{})
  1158  
  1159  			lastCutBlockNumber := uint64(3)
  1160  
  1161  			mockSupport := &mockmultichannel.ConsenterSupport{
  1162  				Blocks:         make(chan *cb.Block), // WriteBlock will post here
  1163  				BlockCutterVal: mockblockcutter.NewReceiver(),
  1164  				ChannelIDVal:   mockChannel.topic(),
  1165  				HeightVal:      lastCutBlockNumber, // Incremented during the WriteBlock call
  1166  			}
  1167  			defer close(mockSupport.BlockCutterVal.Block)
  1168  
  1169  			bareMinimumChain := &chainImpl{
  1170  				parentConsumer:  mockParentConsumer,
  1171  				channelConsumer: mockChannelConsumer,
  1172  
  1173  				channel:            mockChannel,
  1174  				ConsenterSupport:   mockSupport,
  1175  				lastCutBlockNumber: lastCutBlockNumber,
  1176  
  1177  				errorChan:                      errorChan,
  1178  				haltChan:                       haltChan,
  1179  				doneProcessingMessagesToBlocks: make(chan struct{}),
  1180  			}
  1181  
  1182  			var counts []uint64
  1183  			done := make(chan struct{})
  1184  
  1185  			go func() {
  1186  				counts, err = bareMinimumChain.processMessagesToBlocks()
  1187  				done <- struct{}{}
  1188  			}()
  1189  
  1190  			// This is the wrappedMessage that the for-loop will process
  1191  			mpc.YieldMessage(newMockConsumerMessage(newTimeToCutMessage(lastCutBlockNumber + 1)))
  1192  
  1193  			logger.Debug("Closing haltChan to exit the infinite for-loop")
  1194  			close(haltChan) // Identical to chain.Halt()
  1195  			logger.Debug("haltChan closed")
  1196  			<-done
  1197  
  1198  			assert.Error(t, err, "Expected the processMessagesToBlocks call to return an error")
  1199  			assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
  1200  			assert.Equal(t, uint64(1), counts[indexProcessTimeToCutError], "Expected 1 faulty TIMETOCUT message processed")
  1201  			assert.Equal(t, lastCutBlockNumber, bareMinimumChain.lastCutBlockNumber, "Expected lastCutBlockNumber to stay the same")
  1202  		})
  1203  
  1204  		t.Run("ReceiveTimeToCutLargerThanExpected", func(t *testing.T) {
  1205  			errorChan := make(chan struct{})
  1206  			close(errorChan)
  1207  			haltChan := make(chan struct{})
  1208  
  1209  			lastCutBlockNumber := uint64(3)
  1210  
  1211  			mockSupport := &mockmultichannel.ConsenterSupport{
  1212  				Blocks:         make(chan *cb.Block), // WriteBlock will post here
  1213  				BlockCutterVal: mockblockcutter.NewReceiver(),
  1214  				ChannelIDVal:   mockChannel.topic(),
  1215  				HeightVal:      lastCutBlockNumber, // Incremented during the WriteBlock call
  1216  			}
  1217  			defer close(mockSupport.BlockCutterVal.Block)
  1218  
  1219  			bareMinimumChain := &chainImpl{
  1220  				parentConsumer:  mockParentConsumer,
  1221  				channelConsumer: mockChannelConsumer,
  1222  
  1223  				channel:            mockChannel,
  1224  				ConsenterSupport:   mockSupport,
  1225  				lastCutBlockNumber: lastCutBlockNumber,
  1226  
  1227  				errorChan:                      errorChan,
  1228  				haltChan:                       haltChan,
  1229  				doneProcessingMessagesToBlocks: make(chan struct{}),
  1230  			}
  1231  
  1232  			var counts []uint64
  1233  			done := make(chan struct{})
  1234  
  1235  			go func() {
  1236  				counts, err = bareMinimumChain.processMessagesToBlocks()
  1237  				done <- struct{}{}
  1238  			}()
  1239  
  1240  			// This is the wrappedMessage that the for-loop will process
  1241  			mpc.YieldMessage(newMockConsumerMessage(newTimeToCutMessage(lastCutBlockNumber + 2)))
  1242  
  1243  			logger.Debug("Closing haltChan to exit the infinite for-loop")
  1244  			close(haltChan) // Identical to chain.Halt()
  1245  			logger.Debug("haltChan closed")
  1246  			<-done
  1247  
  1248  			assert.Error(t, err, "Expected the processMessagesToBlocks call to return an error")
  1249  			assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
  1250  			assert.Equal(t, uint64(1), counts[indexProcessTimeToCutError], "Expected 1 faulty TIMETOCUT message processed")
  1251  			assert.Equal(t, lastCutBlockNumber, bareMinimumChain.lastCutBlockNumber, "Expected lastCutBlockNumber to stay the same")
  1252  		})
  1253  
  1254  		t.Run("ReceiveTimeToCutStale", func(t *testing.T) {
  1255  			errorChan := make(chan struct{})
  1256  			close(errorChan)
  1257  			haltChan := make(chan struct{})
  1258  
  1259  			lastCutBlockNumber := uint64(3)
  1260  
  1261  			mockSupport := &mockmultichannel.ConsenterSupport{
  1262  				Blocks:         make(chan *cb.Block), // WriteBlock will post here
  1263  				BlockCutterVal: mockblockcutter.NewReceiver(),
  1264  				ChannelIDVal:   mockChannel.topic(),
  1265  				HeightVal:      lastCutBlockNumber, // Incremented during the WriteBlock call
  1266  			}
  1267  			defer close(mockSupport.BlockCutterVal.Block)
  1268  
  1269  			bareMinimumChain := &chainImpl{
  1270  				parentConsumer:  mockParentConsumer,
  1271  				channelConsumer: mockChannelConsumer,
  1272  
  1273  				channel:            mockChannel,
  1274  				ConsenterSupport:   mockSupport,
  1275  				lastCutBlockNumber: lastCutBlockNumber,
  1276  
  1277  				errorChan:                      errorChan,
  1278  				haltChan:                       haltChan,
  1279  				doneProcessingMessagesToBlocks: make(chan struct{}),
  1280  			}
  1281  
  1282  			var counts []uint64
  1283  			done := make(chan struct{})
  1284  
  1285  			go func() {
  1286  				counts, err = bareMinimumChain.processMessagesToBlocks()
  1287  				done <- struct{}{}
  1288  			}()
  1289  
  1290  			// This is the wrappedMessage that the for-loop will process
  1291  			mpc.YieldMessage(newMockConsumerMessage(newTimeToCutMessage(lastCutBlockNumber)))
  1292  
  1293  			logger.Debug("Closing haltChan to exit the infinite for-loop")
  1294  			close(haltChan) // Identical to chain.Halt()
  1295  			logger.Debug("haltChan closed")
  1296  			<-done
  1297  
  1298  			assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
  1299  			assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
  1300  			assert.Equal(t, uint64(1), counts[indexProcessTimeToCutPass], "Expected 1 TIMETOCUT message processed")
  1301  			assert.Equal(t, lastCutBlockNumber, bareMinimumChain.lastCutBlockNumber, "Expected lastCutBlockNumber to stay the same")
  1302  		})
  1303  	})
  1304  
  1305  	t.Run("Connect", func(t *testing.T) {
  1306  		t.Run("ReceiveConnect", func(t *testing.T) {
  1307  			errorChan := make(chan struct{})
  1308  			close(errorChan)
  1309  			haltChan := make(chan struct{})
  1310  
  1311  			mockSupport := &mockmultichannel.ConsenterSupport{
  1312  				ChannelIDVal: mockChannel.topic(),
  1313  			}
  1314  
  1315  			bareMinimumChain := &chainImpl{
  1316  				parentConsumer:  mockParentConsumer,
  1317  				channelConsumer: mockChannelConsumer,
  1318  
  1319  				channel:          mockChannel,
  1320  				ConsenterSupport: mockSupport,
  1321  
  1322  				errorChan:                      errorChan,
  1323  				haltChan:                       haltChan,
  1324  				doneProcessingMessagesToBlocks: make(chan struct{}),
  1325  			}
  1326  
  1327  			var counts []uint64
  1328  			done := make(chan struct{})
  1329  
  1330  			go func() {
  1331  				counts, err = bareMinimumChain.processMessagesToBlocks()
  1332  				done <- struct{}{}
  1333  			}()
  1334  
  1335  			// This is the wrappedMessage that the for-loop will process
  1336  			mpc.YieldMessage(newMockConsumerMessage(newConnectMessage()))
  1337  
  1338  			logger.Debug("Closing haltChan to exit the infinite for-loop")
  1339  			close(haltChan) // Identical to chain.Halt()
  1340  			logger.Debug("haltChan closed")
  1341  			<-done
  1342  
  1343  			assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
  1344  			assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
  1345  			assert.Equal(t, uint64(1), counts[indexProcessConnectPass], "Expected 1 CONNECT message processed")
  1346  		})
  1347  	})
  1348  
  1349  	t.Run("Regular", func(t *testing.T) {
		t.Run("Error", func(t *testing.T) {
			// Yields a REGULAR message whose payload bytes have been tampered
			// with, so the enclosed envelope cannot be unmarshaled; the
			// processing loop must count it under indexProcessRegularError
			// and keep running until halted.
			errorChan := make(chan struct{})
			close(errorChan) // Starts closed so the loop's error branch is immediately selectable
			haltChan := make(chan struct{})

			mockSupport := &mockmultichannel.ConsenterSupport{
				ChannelIDVal: mockChannel.topic(),
			}

			bareMinimumChain := &chainImpl{
				parentConsumer:  mockParentConsumer,
				channelConsumer: mockChannelConsumer,

				channel:          mockChannel,
				ConsenterSupport: mockSupport,

				errorChan:                      errorChan,
				haltChan:                       haltChan,
				doneProcessingMessagesToBlocks: make(chan struct{}),
			}

			// counts/err are written by the goroutine below; `done` signals
			// that processMessagesToBlocks has returned. NOTE(review): err is
			// declared in the enclosing test scope.
			var counts []uint64
			done := make(chan struct{})

			go func() {
				counts, err = bareMinimumChain.processMessagesToBlocks()
				done <- struct{}{}
			}()

			// This is the wrappedMessage that the for-loop will process
			mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(tamperBytes(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage"))))))

			logger.Debug("Closing haltChan to exit the infinite for-loop")
			close(haltChan) // Identical to chain.Halt()
			logger.Debug("haltChan closed")
			<-done

			assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
			assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
			assert.Equal(t, uint64(1), counts[indexProcessRegularError], "Expected 1 damaged REGULAR message processed")
		})
  1391  
  1392  		// This ensures regular kafka messages of type UNKNOWN are handled properly
  1393  		t.Run("Unknown", func(t *testing.T) {
			t.Run("Enqueue", func(t *testing.T) {
				// A well-formed REGULAR message is handed to the mock
				// blockcutter. CutNext is left false, so the message is merely
				// enqueued: no block is cut and lastCutBlockNumber must not move.
				errorChan := make(chan struct{})
				close(errorChan) // Starts closed so the loop's error branch is immediately selectable
				haltChan := make(chan struct{})

				lastCutBlockNumber := uint64(3)

				mockSupport := &mockmultichannel.ConsenterSupport{
					Blocks:          make(chan *cb.Block), // WriteBlock will post here
					BlockCutterVal:  mockblockcutter.NewReceiver(),
					ChannelIDVal:    mockChannel.topic(),
					HeightVal:       lastCutBlockNumber, // Incremented during the WriteBlock call
					SharedConfigVal: newMockOrderer(longTimeout, []string{mockBroker.Addr()}, false),
				}
				defer close(mockSupport.BlockCutterVal.Block)

				bareMinimumChain := &chainImpl{
					parentConsumer:  mockParentConsumer,
					channelConsumer: mockChannelConsumer,

					channel:            mockChannel,
					ConsenterSupport:   mockSupport,
					lastCutBlockNumber: lastCutBlockNumber,

					errorChan:                      errorChan,
					haltChan:                       haltChan,
					doneProcessingMessagesToBlocks: make(chan struct{}),
				}

				var counts []uint64
				done := make(chan struct{})

				go func() {
					counts, err = bareMinimumChain.processMessagesToBlocks()
					done <- struct{}{}
				}()

				// This is the wrappedMessage that the for-loop will process
				mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")))))

				mockSupport.BlockCutterVal.Block <- struct{}{} // Let the `mockblockcutter.Ordered` call return
				logger.Debugf("Mock blockcutter's Ordered call has returned")

				logger.Debug("Closing haltChan to exit the infinite for-loop")
				// We are guaranteed to hit the haltChan branch after hitting the REGULAR branch at least once
				close(haltChan) // Identical to chain.Halt()
				logger.Debug("haltChan closed")
				<-done

				assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
				assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
				assert.Equal(t, uint64(1), counts[indexProcessRegularPass], "Expected 1 REGULAR message processed")
			})
  1447  
			t.Run("CutBlock", func(t *testing.T) {
				// With CutNext set, a single REGULAR message causes the
				// blockcutter to cut a block immediately; the test verifies
				// that lastCutBlockNumber is bumped and that the
				// LastOffsetPersisted metric is updated exactly once.
				errorChan := make(chan struct{})
				close(errorChan) // Starts closed so the loop's error branch is immediately selectable
				haltChan := make(chan struct{})

				lastCutBlockNumber := uint64(3)

				mockSupport := &mockmultichannel.ConsenterSupport{
					Blocks:          make(chan *cb.Block), // WriteBlock will post here
					BlockCutterVal:  mockblockcutter.NewReceiver(),
					ChannelIDVal:    mockChannel.topic(),
					HeightVal:       lastCutBlockNumber, // Incremented during the WriteBlock call
					SharedConfigVal: newMockOrderer(longTimeout, []string{mockBroker.Addr()}, false),
				}
				defer close(mockSupport.BlockCutterVal.Block)

				// Wire a fake gauge into the shared consenter so the metric
				// calls below can be asserted on. With() returns the fake
				// itself so the chained With(...).Set(...) works.
				fakeLastOffsetPersisted := &mockkafka.MetricsGauge{}
				fakeLastOffsetPersisted.WithReturns(fakeLastOffsetPersisted)
				mockConsenter.(*consenterImpl).metrics.LastOffsetPersisted = fakeLastOffsetPersisted

				bareMinimumChain := &chainImpl{
					parentConsumer:  mockParentConsumer,
					channelConsumer: mockChannelConsumer,

					consenter:          mockConsenter,
					channel:            mockChannel,
					ConsenterSupport:   mockSupport,
					lastCutBlockNumber: lastCutBlockNumber,

					errorChan:                      errorChan,
					haltChan:                       haltChan,
					doneProcessingMessagesToBlocks: make(chan struct{})}

				var counts []uint64
				done := make(chan struct{})

				go func() {
					counts, err = bareMinimumChain.processMessagesToBlocks()
					done <- struct{}{}
				}()

				mockSupport.BlockCutterVal.CutNext = true

				// This is the wrappedMessage that the for-loop will process
				mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")))))

				mockSupport.BlockCutterVal.Block <- struct{}{} // Let the `mockblockcutter.Ordered` call return
				logger.Debugf("Mock blockcutter's Ordered call has returned")
				<-mockSupport.Blocks // Let the `mockConsenterSupport.WriteBlock` proceed

				logger.Debug("Closing haltChan to exit the infinite for-loop")
				close(haltChan) // Identical to chain.Halt()
				logger.Debug("haltChan closed")
				<-done

				assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
				assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
				assert.Equal(t, uint64(1), counts[indexProcessRegularPass], "Expected 1 REGULAR message processed")
				assert.Equal(t, lastCutBlockNumber+1, bareMinimumChain.lastCutBlockNumber, "Expected lastCutBlockNumber to be bumped up by one")

				// NOTE(review): the expected Set value 9 presumably reflects
				// the mock consumer's offset at cut time — confirm against the
				// mpc fixture set up earlier in this test function.
				require.Equal(t, fakeLastOffsetPersisted.WithCallCount(), 1)
				assert.Equal(t, fakeLastOffsetPersisted.WithArgsForCall(0), []string{"channel", "mockChannelFoo"})
				require.Equal(t, fakeLastOffsetPersisted.SetCallCount(), 1)
				assert.Equal(t, fakeLastOffsetPersisted.SetArgsForCall(0), float64(9))
			})
  1513  
  1514  			// This test ensures the corner case in FAB-5709 is taken care of
  1515  			t.Run("SecondTxOverflows", func(t *testing.T) {
  1516  				if testing.Short() {
  1517  					t.Skip("Skipping test in short mode")
  1518  				}
  1519  
  1520  				errorChan := make(chan struct{})
  1521  				close(errorChan)
  1522  				haltChan := make(chan struct{})
  1523  
  1524  				lastCutBlockNumber := uint64(3)
  1525  
  1526  				mockSupport := &mockmultichannel.ConsenterSupport{
  1527  					Blocks:          make(chan *cb.Block), // WriteBlock will post here
  1528  					BlockCutterVal:  mockblockcutter.NewReceiver(),
  1529  					ChannelIDVal:    mockChannel.topic(),
  1530  					HeightVal:       lastCutBlockNumber, // Incremented during the WriteBlock call
  1531  					SharedConfigVal: newMockOrderer(longTimeout, []string{mockBroker.Addr()}, false),
  1532  				}
  1533  				defer close(mockSupport.BlockCutterVal.Block)
  1534  
  1535  				bareMinimumChain := &chainImpl{
  1536  					parentConsumer:  mockParentConsumer,
  1537  					channelConsumer: mockChannelConsumer,
  1538  
  1539  					consenter:          mockConsenter,
  1540  					channel:            mockChannel,
  1541  					ConsenterSupport:   mockSupport,
  1542  					lastCutBlockNumber: lastCutBlockNumber,
  1543  
  1544  					errorChan:                      errorChan,
  1545  					haltChan:                       haltChan,
  1546  					doneProcessingMessagesToBlocks: make(chan struct{}),
  1547  				}
  1548  
  1549  				var counts []uint64
  1550  				done := make(chan struct{})
  1551  
  1552  				go func() {
  1553  					counts, err = bareMinimumChain.processMessagesToBlocks()
  1554  					done <- struct{}{}
  1555  				}()
  1556  
  1557  				var block1, block2 *cb.Block
  1558  
  1559  				block1LastOffset := mpc.HighWaterMarkOffset()
  1560  				mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")))))
  1561  				mockSupport.BlockCutterVal.Block <- struct{}{} // Let the `mockblockcutter.Ordered` call return
  1562  
  1563  				// Set CutAncestors to true so that second message overflows receiver batch
  1564  				mockSupport.BlockCutterVal.CutAncestors = true
  1565  				mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")))))
  1566  				mockSupport.BlockCutterVal.Block <- struct{}{}
  1567  
  1568  				select {
  1569  				case block1 = <-mockSupport.Blocks: // Let the `mockConsenterSupport.WriteBlock` proceed
  1570  				case <-time.After(shortTimeout):
  1571  					logger.Fatalf("Did not receive a block from the blockcutter as expected")
  1572  				}
  1573  
  1574  				// Set CutNext to true to flush all pending messages
  1575  				mockSupport.BlockCutterVal.CutAncestors = false
  1576  				mockSupport.BlockCutterVal.CutNext = true
  1577  				block2LastOffset := mpc.HighWaterMarkOffset()
  1578  				mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")))))
  1579  				mockSupport.BlockCutterVal.Block <- struct{}{}
  1580  
  1581  				select {
  1582  				case block2 = <-mockSupport.Blocks:
  1583  				case <-time.After(shortTimeout):
  1584  					logger.Fatalf("Did not receive a block from the blockcutter as expected")
  1585  				}
  1586  
  1587  				logger.Debug("Closing haltChan to exit the infinite for-loop")
  1588  				close(haltChan) // Identical to chain.Halt()
  1589  				logger.Debug("haltChan closed")
  1590  				<-done
  1591  
  1592  				assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
  1593  				assert.Equal(t, uint64(3), counts[indexRecvPass], "Expected 2 messages received and unmarshaled")
  1594  				assert.Equal(t, uint64(3), counts[indexProcessRegularPass], "Expected 2 REGULAR messages processed")
  1595  				assert.Equal(t, lastCutBlockNumber+2, bareMinimumChain.lastCutBlockNumber, "Expected lastCutBlockNumber to be bumped up by two")
  1596  				assert.Equal(t, block1LastOffset, extractEncodedOffset(block1.GetMetadata().Metadata[cb.BlockMetadataIndex_ORDERER]), "Expected encoded offset in first block to be %d", block1LastOffset)
  1597  				assert.Equal(t, block2LastOffset, extractEncodedOffset(block2.GetMetadata().Metadata[cb.BlockMetadataIndex_ORDERER]), "Expected encoded offset in second block to be %d", block2LastOffset)
  1598  			})
  1599  
			t.Run("InvalidConfigEnv", func(t *testing.T) {
				// A config envelope is yielded but ProcessConfigMsgErr makes
				// re-validation fail, so the message must be counted as a
				// REGULAR processing error and no block may be cut.
				errorChan := make(chan struct{})
				close(errorChan) // Starts closed so the loop's error branch is immediately selectable
				haltChan := make(chan struct{})

				lastCutBlockNumber := uint64(3)

				mockSupport := &mockmultichannel.ConsenterSupport{
					Blocks:              make(chan *cb.Block), // WriteBlock will post here
					BlockCutterVal:      mockblockcutter.NewReceiver(),
					ChannelIDVal:        mockChannel.topic(),
					HeightVal:           lastCutBlockNumber, // Incremented during the WriteBlock call
					ClassifyMsgVal:      msgprocessor.ConfigMsg,
					ProcessConfigMsgErr: fmt.Errorf("Invalid config message"),
					SharedConfigVal:     newMockOrderer(longTimeout, []string{mockBroker.Addr()}, false),
				}
				defer close(mockSupport.BlockCutterVal.Block)

				bareMinimumChain := &chainImpl{
					parentConsumer:  mockParentConsumer,
					channelConsumer: mockChannelConsumer,

					channel:            mockChannel,
					ConsenterSupport:   mockSupport,
					lastCutBlockNumber: lastCutBlockNumber,

					errorChan:                      errorChan,
					haltChan:                       haltChan,
					doneProcessingMessagesToBlocks: make(chan struct{}),
				}

				var counts []uint64
				done := make(chan struct{})

				go func() {
					counts, err = bareMinimumChain.processMessagesToBlocks()
					done <- struct{}{}
				}()

				// This is the config wrappedMessage that the for-loop will process.
				mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(protoutil.MarshalOrPanic(newMockConfigEnvelope()))))

				logger.Debug("Closing haltChan to exit the infinite for-loop")
				// We are guaranteed to hit the haltChan branch after hitting the REGULAR branch at least once
				close(haltChan) // Identical to chain.Halt()
				logger.Debug("haltChan closed")
				<-done

				assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
				assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
				assert.Equal(t, uint64(1), counts[indexProcessRegularError], "Expected 1 REGULAR message error")
				assert.Equal(t, lastCutBlockNumber, bareMinimumChain.lastCutBlockNumber, "Expected lastCutBlockNumber not to be incremented")
			})
  1653  
			t.Run("InvalidOrdererTxEnv", func(t *testing.T) {
				// Same setup as InvalidConfigEnv, but the payload is an
				// ORDERER_TRANSACTION envelope; with ProcessConfigMsgErr set,
				// it must likewise be counted as a REGULAR processing error.
				errorChan := make(chan struct{})
				close(errorChan) // Starts closed so the loop's error branch is immediately selectable
				haltChan := make(chan struct{})

				lastCutBlockNumber := uint64(3)

				mockSupport := &mockmultichannel.ConsenterSupport{
					Blocks:              make(chan *cb.Block), // WriteBlock will post here
					BlockCutterVal:      mockblockcutter.NewReceiver(),
					ChannelIDVal:        mockChannel.topic(),
					HeightVal:           lastCutBlockNumber, // Incremented during the WriteBlock call
					ClassifyMsgVal:      msgprocessor.ConfigMsg,
					ProcessConfigMsgErr: fmt.Errorf("Invalid config message"),
					SharedConfigVal:     newMockOrderer(longTimeout, []string{mockBroker.Addr()}, false),
				}
				defer close(mockSupport.BlockCutterVal.Block)

				bareMinimumChain := &chainImpl{
					parentConsumer:  mockParentConsumer,
					channelConsumer: mockChannelConsumer,

					channel:            mockChannel,
					ConsenterSupport:   mockSupport,
					lastCutBlockNumber: lastCutBlockNumber,

					errorChan:                      errorChan,
					haltChan:                       haltChan,
					doneProcessingMessagesToBlocks: make(chan struct{}),
				}

				var counts []uint64
				done := make(chan struct{})

				go func() {
					counts, err = bareMinimumChain.processMessagesToBlocks()
					done <- struct{}{}
				}()

				// This is the config wrappedMessage that the for-loop will process.
				mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(protoutil.MarshalOrPanic(newMockOrdererTxEnvelope()))))

				logger.Debug("Closing haltChan to exit the infinite for-loop")
				// We are guaranteed to hit the haltChan branch after hitting the REGULAR branch at least once
				close(haltChan) // Identical to chain.Halt()
				logger.Debug("haltChan closed")
				<-done

				assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
				assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
				assert.Equal(t, uint64(1), counts[indexProcessRegularError], "Expected 1 REGULAR message error")
				assert.Equal(t, lastCutBlockNumber, bareMinimumChain.lastCutBlockNumber, "Expected lastCutBlockNumber not to be incremented")
			})
  1707  
  1708  			t.Run("InvalidNormalEnv", func(t *testing.T) {
  1709  				errorChan := make(chan struct{})
  1710  				close(errorChan)
  1711  				haltChan := make(chan struct{})
  1712  
  1713  				lastCutBlockNumber := uint64(3)
  1714  
  1715  				mockSupport := &mockmultichannel.ConsenterSupport{
  1716  					Blocks:              make(chan *cb.Block), // WriteBlock will post here
  1717  					BlockCutterVal:      mockblockcutter.NewReceiver(),
  1718  					ChannelIDVal:        mockChannel.topic(),
  1719  					HeightVal:           lastCutBlockNumber, // Incremented during the WriteBlock call
  1720  					SharedConfigVal:     newMockOrderer(longTimeout, []string{mockBroker.Addr()}, false),
  1721  					ProcessNormalMsgErr: fmt.Errorf("Invalid normal message"),
  1722  				}
  1723  				defer close(mockSupport.BlockCutterVal.Block)
  1724  
  1725  				bareMinimumChain := &chainImpl{
  1726  					parentConsumer:  mockParentConsumer,
  1727  					channelConsumer: mockChannelConsumer,
  1728  
  1729  					channel:            mockChannel,
  1730  					ConsenterSupport:   mockSupport,
  1731  					lastCutBlockNumber: lastCutBlockNumber,
  1732  
  1733  					errorChan:                      errorChan,
  1734  					haltChan:                       haltChan,
  1735  					doneProcessingMessagesToBlocks: make(chan struct{}),
  1736  				}
  1737  
  1738  				var counts []uint64
  1739  				done := make(chan struct{})
  1740  
  1741  				go func() {
  1742  					counts, err = bareMinimumChain.processMessagesToBlocks()
  1743  					done <- struct{}{}
  1744  				}()
  1745  
  1746  				// This is the wrappedMessage that the for-loop will process
  1747  				mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")))))
  1748  
  1749  				close(haltChan) // Identical to chain.Halt()
  1750  				<-done
  1751  
  1752  				assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
  1753  				assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
  1754  				assert.Equal(t, uint64(1), counts[indexProcessRegularError], "Expected 1 REGULAR message processed")
  1755  			})
  1756  
			t.Run("CutConfigEnv", func(t *testing.T) {
				// A valid config envelope must cut a block on its own:
				// lastCutBlockNumber is incremented and the config block's
				// ORDERER metadata carries the offset the message arrived at.
				errorChan := make(chan struct{})
				close(errorChan) // Starts closed so the loop's error branch is immediately selectable
				haltChan := make(chan struct{})

				lastCutBlockNumber := uint64(3)

				mockSupport := &mockmultichannel.ConsenterSupport{
					Blocks:          make(chan *cb.Block), // WriteBlock will post here
					BlockCutterVal:  mockblockcutter.NewReceiver(),
					ChannelIDVal:    mockChannel.topic(),
					HeightVal:       lastCutBlockNumber, // Incremented during the WriteBlock call
					SharedConfigVal: newMockOrderer(longTimeout, []string{mockBroker.Addr()}, false),
					ClassifyMsgVal:  msgprocessor.ConfigMsg,
				}
				defer close(mockSupport.BlockCutterVal.Block)

				bareMinimumChain := &chainImpl{
					parentConsumer:  mockParentConsumer,
					channelConsumer: mockChannelConsumer,

					consenter:          mockConsenter,
					channel:            mockChannel,
					ConsenterSupport:   mockSupport,
					lastCutBlockNumber: lastCutBlockNumber,

					errorChan:                      errorChan,
					haltChan:                       haltChan,
					doneProcessingMessagesToBlocks: make(chan struct{}),
				}

				var counts []uint64
				done := make(chan struct{})

				go func() {
					counts, err = bareMinimumChain.processMessagesToBlocks()
					done <- struct{}{}
				}()

				// Record the offset the config message will be consumed at.
				configBlkOffset := mpc.HighWaterMarkOffset()
				mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(protoutil.MarshalOrPanic(newMockConfigEnvelope()))))

				var configBlk *cb.Block

				select {
				case configBlk = <-mockSupport.Blocks:
				case <-time.After(shortTimeout):
					// NOTE(review): logger.Fatalf (not t.Fatalf) aborts the
					// whole process on timeout — kept as-is to match the file.
					logger.Fatalf("Did not receive a config block from the blockcutter as expected")
				}

				close(haltChan) // Identical to chain.Halt()
				<-done

				assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
				assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
				assert.Equal(t, uint64(1), counts[indexProcessRegularPass], "Expected 1 REGULAR message processed")
				assert.Equal(t, lastCutBlockNumber+1, bareMinimumChain.lastCutBlockNumber, "Expected lastCutBlockNumber to be incremented by 1")
				assert.Equal(t, configBlkOffset, extractEncodedOffset(configBlk.GetMetadata().Metadata[cb.BlockMetadataIndex_ORDERER]), "Expected encoded offset in second block to be %d", configBlkOffset)
			})
  1816  
  1817  			// We are not expecting this type of message from Kafka
  1818  			t.Run("ConfigUpdateEnv", func(t *testing.T) {
  1819  				errorChan := make(chan struct{})
  1820  				close(errorChan)
  1821  				haltChan := make(chan struct{})
  1822  
  1823  				lastCutBlockNumber := uint64(3)
  1824  
  1825  				mockSupport := &mockmultichannel.ConsenterSupport{
  1826  					Blocks:          make(chan *cb.Block), // WriteBlock will post here
  1827  					BlockCutterVal:  mockblockcutter.NewReceiver(),
  1828  					ChannelIDVal:    mockChannel.topic(),
  1829  					HeightVal:       lastCutBlockNumber, // Incremented during the WriteBlock call
  1830  					SharedConfigVal: newMockOrderer(longTimeout, []string{mockBroker.Addr()}, false),
  1831  					ClassifyMsgVal:  msgprocessor.ConfigUpdateMsg,
  1832  				}
  1833  				defer close(mockSupport.BlockCutterVal.Block)
  1834  
  1835  				bareMinimumChain := &chainImpl{
  1836  					parentConsumer:  mockParentConsumer,
  1837  					channelConsumer: mockChannelConsumer,
  1838  
  1839  					channel:            mockChannel,
  1840  					ConsenterSupport:   mockSupport,
  1841  					lastCutBlockNumber: lastCutBlockNumber,
  1842  
  1843  					errorChan:                      errorChan,
  1844  					haltChan:                       haltChan,
  1845  					doneProcessingMessagesToBlocks: make(chan struct{}),
  1846  				}
  1847  
  1848  				var counts []uint64
  1849  				done := make(chan struct{})
  1850  
  1851  				go func() {
  1852  					counts, err = bareMinimumChain.processMessagesToBlocks()
  1853  					done <- struct{}{}
  1854  				}()
  1855  
  1856  				mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(protoutil.MarshalOrPanic(newMockEnvelope("FooMessage")))))
  1857  
  1858  				close(haltChan) // Identical to chain.Halt()
  1859  				<-done
  1860  
  1861  				assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
  1862  				assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
  1863  				assert.Equal(t, uint64(1), counts[indexProcessRegularError], "Expected 1 REGULAR message processed")
  1864  			})
  1865  
			t.Run("SendTimeToCut", func(t *testing.T) {
				// Skipped: an enqueued message with a very short batch timeout
				// should make the chain produce a time-to-cut message; the
				// sleep-based synchronization below is racy, hence the skip.
				t.Skip("Skipping test as it introduces a race condition")

				// NB We haven't set a handlermap for the mock broker so we need to set
				// the ProduceResponse
				successResponse := new(sarama.ProduceResponse)
				successResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), sarama.ErrNoError)
				mockBroker.Returns(successResponse)

				errorChan := make(chan struct{})
				close(errorChan) // Starts closed so the loop's error branch is immediately selectable
				haltChan := make(chan struct{})

				lastCutBlockNumber := uint64(3)

				mockSupport := &mockmultichannel.ConsenterSupport{
					Blocks:          make(chan *cb.Block), // WriteBlock will post here
					BlockCutterVal:  mockblockcutter.NewReceiver(),
					ChannelIDVal:    mockChannel.topic(),
					HeightVal:       lastCutBlockNumber,                                                    // Incremented during the WriteBlock call
					SharedConfigVal: newMockOrderer(extraShortTimeout, []string{mockBroker.Addr()}, false), // ATTN
				}
				defer close(mockSupport.BlockCutterVal.Block)

				bareMinimumChain := &chainImpl{
					producer:        producer,
					parentConsumer:  mockParentConsumer,
					channelConsumer: mockChannelConsumer,

					channel:            mockChannel,
					ConsenterSupport:   mockSupport,
					lastCutBlockNumber: lastCutBlockNumber,

					errorChan:                      errorChan,
					haltChan:                       haltChan,
					doneProcessingMessagesToBlocks: make(chan struct{}),
				}

				var counts []uint64
				done := make(chan struct{})

				go func() {
					counts, err = bareMinimumChain.processMessagesToBlocks()
					done <- struct{}{}
				}()

				// This is the wrappedMessage that the for-loop will process
				mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")))))

				mockSupport.BlockCutterVal.Block <- struct{}{} // Let the `mockblockcutter.Ordered` call return
				logger.Debugf("Mock blockcutter's Ordered call has returned")

				// Sleep so that the timer branch is activated before the exitChan one.
				// TODO This is a race condition, will fix in follow-up changeset
				time.Sleep(hitBranch)

				logger.Debug("Closing haltChan to exit the infinite for-loop")
				close(haltChan) // Identical to chain.Halt()
				logger.Debug("haltChan closed")
				<-done

				assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
				assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
				assert.Equal(t, uint64(1), counts[indexProcessRegularPass], "Expected 1 REGULAR message processed")
				assert.Equal(t, uint64(1), counts[indexSendTimeToCutPass], "Expected 1 TIMER event processed")
				assert.Equal(t, lastCutBlockNumber, bareMinimumChain.lastCutBlockNumber, "Expected lastCutBlockNumber to stay the same")
			})
  1933  
			t.Run("SendTimeToCutError", func(t *testing.T) {
				// Note that this test is affected by the following parameters:
				// - Net.ReadTimeout
				// - Consumer.Retry.Backoff
				// - Metadata.Retry.Max

				// Skipped for the same sleep-based race as SendTimeToCut.
				t.Skip("Skipping test as it introduces a race condition")

				// Exact same test as ReceiveRegularAndSendTimeToCut.
				// Only difference is that the producer's attempt to send a TTC will
				// fail with an ErrNotEnoughReplicas error.
				failureResponse := new(sarama.ProduceResponse)
				failureResponse.AddTopicPartition(mockChannel.topic(), mockChannel.partition(), sarama.ErrNotEnoughReplicas)
				mockBroker.Returns(failureResponse)

				errorChan := make(chan struct{})
				close(errorChan) // Starts closed so the loop's error branch is immediately selectable
				haltChan := make(chan struct{})

				lastCutBlockNumber := uint64(3)

				mockSupport := &mockmultichannel.ConsenterSupport{
					Blocks:          make(chan *cb.Block), // WriteBlock will post here
					BlockCutterVal:  mockblockcutter.NewReceiver(),
					ChannelIDVal:    mockChannel.topic(),
					HeightVal:       lastCutBlockNumber,                                                    // Incremented during the WriteBlock call
					SharedConfigVal: newMockOrderer(extraShortTimeout, []string{mockBroker.Addr()}, false), // ATTN
				}
				defer close(mockSupport.BlockCutterVal.Block)

				bareMinimumChain := &chainImpl{
					producer:        producer,
					parentConsumer:  mockParentConsumer,
					channelConsumer: mockChannelConsumer,

					channel:            mockChannel,
					ConsenterSupport:   mockSupport,
					lastCutBlockNumber: lastCutBlockNumber,

					errorChan:                      errorChan,
					haltChan:                       haltChan,
					doneProcessingMessagesToBlocks: make(chan struct{}),
				}

				var counts []uint64
				done := make(chan struct{})

				go func() {
					counts, err = bareMinimumChain.processMessagesToBlocks()
					done <- struct{}{}
				}()

				// This is the wrappedMessage that the for-loop will process
				mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")))))

				mockSupport.BlockCutterVal.Block <- struct{}{} // Let the `mockblockcutter.Ordered` call return
				logger.Debugf("Mock blockcutter's Ordered call has returned")

				// Sleep so that the timer branch is activated before the exitChan one.
				// TODO This is a race condition, will fix in follow-up changeset
				time.Sleep(hitBranch)

				logger.Debug("Closing haltChan to exit the infinite for-loop")
				close(haltChan) // Identical to chain.Halt()
				logger.Debug("haltChan closed")
				<-done

				assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
				assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
				assert.Equal(t, uint64(1), counts[indexProcessRegularPass], "Expected 1 REGULAR message processed")
				assert.Equal(t, uint64(1), counts[indexSendTimeToCutError], "Expected 1 faulty TIMER event processed")
				assert.Equal(t, lastCutBlockNumber, bareMinimumChain.lastCutBlockNumber, "Expected lastCutBlockNumber to stay the same")
			})
  2007  		})
  2008  
  2009  		// This ensures regular kafka messages of type NORMAL are handled properly
  2010  		t.Run("Normal", func(t *testing.T) {
  2011  			lastOriginalOffsetProcessed := int64(3)
  2012  
			// Two NORMAL messages are injected; the second one is marked as an
			// isolated tx by the mock blockcutter, so two blocks are cut. The
			// encoded Kafka offset in each block's ORDERER metadata must match
			// the high-water mark captured just before each injection.
			t.Run("ReceiveTwoRegularAndCutTwoBlocks", func(t *testing.T) {
				if testing.Short() {
					t.Skip("Skipping test in short mode")
				}

				// Closed errorChan simulates a healthy (re-established) consumer
				// link; haltChan stays open until we halt the loop at the end.
				errorChan := make(chan struct{})
				close(errorChan)
				haltChan := make(chan struct{})

				lastCutBlockNumber := uint64(3)

				mockSupport := &mockmultichannel.ConsenterSupport{
					Blocks:          make(chan *cb.Block), // WriteBlock will post here
					BlockCutterVal:  mockblockcutter.NewReceiver(),
					ChannelIDVal:    mockChannel.topic(),
					HeightVal:       lastCutBlockNumber, // Incremented during the WriteBlock call
					SharedConfigVal: newMockOrderer(longTimeout, []string{mockBroker.Addr()}, false),
					SequenceVal:     uint64(0),
				}
				defer close(mockSupport.BlockCutterVal.Block)

				bareMinimumChain := &chainImpl{
					parentConsumer:  mockParentConsumer,
					channelConsumer: mockChannelConsumer,

					consenter:                   mockConsenter,
					channel:                     mockChannel,
					ConsenterSupport:            mockSupport,
					lastCutBlockNumber:          lastCutBlockNumber,
					lastOriginalOffsetProcessed: lastOriginalOffsetProcessed,

					errorChan:                      errorChan,
					haltChan:                       haltChan,
					doneProcessingMessagesToBlocks: make(chan struct{}),
				}

				var counts []uint64
				done := make(chan struct{})

				// Run the processing loop in the background; `done` lets us wait
				// for it to exit after haltChan is closed.
				go func() {
					counts, err = bareMinimumChain.processMessagesToBlocks()
					done <- struct{}{}
				}()

				var block1, block2 *cb.Block

				// This is the first wrappedMessage that the for-loop will process
				block1LastOffset := mpc.HighWaterMarkOffset()
				mpc.YieldMessage(newMockConsumerMessage(newNormalMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")), uint64(0), int64(0))))
				mockSupport.BlockCutterVal.Block <- struct{}{} // Let the `mockblockcutter.Ordered` call return
				logger.Debugf("Mock blockcutter's Ordered call has returned")

				// Make the blockcutter treat the next message as an isolated tx,
				// forcing a second block to be cut for it.
				mockSupport.BlockCutterVal.IsolatedTx = true

				// This is the second wrappedMessage that the for-loop will process
				block2LastOffset := mpc.HighWaterMarkOffset()
				mpc.YieldMessage(newMockConsumerMessage(newNormalMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")), uint64(0), int64(0))))
				mockSupport.BlockCutterVal.Block <- struct{}{}
				logger.Debugf("Mock blockcutter's Ordered call has returned for the second time")

				select {
				case block1 = <-mockSupport.Blocks: // Let the `mockConsenterSupport.WriteBlock` proceed
				case <-time.After(shortTimeout):
					logger.Fatalf("Did not receive a block from the blockcutter as expected")
				}

				select {
				case block2 = <-mockSupport.Blocks:
				case <-time.After(shortTimeout):
					logger.Fatalf("Did not receive a block from the blockcutter as expected")
				}

				logger.Debug("Closing haltChan to exit the infinite for-loop")
				close(haltChan) // Identical to chain.Halt()
				logger.Debug("haltChan closed")
				<-done

				assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
				assert.Equal(t, uint64(2), counts[indexRecvPass], "Expected 2 messages received and unmarshaled")
				assert.Equal(t, uint64(2), counts[indexProcessRegularPass], "Expected 2 REGULAR messages processed")
				assert.Equal(t, lastCutBlockNumber+2, bareMinimumChain.lastCutBlockNumber, "Expected lastCutBlockNumber to be bumped up by two")
				assert.Equal(t, block1LastOffset, extractEncodedOffset(block1.GetMetadata().Metadata[cb.BlockMetadataIndex_ORDERER]), "Expected encoded offset in first block to be %d", block1LastOffset)
				assert.Equal(t, block2LastOffset, extractEncodedOffset(block2.GetMetadata().Metadata[cb.BlockMetadataIndex_ORDERER]), "Expected encoded offset in second block to be %d", block2LastOffset)
			})
  2097  
  2098  			t.Run("ReceiveRegularAndQueue", func(t *testing.T) {
  2099  				if testing.Short() {
  2100  					t.Skip("Skipping test in short mode")
  2101  				}
  2102  
  2103  				errorChan := make(chan struct{})
  2104  				close(errorChan)
  2105  				haltChan := make(chan struct{})
  2106  
  2107  				lastCutBlockNumber := uint64(3)
  2108  
  2109  				mockSupport := &mockmultichannel.ConsenterSupport{
  2110  					Blocks:          make(chan *cb.Block), // WriteBlock will post here
  2111  					BlockCutterVal:  mockblockcutter.NewReceiver(),
  2112  					ChannelIDVal:    mockChannel.topic(),
  2113  					HeightVal:       lastCutBlockNumber, // Incremented during the WriteBlock call
  2114  					SharedConfigVal: newMockOrderer(longTimeout, []string{mockBroker.Addr()}, false),
  2115  				}
  2116  				defer close(mockSupport.BlockCutterVal.Block)
  2117  
  2118  				bareMinimumChain := &chainImpl{
  2119  					parentConsumer:  mockParentConsumer,
  2120  					channelConsumer: mockChannelConsumer,
  2121  
  2122  					consenter:                   mockConsenter,
  2123  					channel:                     mockChannel,
  2124  					ConsenterSupport:            mockSupport,
  2125  					lastCutBlockNumber:          lastCutBlockNumber,
  2126  					lastOriginalOffsetProcessed: lastOriginalOffsetProcessed,
  2127  
  2128  					errorChan:                      errorChan,
  2129  					haltChan:                       haltChan,
  2130  					doneProcessingMessagesToBlocks: make(chan struct{}),
  2131  				}
  2132  
  2133  				var counts []uint64
  2134  				done := make(chan struct{})
  2135  
  2136  				go func() {
  2137  					counts, err = bareMinimumChain.processMessagesToBlocks()
  2138  					done <- struct{}{}
  2139  				}()
  2140  
  2141  				mockSupport.BlockCutterVal.CutNext = true
  2142  
  2143  				mpc.YieldMessage(newMockConsumerMessage(newNormalMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")), uint64(0), int64(0))))
  2144  				mockSupport.BlockCutterVal.Block <- struct{}{} // Let the `mockblockcutter.Ordered` call return
  2145  				<-mockSupport.Blocks
  2146  
  2147  				close(haltChan)
  2148  				logger.Debug("haltChan closed")
  2149  				<-done
  2150  
  2151  				assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
  2152  				assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 2 message received and unmarshaled")
  2153  				assert.Equal(t, uint64(1), counts[indexProcessRegularPass], "Expected 1 REGULAR message processed")
  2154  			})
  2155  		})
  2156  
  2157  		// This ensures regular kafka messages of type CONFIG are handled properly
  2158  		t.Run("Config", func(t *testing.T) {
  2159  			// This test sends a normal tx, followed by a config tx. It should
  2160  			// immediately cut them into two blocks.
  2161  			t.Run("ReceiveConfigEnvelopeAndCut", func(t *testing.T) {
  2162  				errorChan := make(chan struct{})
  2163  				close(errorChan)
  2164  				haltChan := make(chan struct{})
  2165  
  2166  				lastCutBlockNumber := uint64(3)
  2167  
  2168  				mockSupport := &mockmultichannel.ConsenterSupport{
  2169  					Blocks:          make(chan *cb.Block), // WriteBlock will post here
  2170  					BlockCutterVal:  mockblockcutter.NewReceiver(),
  2171  					ChannelIDVal:    mockChannel.topic(),
  2172  					HeightVal:       lastCutBlockNumber, // Incremented during the WriteBlock call
  2173  					SharedConfigVal: newMockOrderer(longTimeout, []string{mockBroker.Addr()}, false),
  2174  				}
  2175  				defer close(mockSupport.BlockCutterVal.Block)
  2176  
  2177  				fakeLastOffsetPersisted := &mockkafka.MetricsGauge{}
  2178  				fakeLastOffsetPersisted.WithReturns(fakeLastOffsetPersisted)
  2179  				mockConsenter.(*consenterImpl).metrics.LastOffsetPersisted = fakeLastOffsetPersisted
  2180  
  2181  				bareMinimumChain := &chainImpl{
  2182  					parentConsumer:  mockParentConsumer,
  2183  					channelConsumer: mockChannelConsumer,
  2184  
  2185  					consenter:          mockConsenter,
  2186  					channel:            mockChannel,
  2187  					ConsenterSupport:   mockSupport,
  2188  					lastCutBlockNumber: lastCutBlockNumber,
  2189  
  2190  					errorChan:                      errorChan,
  2191  					haltChan:                       haltChan,
  2192  					doneProcessingMessagesToBlocks: make(chan struct{}),
  2193  				}
  2194  
  2195  				var counts []uint64
  2196  				done := make(chan struct{})
  2197  
  2198  				go func() {
  2199  					counts, err = bareMinimumChain.processMessagesToBlocks()
  2200  					done <- struct{}{}
  2201  				}()
  2202  
  2203  				normalBlkOffset := mpc.HighWaterMarkOffset()
  2204  				mpc.YieldMessage(newMockConsumerMessage(newNormalMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")), uint64(0), int64(0))))
  2205  				mockSupport.BlockCutterVal.Block <- struct{}{} // Let the `mockblockcutter.Ordered` call return
  2206  
  2207  				configBlkOffset := mpc.HighWaterMarkOffset()
  2208  				mockSupport.ClassifyMsgVal = msgprocessor.ConfigMsg
  2209  				mpc.YieldMessage(newMockConsumerMessage(newConfigMessage(
  2210  					protoutil.MarshalOrPanic(newMockConfigEnvelope()),
  2211  					uint64(0),
  2212  					int64(0))))
  2213  
  2214  				var normalBlk, configBlk *cb.Block
  2215  				select {
  2216  				case normalBlk = <-mockSupport.Blocks:
  2217  				case <-time.After(shortTimeout):
  2218  					logger.Fatalf("Did not receive a normal block from the blockcutter as expected")
  2219  				}
  2220  
  2221  				select {
  2222  				case configBlk = <-mockSupport.Blocks:
  2223  				case <-time.After(shortTimeout):
  2224  					logger.Fatalf("Did not receive a config block from the blockcutter as expected")
  2225  				}
  2226  
  2227  				close(haltChan) // Identical to chain.Halt()
  2228  				<-done
  2229  
  2230  				assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
  2231  				assert.Equal(t, uint64(2), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
  2232  				assert.Equal(t, uint64(2), counts[indexProcessRegularPass], "Expected 1 REGULAR message processed")
  2233  				assert.Equal(t, lastCutBlockNumber+2, bareMinimumChain.lastCutBlockNumber, "Expected lastCutBlockNumber to be incremented by 2")
  2234  				assert.Equal(t, normalBlkOffset, extractEncodedOffset(normalBlk.GetMetadata().Metadata[cb.BlockMetadataIndex_ORDERER]), "Expected encoded offset in first block to be %d", normalBlkOffset)
  2235  				assert.Equal(t, configBlkOffset, extractEncodedOffset(configBlk.GetMetadata().Metadata[cb.BlockMetadataIndex_ORDERER]), "Expected encoded offset in second block to be %d", configBlkOffset)
  2236  
  2237  				require.Equal(t, fakeLastOffsetPersisted.WithCallCount(), 2)
  2238  				assert.Equal(t, fakeLastOffsetPersisted.WithArgsForCall(0), []string{"channel", "mockChannelFoo"})
  2239  				assert.Equal(t, fakeLastOffsetPersisted.WithArgsForCall(1), []string{"channel", "mockChannelFoo"})
  2240  				require.Equal(t, fakeLastOffsetPersisted.SetCallCount(), 2)
  2241  				assert.Equal(t, fakeLastOffsetPersisted.SetArgsForCall(0), float64(normalBlkOffset))
  2242  				assert.Equal(t, fakeLastOffsetPersisted.SetArgsForCall(1), float64(configBlkOffset))
  2243  			})
  2244  
			// This ensures config message is re-validated if config seq has advanced
			t.Run("RevalidateConfigEnvInvalid", func(t *testing.T) {
				if testing.Short() {
					t.Skip("Skipping test in short mode")
				}

				errorChan := make(chan struct{})
				close(errorChan)
				haltChan := make(chan struct{})

				lastCutBlockNumber := uint64(3)

				// SequenceVal (1) is ahead of the config seq carried by the
				// injected message (0), forcing re-validation; the configured
				// ProcessConfigMsgErr then makes that re-validation fail, so no
				// block may be cut.
				mockSupport := &mockmultichannel.ConsenterSupport{
					Blocks:              make(chan *cb.Block), // WriteBlock will post here
					BlockCutterVal:      mockblockcutter.NewReceiver(),
					ChannelIDVal:        mockChannel.topic(),
					HeightVal:           lastCutBlockNumber, // Incremented during the WriteBlock call
					ClassifyMsgVal:      msgprocessor.ConfigMsg,
					SharedConfigVal:     newMockOrderer(longTimeout, []string{mockBroker.Addr()}, false),
					SequenceVal:         uint64(1),
					ProcessConfigMsgErr: fmt.Errorf("Invalid config message"),
				}
				defer close(mockSupport.BlockCutterVal.Block)

				bareMinimumChain := &chainImpl{
					parentConsumer:  mockParentConsumer,
					channelConsumer: mockChannelConsumer,

					channel:            mockChannel,
					ConsenterSupport:   mockSupport,
					lastCutBlockNumber: lastCutBlockNumber,

					errorChan:                      errorChan,
					haltChan:                       haltChan,
					doneProcessingMessagesToBlocks: make(chan struct{}),
				}

				var counts []uint64
				done := make(chan struct{})

				go func() {
					counts, err = bareMinimumChain.processMessagesToBlocks()
					done <- struct{}{}
				}()

				// Inject a CONFIG message validated against config seq 0.
				mpc.YieldMessage(newMockConsumerMessage(newConfigMessage(
					protoutil.MarshalOrPanic(newMockConfigEnvelope()),
					uint64(0),
					int64(0))))
				select {
				case <-mockSupport.Blocks:
					t.Fatalf("Expected no block being cut given invalid config message")
				case <-time.After(shortTimeout):
				}

				close(haltChan) // Identical to chain.Halt()
				<-done

				assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
				assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
				assert.Equal(t, uint64(1), counts[indexProcessRegularError], "Expected 1 REGULAR message error")
			})
  2307  		})
  2308  	})
  2309  
  2310  	t.Run("KafkaError", func(t *testing.T) {
		// A Kafka consumer error must be counted and must cause the chain's
		// errorChan to be closed (signalling an unhealthy link).
		t.Run("ReceiveKafkaErrorAndCloseErrorChan", func(t *testing.T) {
			// If we set up the mock broker so that it returns a response, if the
			// test finishes before the sendConnectMessage goroutine has received
			// this response, we will get a failure ("not all expectations were
			// satisfied") from the mock broker. So we sabotage the producer.
			failedProducer, _ := sarama.NewSyncProducer([]string{}, mockBrokerConfig)

			// We need to have the sendConnectMessage goroutine die instantaneously,
			// otherwise we'll get a nil pointer dereference panic. We are
			// exploiting the admittedly hacky shortcut where a retriable process
			// returns immediately when given the nil time.Duration value for its
			// ticker.
			zeroRetryConsenter := &consenterImpl{}

			// Let's assume an open errorChan, i.e. a healthy link between the
			// consumer and the Kafka partition corresponding to the channel
			errorChan := make(chan struct{})

			haltChan := make(chan struct{})

			mockSupport := &mockmultichannel.ConsenterSupport{
				ChannelIDVal: mockChannel.topic(),
			}

			bareMinimumChain := &chainImpl{
				consenter:       zeroRetryConsenter, // For sendConnectMessage
				producer:        failedProducer,     // For sendConnectMessage
				parentConsumer:  mockParentConsumer,
				channelConsumer: mockChannelConsumer,

				channel:          mockChannel,
				ConsenterSupport: mockSupport,

				errorChan:                      errorChan,
				haltChan:                       haltChan,
				doneProcessingMessagesToBlocks: make(chan struct{}),
			}

			var counts []uint64
			done := make(chan struct{})

			go func() {
				counts, err = bareMinimumChain.processMessagesToBlocks()
				done <- struct{}{}
			}()

			// This is what the for-loop will process
			mpc.YieldError(fmt.Errorf("fooError"))

			logger.Debug("Closing haltChan to exit the infinite for-loop")
			close(haltChan) // Identical to chain.Halt()
			logger.Debug("haltChan closed")
			<-done

			assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
			assert.Equal(t, uint64(1), counts[indexRecvError], "Expected 1 Kafka error received")

			// A non-blocking receive succeeds only on a closed channel here,
			// proving the kafkaErr branch closed errorChan.
			select {
			case <-bareMinimumChain.errorChan:
				logger.Debug("errorChan is closed as it should be")
			default:
				t.Fatal("errorChan should have been closed")
			}
		})
  2375  
		// After a Kafka error closes errorChan, processing a subsequent regular
		// message should recreate (reopen) errorChan. Currently skipped because
		// the sleep-based synchronization below is racy.
		t.Run("ReceiveKafkaErrorAndThenReceiveRegularMessage", func(t *testing.T) {
			t.Skip("Skipping test as it introduces a race condition")

			// If we set up the mock broker so that it returns a response, if the
			// test finishes before the sendConnectMessage goroutine has received
			// this response, we will get a failure ("not all expectations were
			// satisfied") from the mock broker. So we sabotage the producer.
			failedProducer, _ := sarama.NewSyncProducer([]string{}, mockBrokerConfig)

			// We need to have the sendConnectMessage goroutine die instantaneously,
			// otherwise we'll get a nil pointer dereference panic. We are
			// exploiting the admittedly hacky shortcut where a retriable process
			// returns immediately when given the nil time.Duration value for its
			// ticker.
			zeroRetryConsenter := &consenterImpl{}

			// If the errorChan is closed already, the kafkaErr branch shouldn't
			// touch it
			errorChan := make(chan struct{})
			close(errorChan)

			haltChan := make(chan struct{})

			mockSupport := &mockmultichannel.ConsenterSupport{
				ChannelIDVal: mockChannel.topic(),
			}

			bareMinimumChain := &chainImpl{
				consenter:       zeroRetryConsenter, // For sendConnectMessage
				producer:        failedProducer,     // For sendConnectMessage
				parentConsumer:  mockParentConsumer,
				channelConsumer: mockChannelConsumer,

				channel:          mockChannel,
				ConsenterSupport: mockSupport,

				errorChan:                      errorChan,
				haltChan:                       haltChan,
				doneProcessingMessagesToBlocks: make(chan struct{}),
			}

			done := make(chan struct{})

			go func() {
				_, err = bareMinimumChain.processMessagesToBlocks()
				done <- struct{}{}
			}()

			// This is what the for-loop will process
			mpc.YieldError(fmt.Errorf("foo"))

			// We tested this in ReceiveKafkaErrorAndCloseErrorChan, so this check
			// is redundant in that regard. We use it however to ensure the
			// kafkaErrBranch has been activated before proceeding with pushing the
			// regular message.
			select {
			case <-bareMinimumChain.errorChan:
				logger.Debug("errorChan is closed as it should be")
			case <-time.After(shortTimeout):
				t.Fatal("errorChan should have been closed by now")
			}

			// This is the wrappedMessage that the for-loop will process. We use
			// a broken regular message here on purpose since this is the shortest
			// path and it allows us to test what we want.
			mpc.YieldMessage(newMockConsumerMessage(newRegularMessage(tamperBytes(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage"))))))

			// Sleep so that the Messages/errorChan branch is activated.
			// TODO Hacky approach, will need to revise eventually
			time.Sleep(hitBranch)

			// Check that the errorChan was recreated
			select {
			case <-bareMinimumChain.errorChan:
				t.Fatal("errorChan should have been open")
			default:
				logger.Debug("errorChan is open as it should be")
			}

			logger.Debug("Closing haltChan to exit the infinite for-loop")
			close(haltChan) // Identical to chain.Halt()
			logger.Debug("haltChan closed")
			<-done
		})
  2460  	})
  2461  }
  2462  
  2463  // This ensures message is re-validated if config seq has advanced
  2464  func TestResubmission(t *testing.T) {
  2465  	blockIngressMsg := func(t *testing.T, block bool, fn func() error) {
  2466  		wait := make(chan struct{})
  2467  		go func() {
  2468  			fn()
  2469  			wait <- struct{}{}
  2470  		}()
  2471  
  2472  		select {
  2473  		case <-wait:
  2474  			if block {
  2475  				t.Fatalf("Expected WaitReady to block")
  2476  			}
  2477  		case <-time.After(100 * time.Millisecond):
  2478  			if !block {
  2479  				t.Fatalf("Expected WaitReady not to block")
  2480  			}
  2481  		}
  2482  	}
  2483  
	mockBroker := sarama.NewMockBroker(t, 0)
	defer func() { mockBroker.Close() }()

	mockChannel := newChannel(channelNameForTest(t), defaultPartition)
	// Copy the shared broker config and disable channel buffering so that
	// message delivery through the mock partition consumer is strictly
	// synchronized with the processing loop.
	mockBrokerConfigCopy := *mockBrokerConfig
	mockBrokerConfigCopy.ChannelBufferSize = 0

	mockParentConsumer := mocks.NewConsumer(t, &mockBrokerConfigCopy)
	// mpc is the mock partition consumer handle used to inject messages.
	mpc := mockParentConsumer.ExpectConsumePartition(mockChannel.topic(), mockChannel.partition(), int64(0))
	mockChannelConsumer, err := mockParentConsumer.ConsumePartition(mockChannel.topic(), mockChannel.partition(), int64(0))
	assert.NoError(t, err, "Expected no error when setting up the mock partition consumer")
  2495  
  2496  	t.Run("Normal", func(t *testing.T) {
  2497  		// This test lets kafka emit a mock re-submitted message that does not require reprocessing
  2498  		// (by setting OriginalOffset <= lastOriginalOffsetProcessed)
  2499  		t.Run("AlreadyProcessedDiscard", func(t *testing.T) {
  2500  			if testing.Short() {
  2501  				t.Skip("Skipping test in short mode")
  2502  			}
  2503  
  2504  			errorChan := make(chan struct{})
  2505  			close(errorChan)
  2506  			haltChan := make(chan struct{})
  2507  
  2508  			lastCutBlockNumber := uint64(3)
  2509  			lastOriginalOffsetProcessed := int64(3)
  2510  
  2511  			mockSupport := &mockmultichannel.ConsenterSupport{
  2512  				Blocks:          make(chan *cb.Block), // WriteBlock will post here
  2513  				BlockCutterVal:  mockblockcutter.NewReceiver(),
  2514  				ChannelIDVal:    mockChannel.topic(),
  2515  				HeightVal:       lastCutBlockNumber, // Incremented during the WriteBlock call
  2516  				SharedConfigVal: newMockOrderer(longTimeout, []string{mockBroker.Addr()}, true),
  2517  			}
  2518  			defer close(mockSupport.BlockCutterVal.Block)
  2519  
  2520  			bareMinimumChain := &chainImpl{
  2521  				parentConsumer:  mockParentConsumer,
  2522  				channelConsumer: mockChannelConsumer,
  2523  
  2524  				channel:                     mockChannel,
  2525  				ConsenterSupport:            mockSupport,
  2526  				lastCutBlockNumber:          lastCutBlockNumber,
  2527  				lastOriginalOffsetProcessed: lastOriginalOffsetProcessed,
  2528  
  2529  				errorChan:                      errorChan,
  2530  				haltChan:                       haltChan,
  2531  				doneProcessingMessagesToBlocks: make(chan struct{}),
  2532  			}
  2533  
  2534  			var counts []uint64
  2535  			done := make(chan struct{})
  2536  
  2537  			go func() {
  2538  				counts, err = bareMinimumChain.processMessagesToBlocks()
  2539  				done <- struct{}{}
  2540  			}()
  2541  
  2542  			mockSupport.BlockCutterVal.CutNext = true
  2543  
  2544  			mpc.YieldMessage(newMockConsumerMessage(newNormalMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")), uint64(0), int64(2))))
  2545  
  2546  			select {
  2547  			case <-mockSupport.Blocks:
  2548  				t.Fatalf("Expected no block being cut")
  2549  			case <-time.After(shortTimeout):
  2550  			}
  2551  
  2552  			close(haltChan)
  2553  			logger.Debug("haltChan closed")
  2554  			<-done
  2555  
  2556  			assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
  2557  			assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 2 message received and unmarshaled")
  2558  			assert.Equal(t, uint64(1), counts[indexProcessRegularPass], "Expected 1 REGULAR message processed")
  2559  		})
  2560  
  2561  		// This test lets kafka emit a mock re-submitted message that requires reprocessing
  2562  		// (by setting OriginalOffset > lastOriginalOffsetProcessed)
  2563  		// Two normal messages are enqueued in this test case: reprossed normal message where
  2564  		// `originalOffset` is not 0, followed by a normal msg  where `originalOffset` is 0.
  2565  		// It tests the case that even no block is cut, `lastOriginalOffsetProcessed` is still
  2566  		// updated. We inspect the block to verify correct `LastOriginalOffsetProcessed` in the
  2567  		// kafka metadata.
  2568  		t.Run("ResubmittedMsgEnqueue", func(t *testing.T) {
  2569  			if testing.Short() {
  2570  				t.Skip("Skipping test in short mode")
  2571  			}
  2572  
  2573  			errorChan := make(chan struct{})
  2574  			close(errorChan)
  2575  			haltChan := make(chan struct{})
  2576  
  2577  			lastCutBlockNumber := uint64(3)
  2578  			lastOriginalOffsetProcessed := int64(3)
  2579  
  2580  			mockSupport := &mockmultichannel.ConsenterSupport{
  2581  				Blocks:          make(chan *cb.Block), // WriteBlock will post here
  2582  				BlockCutterVal:  mockblockcutter.NewReceiver(),
  2583  				ChannelIDVal:    mockChannel.topic(),
  2584  				HeightVal:       lastCutBlockNumber, // Incremented during the WriteBlock call
  2585  				SharedConfigVal: newMockOrderer(longTimeout, []string{mockBroker.Addr()}, true),
  2586  				SequenceVal:     uint64(0),
  2587  			}
  2588  			defer close(mockSupport.BlockCutterVal.Block)
  2589  
  2590  			bareMinimumChain := &chainImpl{
  2591  				parentConsumer:  mockParentConsumer,
  2592  				channelConsumer: mockChannelConsumer,
  2593  
  2594  				consenter:                   mockConsenter,
  2595  				channel:                     mockChannel,
  2596  				ConsenterSupport:            mockSupport,
  2597  				lastCutBlockNumber:          lastCutBlockNumber,
  2598  				lastOriginalOffsetProcessed: lastOriginalOffsetProcessed,
  2599  
  2600  				errorChan:                      errorChan,
  2601  				haltChan:                       haltChan,
  2602  				doneProcessingMessagesToBlocks: make(chan struct{}),
  2603  			}
  2604  
  2605  			var counts []uint64
  2606  			done := make(chan struct{})
  2607  
  2608  			go func() {
  2609  				counts, err = bareMinimumChain.processMessagesToBlocks()
  2610  				done <- struct{}{}
  2611  			}()
  2612  
  2613  			mpc.YieldMessage(newMockConsumerMessage(newNormalMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")), uint64(0), int64(4))))
  2614  			mockSupport.BlockCutterVal.Block <- struct{}{}
  2615  
  2616  			select {
  2617  			case <-mockSupport.Blocks:
  2618  				t.Fatalf("Expected no block to be cut")
  2619  			case <-time.After(shortTimeout):
  2620  			}
  2621  
  2622  			mockSupport.BlockCutterVal.CutNext = true
  2623  			mpc.YieldMessage(newMockConsumerMessage(newNormalMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")), uint64(0), int64(0))))
  2624  			mockSupport.BlockCutterVal.Block <- struct{}{}
  2625  
  2626  			select {
  2627  			case block := <-mockSupport.Blocks:
  2628  				metadata := &cb.Metadata{}
  2629  				proto.Unmarshal(block.Metadata.Metadata[cb.BlockMetadataIndex_ORDERER], metadata)
  2630  				kafkaMetadata := &ab.KafkaMetadata{}
  2631  				proto.Unmarshal(metadata.Value, kafkaMetadata)
  2632  				assert.Equal(t, kafkaMetadata.LastOriginalOffsetProcessed, int64(4))
  2633  			case <-time.After(shortTimeout):
  2634  				t.Fatalf("Expected one block being cut")
  2635  			}
  2636  
  2637  			close(haltChan)
  2638  			logger.Debug("haltChan closed")
  2639  			<-done
  2640  
  2641  			assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
  2642  			assert.Equal(t, uint64(2), counts[indexRecvPass], "Expected 2 message received and unmarshaled")
  2643  			assert.Equal(t, uint64(2), counts[indexProcessRegularPass], "Expected 2 REGULAR message processed")
  2644  		})
  2645  
  2646  		t.Run("InvalidDiscard", func(t *testing.T) {
  2647  			if testing.Short() {
  2648  				t.Skip("Skipping test in short mode")
  2649  			}
  2650  
  2651  			errorChan := make(chan struct{})
  2652  			close(errorChan)
  2653  			haltChan := make(chan struct{})
  2654  
  2655  			lastCutBlockNumber := uint64(3)
  2656  
  2657  			mockSupport := &mockmultichannel.ConsenterSupport{
  2658  				Blocks:              make(chan *cb.Block), // WriteBlock will post here
  2659  				BlockCutterVal:      mockblockcutter.NewReceiver(),
  2660  				ChannelIDVal:        mockChannel.topic(),
  2661  				HeightVal:           lastCutBlockNumber, // Incremented during the WriteBlock call
  2662  				SharedConfigVal:     newMockOrderer(longTimeout, []string{mockBroker.Addr()}, true),
  2663  				SequenceVal:         uint64(1),
  2664  				ProcessNormalMsgErr: fmt.Errorf("Invalid normal message"),
  2665  			}
  2666  			defer close(mockSupport.BlockCutterVal.Block)
  2667  
  2668  			bareMinimumChain := &chainImpl{
  2669  				parentConsumer:  mockParentConsumer,
  2670  				channelConsumer: mockChannelConsumer,
  2671  
  2672  				channel:            mockChannel,
  2673  				ConsenterSupport:   mockSupport,
  2674  				lastCutBlockNumber: lastCutBlockNumber,
  2675  
  2676  				errorChan:                      errorChan,
  2677  				haltChan:                       haltChan,
  2678  				doneProcessingMessagesToBlocks: make(chan struct{}),
  2679  			}
  2680  
  2681  			var counts []uint64
  2682  			done := make(chan struct{})
  2683  
  2684  			go func() {
  2685  				counts, err = bareMinimumChain.processMessagesToBlocks()
  2686  				done <- struct{}{}
  2687  			}()
  2688  
  2689  			mpc.YieldMessage(newMockConsumerMessage(newNormalMessage(
  2690  				protoutil.MarshalOrPanic(newMockNormalEnvelope(t)),
  2691  				uint64(0),
  2692  				int64(0))))
  2693  			select {
  2694  			case <-mockSupport.Blocks:
  2695  				t.Fatalf("Expected no block being cut given invalid config message")
  2696  			case <-time.After(shortTimeout):
  2697  			}
  2698  
  2699  			close(haltChan) // Identical to chain.Halt()
  2700  			<-done
  2701  
  2702  			assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
  2703  			assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
  2704  			assert.Equal(t, uint64(1), counts[indexProcessRegularError], "Expected 1 REGULAR message error")
  2705  		})
  2706  
  2707  		// This tests resubmission path with following steps:
  2708  		// 1) Kafka emits a message with lagged config seq, consenter is expected to re-process and
  2709  		//    re-submit the message. However, `WaitReady` shouldn't be blocked for a normal message
  2710  		// 2) Kafka is expected to receive a producer message where config seq is advanced to catch
  2711  		//    up with current config seq, and OriginalOffset is not nil to capture the offset that
  2712  		//    consenter previously received from Kafka
  2713  		// 3) when consenter receives Kafka message submitted in 2), where config seq is in sync,
  2714  		//    it cuts a block for it.
  2715  		t.Run("ValidResubmit", func(t *testing.T) {
  2716  			if testing.Short() {
  2717  				t.Skip("Skipping test in short mode")
  2718  			}
  2719  
  2720  			startChan := make(chan struct{})
  2721  			close(startChan)
  2722  			errorChan := make(chan struct{})
  2723  			close(errorChan)
  2724  			haltChan := make(chan struct{})
  2725  			doneReprocessing := make(chan struct{})
  2726  			close(doneReprocessing)
  2727  
  2728  			lastCutBlockNumber := uint64(3)
  2729  
  2730  			mockSupport := &mockmultichannel.ConsenterSupport{
  2731  				Blocks:          make(chan *cb.Block), // WriteBlock will post here
  2732  				BlockCutterVal:  mockblockcutter.NewReceiver(),
  2733  				ChannelIDVal:    mockChannel.topic(),
  2734  				HeightVal:       lastCutBlockNumber, // Incremented during the WriteBlock call
  2735  				SharedConfigVal: newMockOrderer(longTimeout, []string{mockBroker.Addr()}, true),
  2736  				SequenceVal:     uint64(1),
  2737  				ConfigSeqVal:    uint64(1),
  2738  			}
  2739  			defer close(mockSupport.BlockCutterVal.Block)
  2740  
  2741  			expectedKafkaMsgCh := make(chan *ab.KafkaMessage, 1)
  2742  			producer := mocks.NewSyncProducer(t, mockBrokerConfig)
  2743  			producer.ExpectSendMessageWithCheckerFunctionAndSucceed(func(val []byte) error {
  2744  				defer close(expectedKafkaMsgCh)
  2745  
  2746  				expectedKafkaMsg := &ab.KafkaMessage{}
  2747  				if err := proto.Unmarshal(val, expectedKafkaMsg); err != nil {
  2748  					return err
  2749  				}
  2750  
  2751  				regular := expectedKafkaMsg.GetRegular()
  2752  				if regular == nil {
  2753  					return fmt.Errorf("Expect message type to be regular")
  2754  				}
  2755  
  2756  				if regular.ConfigSeq != mockSupport.Sequence() {
  2757  					return fmt.Errorf("Expect new config seq to be %d, got %d", mockSupport.Sequence(), regular.ConfigSeq)
  2758  				}
  2759  
  2760  				if regular.OriginalOffset == 0 {
  2761  					return fmt.Errorf("Expect Original Offset to be non-zero if resubmission")
  2762  				}
  2763  
  2764  				expectedKafkaMsgCh <- expectedKafkaMsg
  2765  				return nil
  2766  			})
  2767  
  2768  			bareMinimumChain := &chainImpl{
  2769  				producer:        producer,
  2770  				parentConsumer:  mockParentConsumer,
  2771  				channelConsumer: mockChannelConsumer,
  2772  
  2773  				consenter:          mockConsenter,
  2774  				channel:            mockChannel,
  2775  				ConsenterSupport:   mockSupport,
  2776  				lastCutBlockNumber: lastCutBlockNumber,
  2777  
  2778  				startChan:                      startChan,
  2779  				errorChan:                      errorChan,
  2780  				haltChan:                       haltChan,
  2781  				doneProcessingMessagesToBlocks: make(chan struct{}),
  2782  				doneReprocessingMsgInFlight:    doneReprocessing,
  2783  			}
  2784  
  2785  			var counts []uint64
  2786  			done := make(chan struct{})
  2787  
  2788  			go func() {
  2789  				counts, err = bareMinimumChain.processMessagesToBlocks()
  2790  				done <- struct{}{}
  2791  			}()
  2792  
  2793  			mockSupport.BlockCutterVal.CutNext = true
  2794  
  2795  			mpc.YieldMessage(newMockConsumerMessage(newNormalMessage(
  2796  				protoutil.MarshalOrPanic(newMockNormalEnvelope(t)),
  2797  				uint64(0),
  2798  				int64(0))))
  2799  			select {
  2800  			case <-mockSupport.Blocks:
  2801  				t.Fatalf("Expected no block being cut given invalid config message")
  2802  			case <-time.After(shortTimeout):
  2803  			}
  2804  
  2805  			// check that WaitReady is not blocked for a in-flight reprocessed messages of type NORMAL
  2806  			waitReady := make(chan struct{})
  2807  			go func() {
  2808  				bareMinimumChain.WaitReady()
  2809  				waitReady <- struct{}{}
  2810  			}()
  2811  
  2812  			select {
  2813  			case <-waitReady:
  2814  			case <-time.After(100 * time.Millisecond):
  2815  				t.Fatalf("Expected WaitReady call to be unblock because all reprocessed messages are consumed")
  2816  			}
  2817  
  2818  			// Emits the kafka message produced by consenter
  2819  			select {
  2820  			case expectedKafkaMsg := <-expectedKafkaMsgCh:
  2821  				require.NotNil(t, expectedKafkaMsg)
  2822  				mpc.YieldMessage(newMockConsumerMessage(expectedKafkaMsg))
  2823  				mockSupport.BlockCutterVal.Block <- struct{}{}
  2824  			case <-time.After(shortTimeout):
  2825  				t.Fatalf("Expected to receive kafka message")
  2826  			}
  2827  
  2828  			select {
  2829  			case <-mockSupport.Blocks:
  2830  			case <-time.After(shortTimeout):
  2831  				t.Fatalf("Expected one block being cut")
  2832  			}
  2833  
  2834  			close(haltChan) // Identical to chain.Halt()
  2835  			<-done
  2836  
  2837  			assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
  2838  			assert.Equal(t, uint64(2), counts[indexRecvPass], "Expected 2 message received and unmarshaled")
  2839  			assert.Equal(t, uint64(2), counts[indexProcessRegularPass], "Expected 2 REGULAR message error")
  2840  		})
  2841  	})
  2842  
  2843  	t.Run("Config", func(t *testing.T) {
  2844  		// This test lets kafka emit a mock re-submitted message that does not require reprocessing
  2845  		// (by setting OriginalOffset <= lastOriginalOffsetProcessed)
  2846  		t.Run("AlreadyProcessedDiscard", func(t *testing.T) {
  2847  			if testing.Short() {
  2848  				t.Skip("Skipping test in short mode")
  2849  			}
  2850  
  2851  			errorChan := make(chan struct{})
  2852  			close(errorChan)
  2853  			haltChan := make(chan struct{})
  2854  
  2855  			lastCutBlockNumber := uint64(3)
  2856  			lastOriginalOffsetProcessed := int64(3)
  2857  
  2858  			mockSupport := &mockmultichannel.ConsenterSupport{
  2859  				Blocks:          make(chan *cb.Block), // WriteBlock will post here
  2860  				BlockCutterVal:  mockblockcutter.NewReceiver(),
  2861  				ChannelIDVal:    mockChannel.topic(),
  2862  				HeightVal:       lastCutBlockNumber, // Incremented during the WriteBlock call
  2863  				SharedConfigVal: newMockOrderer(longTimeout, []string{mockBroker.Addr()}, true),
  2864  			}
  2865  			defer close(mockSupport.BlockCutterVal.Block)
  2866  
  2867  			bareMinimumChain := &chainImpl{
  2868  				parentConsumer:  mockParentConsumer,
  2869  				channelConsumer: mockChannelConsumer,
  2870  
  2871  				channel:                     mockChannel,
  2872  				ConsenterSupport:            mockSupport,
  2873  				lastCutBlockNumber:          lastCutBlockNumber,
  2874  				lastOriginalOffsetProcessed: lastOriginalOffsetProcessed,
  2875  
  2876  				errorChan:                      errorChan,
  2877  				haltChan:                       haltChan,
  2878  				doneProcessingMessagesToBlocks: make(chan struct{}),
  2879  			}
  2880  
  2881  			var counts []uint64
  2882  			done := make(chan struct{})
  2883  
  2884  			go func() {
  2885  				counts, err = bareMinimumChain.processMessagesToBlocks()
  2886  				done <- struct{}{}
  2887  			}()
  2888  
  2889  			mpc.YieldMessage(newMockConsumerMessage(newConfigMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")), uint64(0), int64(2))))
  2890  
  2891  			select {
  2892  			case <-mockSupport.Blocks:
  2893  				t.Fatalf("Expected no block being cut")
  2894  			case <-time.After(shortTimeout):
  2895  			}
  2896  
  2897  			close(haltChan)
  2898  			logger.Debug("haltChan closed")
  2899  			<-done
  2900  
  2901  			assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
  2902  			assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 2 message received and unmarshaled")
  2903  			assert.Equal(t, uint64(1), counts[indexProcessRegularPass], "Expected 1 REGULAR message processed")
  2904  		})
  2905  
		// This test simulates the non-deterministic case, where somebody resubmitted the message at
		// offset X, whereas we didn't. That message was considered invalid by us during revalidation,
		// however somebody else deemed it to be valid, and resubmitted it.
  2909  		t.Run("Non-determinism", func(t *testing.T) {
  2910  			if testing.Short() {
  2911  				t.Skip("Skipping test in short mode")
  2912  			}
  2913  
  2914  			startChan := make(chan struct{})
  2915  			close(startChan)
  2916  			errorChan := make(chan struct{})
  2917  			close(errorChan)
  2918  			haltChan := make(chan struct{})
  2919  			doneReprocessing := make(chan struct{})
  2920  			close(doneReprocessing)
  2921  
  2922  			lastCutBlockNumber := uint64(3)
  2923  
  2924  			mockSupport := &mockmultichannel.ConsenterSupport{
  2925  				Blocks:              make(chan *cb.Block), // WriteBlock will post here
  2926  				BlockCutterVal:      mockblockcutter.NewReceiver(),
  2927  				ChannelIDVal:        mockChannel.topic(),
  2928  				HeightVal:           lastCutBlockNumber, // Incremented during the WriteBlock call
  2929  				SharedConfigVal:     newMockOrderer(longTimeout, []string{mockBroker.Addr()}, true),
  2930  				SequenceVal:         uint64(1),
  2931  				ConfigSeqVal:        uint64(1),
  2932  				ProcessConfigMsgVal: newMockConfigEnvelope(),
  2933  			}
  2934  			defer close(mockSupport.BlockCutterVal.Block)
  2935  
  2936  			bareMinimumChain := &chainImpl{
  2937  				parentConsumer:  mockParentConsumer,
  2938  				channelConsumer: mockChannelConsumer,
  2939  
  2940  				consenter:          mockConsenter,
  2941  				channel:            mockChannel,
  2942  				ConsenterSupport:   mockSupport,
  2943  				lastCutBlockNumber: lastCutBlockNumber,
  2944  
  2945  				lastResubmittedConfigOffset: int64(0),
  2946  
  2947  				startChan:                      startChan,
  2948  				errorChan:                      errorChan,
  2949  				haltChan:                       haltChan,
  2950  				doneProcessingMessagesToBlocks: make(chan struct{}),
  2951  				doneReprocessingMsgInFlight:    doneReprocessing,
  2952  			}
  2953  
  2954  			var counts []uint64
  2955  			done := make(chan struct{})
  2956  
  2957  			go func() {
  2958  				counts, err = bareMinimumChain.processMessagesToBlocks()
  2959  				done <- struct{}{}
  2960  			}()
  2961  
  2962  			// check that WaitReady is not blocked at beginning
  2963  			blockIngressMsg(t, false, bareMinimumChain.WaitReady)
  2964  
  2965  			// Message should be revalidated but considered invalid, so we don't resubmit it
  2966  			mockSupport.ProcessConfigMsgErr = fmt.Errorf("invalid message found during revalidation")
  2967  
  2968  			// Emits a config message with lagged config sequence
  2969  			mpc.YieldMessage(newMockConsumerMessage(newConfigMessage(
  2970  				protoutil.MarshalOrPanic(newMockConfigEnvelope()),
  2971  				uint64(0),
  2972  				int64(0))))
  2973  			select {
  2974  			case <-mockSupport.Blocks:
  2975  				t.Fatalf("Expected no block being cut")
  2976  			case <-time.After(shortTimeout):
  2977  			}
  2978  
  2979  			// check that WaitReady is still not blocked because we haven't resubmitted anything
  2980  			blockIngressMsg(t, false, bareMinimumChain.WaitReady)
  2981  
  2982  			// Somebody else resubmitted the message which we deemed to be invalid
  2983  			// We deliberately keep ProcessConfigMsgErr unchanged, so we could be
  2984  			// certain that we are not running into revalidation path.
  2985  			mpc.YieldMessage(newMockConsumerMessage(newConfigMessage(
  2986  				protoutil.MarshalOrPanic(newMockConfigEnvelope()),
  2987  				uint64(1),
  2988  				int64(5))))
  2989  
  2990  			select {
  2991  			case block := <-mockSupport.Blocks:
  2992  				metadata, err := protoutil.GetMetadataFromBlock(block, cb.BlockMetadataIndex_ORDERER)
  2993  				assert.NoError(t, err, "Failed to get metadata from block")
  2994  				kafkaMetadata := &ab.KafkaMetadata{}
  2995  				err = proto.Unmarshal(metadata.Value, kafkaMetadata)
  2996  				assert.NoError(t, err, "Failed to unmarshal metadata")
  2997  
  2998  				assert.Equal(t, kafkaMetadata.LastResubmittedConfigOffset, int64(5), "LastResubmittedConfigOffset didn't catch up")
  2999  				assert.Equal(t, kafkaMetadata.LastOriginalOffsetProcessed, int64(5), "LastOriginalOffsetProcessed doesn't match")
  3000  			case <-time.After(shortTimeout):
  3001  				t.Fatalf("Expected one block being cut")
  3002  			}
  3003  
  3004  			close(haltChan) // Identical to chain.Halt()
  3005  			<-done
  3006  
  3007  			assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
  3008  			assert.Equal(t, uint64(2), counts[indexRecvPass], "Expected 2 message received and unmarshaled")
  3009  			assert.Equal(t, uint64(1), counts[indexProcessRegularPass], "Expected 2 REGULAR message error")
  3010  		})
  3011  
  3012  		// This test lets kafka emit a mock re-submitted message whose config seq is still behind
		t.Run("ResubmittedMsgStillBehind", func(t *testing.T) {
			if testing.Short() {
				t.Skip("Skipping test in short mode")
			}

			startChan := make(chan struct{})
			close(startChan)
			errorChan := make(chan struct{})
			close(errorChan)
			haltChan := make(chan struct{})

			lastCutBlockNumber := uint64(3)
			lastOriginalOffsetProcessed := int64(3)

			// Current config seq is 2, so a message resubmitted with config seq 1
			// is still behind and must be re-processed and resubmitted once more.
			mockSupport := &mockmultichannel.ConsenterSupport{
				Blocks:              make(chan *cb.Block), // WriteBlock will post here
				BlockCutterVal:      mockblockcutter.NewReceiver(),
				ChannelIDVal:        mockChannel.topic(),
				HeightVal:           lastCutBlockNumber, // Incremented during the WriteBlock call
				SharedConfigVal:     newMockOrderer(longTimeout, []string{mockBroker.Addr()}, true),
				ChannelConfigVal:    newMockChannel(),
				SequenceVal:         uint64(2),
				ProcessConfigMsgVal: newMockConfigEnvelope(),
			}
			defer close(mockSupport.BlockCutterVal.Block)

			// The chain is expected to resubmit the still-lagging message; accept
			// the produced message without further inspection.
			producer := mocks.NewSyncProducer(t, mockBrokerConfig)
			producer.ExpectSendMessageWithCheckerFunctionAndSucceed(func(val []byte) error {
				return nil
			})

			bareMinimumChain := &chainImpl{
				producer:        producer,
				parentConsumer:  mockParentConsumer,
				channelConsumer: mockChannelConsumer,

				channel:                     mockChannel,
				ConsenterSupport:            mockSupport,
				lastCutBlockNumber:          lastCutBlockNumber,
				lastOriginalOffsetProcessed: lastOriginalOffsetProcessed,

				startChan: startChan,
				errorChan: errorChan,
				haltChan:  haltChan,
				doneProcessingMessagesToBlocks: make(chan struct{}),
				// Left open: reprocessing is considered in flight for the whole test.
				doneReprocessingMsgInFlight: make(chan struct{}),
			}

			// WaitReady should block at beginning since we are in the middle of reprocessing
			blockIngressMsg(t, true, bareMinimumChain.WaitReady)

			var counts []uint64
			done := make(chan struct{})

			go func() {
				counts, err = bareMinimumChain.processMessagesToBlocks()
				done <- struct{}{}
			}()

			// Resubmitted config message: config seq 1 (< current 2), OriginalOffset 4
			// (> lastOriginalOffsetProcessed 3), so it is not a duplicate but still lags.
			mpc.YieldMessage(newMockConsumerMessage(newConfigMessage(protoutil.MarshalOrPanic(newMockEnvelope("fooMessage")), uint64(1), int64(4))))
			select {
			case <-mockSupport.Blocks:
				t.Fatalf("Expected no block being cut")
			case <-time.After(shortTimeout):
			}

			// WaitReady should still block as resubmitted config message is still behind current config seq
			blockIngressMsg(t, true, bareMinimumChain.WaitReady)

			close(haltChan)
			logger.Debug("haltChan closed")
			<-done

			assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
			assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
			assert.Equal(t, uint64(1), counts[indexProcessRegularPass], "Expected 1 REGULAR message processed")
		})
  3090  
		t.Run("InvalidDiscard", func(t *testing.T) {
			if testing.Short() {
				t.Skip("Skipping test in short mode")
			}

			errorChan := make(chan struct{})
			close(errorChan)
			haltChan := make(chan struct{})

			lastCutBlockNumber := uint64(3)

			// ProcessConfigMsgErr makes revalidation of the lagging config message
			// fail, so the consenter should drop it instead of resubmitting.
			mockSupport := &mockmultichannel.ConsenterSupport{
				Blocks:              make(chan *cb.Block), // WriteBlock will post here
				BlockCutterVal:      mockblockcutter.NewReceiver(),
				ChannelIDVal:        mockChannel.topic(),
				HeightVal:           lastCutBlockNumber, // Incremented during the WriteBlock call
				SharedConfigVal:     newMockOrderer(longTimeout, []string{mockBroker.Addr()}, true),
				SequenceVal:         uint64(1),
				ProcessConfigMsgErr: fmt.Errorf("Invalid config message"),
			}
			defer close(mockSupport.BlockCutterVal.Block)

			bareMinimumChain := &chainImpl{
				parentConsumer:  mockParentConsumer,
				channelConsumer: mockChannelConsumer,

				channel:            mockChannel,
				ConsenterSupport:   mockSupport,
				lastCutBlockNumber: lastCutBlockNumber,

				errorChan:                      errorChan,
				haltChan:                       haltChan,
				doneProcessingMessagesToBlocks: make(chan struct{}),
			}

			var counts []uint64
			done := make(chan struct{})

			go func() {
				counts, err = bareMinimumChain.processMessagesToBlocks()
				done <- struct{}{}
			}()

			// Config message with lagged config seq (0 < 1) triggers revalidation,
			// which fails with ProcessConfigMsgErr above.
			mpc.YieldMessage(newMockConsumerMessage(newConfigMessage(
				protoutil.MarshalOrPanic(newMockNormalEnvelope(t)),
				uint64(0),
				int64(0))))
			select {
			case <-mockSupport.Blocks:
				t.Fatalf("Expected no block being cut given invalid config message")
			case <-time.After(shortTimeout):
			}

			close(haltChan) // Identical to chain.Halt()
			<-done

			assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
			assert.Equal(t, uint64(1), counts[indexRecvPass], "Expected 1 message received and unmarshaled")
			assert.Equal(t, uint64(1), counts[indexProcessRegularError], "Expected 1 REGULAR message error")
		})
  3151  
  3152  		// This tests resubmission path with following steps:
  3153  		// 1) Kafka emits a message with lagged config seq, consenter is expected to re-process and
  3154  		//    re-submit the message, as well as block `WaitReady` API
  3155  		// 2) Kafka is expected to receive a producer message where config seq is advanced to catch
  3156  		//    up with current config seq, and OriginalOffset is not nil to capture the offset that
  3157  		//    consenter previously received from Kafka
  3158  		// 3) when consenter receives Kafka message submitted in 2), where config seq is in sync,
  3159  		//    it cuts a block for it and lifts block on `WaitReady`.
  3160  		t.Run("ValidResubmit", func(t *testing.T) {
  3161  			if testing.Short() {
  3162  				t.Skip("Skipping test in short mode")
  3163  			}
  3164  
  3165  			startChan := make(chan struct{})
  3166  			close(startChan)
  3167  			errorChan := make(chan struct{})
  3168  			close(errorChan)
  3169  			haltChan := make(chan struct{})
  3170  			doneReprocessing := make(chan struct{})
  3171  			close(doneReprocessing)
  3172  
  3173  			lastCutBlockNumber := uint64(3)
  3174  
  3175  			mockSupport := &mockmultichannel.ConsenterSupport{
  3176  				Blocks:              make(chan *cb.Block), // WriteBlock will post here
  3177  				BlockCutterVal:      mockblockcutter.NewReceiver(),
  3178  				ChannelIDVal:        mockChannel.topic(),
  3179  				HeightVal:           lastCutBlockNumber, // Incremented during the WriteBlock call
  3180  				SharedConfigVal:     newMockOrderer(longTimeout, []string{mockBroker.Addr()}, true),
  3181  				ChannelConfigVal:    newMockChannel(),
  3182  				SequenceVal:         uint64(1),
  3183  				ConfigSeqVal:        uint64(1),
  3184  				ProcessConfigMsgVal: newMockConfigEnvelope(),
  3185  			}
  3186  			defer close(mockSupport.BlockCutterVal.Block)
  3187  
  3188  			expectedKafkaMsgCh := make(chan *ab.KafkaMessage, 1)
  3189  			producer := mocks.NewSyncProducer(t, mockBrokerConfig)
  3190  			producer.ExpectSendMessageWithCheckerFunctionAndSucceed(func(val []byte) error {
  3191  				defer close(expectedKafkaMsgCh)
  3192  
  3193  				expectedKafkaMsg := &ab.KafkaMessage{}
  3194  				if err := proto.Unmarshal(val, expectedKafkaMsg); err != nil {
  3195  					return err
  3196  				}
  3197  
  3198  				regular := expectedKafkaMsg.GetRegular()
  3199  				if regular == nil {
  3200  					return fmt.Errorf("Expect message type to be regular")
  3201  				}
  3202  
  3203  				if regular.ConfigSeq != mockSupport.Sequence() {
  3204  					return fmt.Errorf("Expect new config seq to be %d, got %d", mockSupport.Sequence(), regular.ConfigSeq)
  3205  				}
  3206  
  3207  				if regular.OriginalOffset == 0 {
  3208  					return fmt.Errorf("Expect Original Offset to be non-zero if resubmission")
  3209  				}
  3210  
  3211  				expectedKafkaMsgCh <- expectedKafkaMsg
  3212  				return nil
  3213  			})
  3214  
  3215  			bareMinimumChain := &chainImpl{
  3216  				producer:        producer,
  3217  				parentConsumer:  mockParentConsumer,
  3218  				channelConsumer: mockChannelConsumer,
  3219  
  3220  				consenter:          mockConsenter,
  3221  				channel:            mockChannel,
  3222  				ConsenterSupport:   mockSupport,
  3223  				lastCutBlockNumber: lastCutBlockNumber,
  3224  
  3225  				startChan:                      startChan,
  3226  				errorChan:                      errorChan,
  3227  				haltChan:                       haltChan,
  3228  				doneProcessingMessagesToBlocks: make(chan struct{}),
  3229  				doneReprocessingMsgInFlight:    doneReprocessing,
  3230  			}
  3231  
  3232  			var counts []uint64
  3233  			done := make(chan struct{})
  3234  
  3235  			go func() {
  3236  				counts, err = bareMinimumChain.processMessagesToBlocks()
  3237  				done <- struct{}{}
  3238  			}()
  3239  
  3240  			// check that WaitReady is not blocked at beginning
  3241  			blockIngressMsg(t, false, bareMinimumChain.WaitReady)
  3242  
  3243  			// Emits a config message with lagged config sequence
  3244  			mpc.YieldMessage(newMockConsumerMessage(newConfigMessage(
  3245  				protoutil.MarshalOrPanic(newMockConfigEnvelope()),
  3246  				uint64(0),
  3247  				int64(0))))
  3248  			select {
  3249  			case <-mockSupport.Blocks:
  3250  				t.Fatalf("Expected no block being cut given lagged config message")
  3251  			case <-time.After(shortTimeout):
  3252  			}
  3253  
  3254  			// check that WaitReady is actually blocked because of in-flight reprocessed messages
  3255  			blockIngressMsg(t, true, bareMinimumChain.WaitReady)
  3256  
  3257  			select {
  3258  			case expectedKafkaMsg := <-expectedKafkaMsgCh:
  3259  				require.NotNil(t, expectedKafkaMsg)
  3260  				// Emits the kafka message produced by consenter
  3261  				mpc.YieldMessage(newMockConsumerMessage(expectedKafkaMsg))
  3262  			case <-time.After(shortTimeout):
  3263  				t.Fatalf("Expected to receive kafka message")
  3264  			}
  3265  
  3266  			select {
  3267  			case <-mockSupport.Blocks:
  3268  			case <-time.After(shortTimeout):
  3269  				t.Fatalf("Expected one block being cut")
  3270  			}
  3271  
  3272  			// `WaitReady` should be unblocked now
  3273  			blockIngressMsg(t, false, bareMinimumChain.WaitReady)
  3274  
  3275  			close(haltChan) // Identical to chain.Halt()
  3276  			<-done
  3277  
  3278  			assert.NoError(t, err, "Expected the processMessagesToBlocks call to return without errors")
  3279  			assert.Equal(t, uint64(2), counts[indexRecvPass], "Expected 2 message received and unmarshaled")
  3280  			assert.Equal(t, uint64(2), counts[indexProcessRegularPass], "Expected 2 REGULAR message error")
  3281  		})
  3282  	})
  3283  }
  3284  
  3285  // Test helper functions here.
  3286  
  3287  func newRegularMessage(payload []byte) *ab.KafkaMessage {
  3288  	return &ab.KafkaMessage{
  3289  		Type: &ab.KafkaMessage_Regular{
  3290  			Regular: &ab.KafkaMessageRegular{
  3291  				Payload: payload,
  3292  			},
  3293  		},
  3294  	}
  3295  }
  3296  
  3297  func newMockNormalEnvelope(t *testing.T) *cb.Envelope {
  3298  	return &cb.Envelope{Payload: protoutil.MarshalOrPanic(&cb.Payload{
  3299  		Header: &cb.Header{ChannelHeader: protoutil.MarshalOrPanic(
  3300  			&cb.ChannelHeader{Type: int32(cb.HeaderType_MESSAGE), ChannelId: channelNameForTest(t)})},
  3301  		Data: []byte("Foo"),
  3302  	})}
  3303  }
  3304  
  3305  func newMockConfigEnvelope() *cb.Envelope {
  3306  	return &cb.Envelope{Payload: protoutil.MarshalOrPanic(&cb.Payload{
  3307  		Header: &cb.Header{ChannelHeader: protoutil.MarshalOrPanic(
  3308  			&cb.ChannelHeader{Type: int32(cb.HeaderType_CONFIG), ChannelId: "foo"})},
  3309  		Data: protoutil.MarshalOrPanic(&cb.ConfigEnvelope{}),
  3310  	})}
  3311  }
  3312  
  3313  func newMockOrdererTxEnvelope() *cb.Envelope {
  3314  	return &cb.Envelope{Payload: protoutil.MarshalOrPanic(&cb.Payload{
  3315  		Header: &cb.Header{ChannelHeader: protoutil.MarshalOrPanic(
  3316  			&cb.ChannelHeader{Type: int32(cb.HeaderType_ORDERER_TRANSACTION), ChannelId: "foo"})},
  3317  		Data: protoutil.MarshalOrPanic(newMockConfigEnvelope()),
  3318  	})}
  3319  }
  3320  
  3321  func TestDeliverSession(t *testing.T) {
  3322  
	// testEnvironment aggregates the mock brokers, mock consenter support, and
	// bookkeeping values shared by the deliver-session subtests.
	type testEnvironment struct {
		channelID  string
		topic      string // same value as channelID; Kafka topic name
		partition  int32
		height     int64 // ledger height reported by the mock support
		nextOffset int64 // next Kafka offset the mock brokers will serve
		support    *mockConsenterSupport
		broker0    *sarama.MockBroker // seeds metadata (brokers + partition leader)
		broker1    *sarama.MockBroker // initial partition leader
		broker2    *sarama.MockBroker // takes over leadership in BrokerDeath
		testMsg    sarama.Encoder
	}
  3335  
  3336  	// initializes test environment
	// newTestEnvironment stands up three mock Kafka brokers (metadata seed,
	// initial leader, standby) and a fully-stubbed consenter support, returning
	// everything bundled in a testEnvironment.
	newTestEnvironment := func(t *testing.T) *testEnvironment {

		channelID := channelNameForTest(t)
		topic := channelID
		partition := int32(defaultPartition)
		height := int64(100)
		nextOffset := height + 1
		broker0 := sarama.NewMockBroker(t, 0)
		broker1 := sarama.NewMockBroker(t, 1)
		broker2 := sarama.NewMockBroker(t, 2)

		// broker0 will seed the info about the other brokers and the partition leader
		broker0.SetHandlerByMap(map[string]sarama.MockResponse{
			"MetadataRequest": sarama.NewMockMetadataResponse(t).
				SetBroker(broker1.Addr(), broker1.BrokerID()).
				SetBroker(broker2.Addr(), broker2.BrokerID()).
				SetLeader(topic, partition, broker1.BrokerID()),
		})

		// configure broker1 with responses needed for startup
		broker1.SetHandlerByMap(map[string]sarama.MockResponse{
			// CONNECT ProduceRequest
			"ProduceRequest": sarama.NewMockProduceResponse(t).
				SetError(topic, partition, sarama.ErrNoError),
			// respond to request for offset of topic
			"OffsetRequest": sarama.NewMockOffsetResponse(t).
				SetOffset(topic, partition, sarama.OffsetOldest, 0).
				SetOffset(topic, partition, sarama.OffsetNewest, nextOffset),
			// respond to fetch requests with empty response while starting up
			"FetchRequest": sarama.NewMockFetchResponse(t, 1),
		})

		// configure broker2 with a default fetch request response
		broker2.SetHandlerByMap(map[string]sarama.MockResponse{
			// respond to fetch requests with empty response while starting up
			"FetchRequest": sarama.NewMockFetchResponse(t, 1),
		})

		// setup mock blockcutter: every Ordered call yields one single-envelope
		// batch so each consumed message produces a block
		blockcutter := &mockReceiver{}
		blockcutter.On("Ordered", mock.Anything).Return([][]*cb.Envelope{{&cb.Envelope{}}}, false)

		// setup mock chain support and mock method calls
		support := &mockConsenterSupport{}
		support.On("Height").Return(uint64(height))
		support.On("ChannelID").Return(topic)
		support.On("Sequence").Return(uint64(0))
		support.On("SharedConfig").Return(newMockOrderer(0, []string{broker0.Addr()}, false))
		support.On("ClassifyMsg", mock.Anything).Return(msgprocessor.NormalMsg, nil)
		support.On("ProcessNormalMsg", mock.Anything).Return(uint64(0), nil)
		support.On("BlockCutter").Return(blockcutter)
		support.On("CreateNextBlock", mock.Anything).Return(&cb.Block{})
		// NOTE(review): "Serialize" is registered without a chained .Return(...) —
		// verify this expectation is intentional.
		support.On("Serialize", []byte("creator"), nil)

		// test message that will be returned by mock brokers
		testMsg := sarama.ByteEncoder(protoutil.MarshalOrPanic(
			newRegularMessage(protoutil.MarshalOrPanic(&cb.Envelope{
				Payload: protoutil.MarshalOrPanic(&cb.Payload{
					Header: &cb.Header{
						ChannelHeader: protoutil.MarshalOrPanic(&cb.ChannelHeader{
							ChannelId: topic,
						}),
					},
					Data: []byte("TEST_DATA"),
				})})),
		))

		return &testEnvironment{
			channelID:  channelID,
			topic:      topic,
			partition:  partition,
			height:     height,
			nextOffset: nextOffset,
			support:    support,
			broker0:    broker0,
			broker1:    broker1,
			broker2:    broker2,
			testMsg:    testMsg,
		}
	}
  3417  
  3418  	// BrokerDeath simulates the partition leader dying and a
  3419  	// second broker becoming the leader before the deliver session times out.
  3420  	t.Run("BrokerDeath", func(t *testing.T) {
  3421  
  3422  		// initialize test environment
  3423  		env := newTestEnvironment(t)
  3424  
  3425  		// broker1 will be closed within the test
  3426  		defer env.broker0.Close()
  3427  		defer env.broker2.Close()
  3428  
  3429  		// initialize consenter
  3430  		consenter, _ := New(mockLocalConfig.Kafka, &disabled.Provider{}, &mockkafka.HealthChecker{}, nil, func(string) {})
  3431  
  3432  		// initialize chain
  3433  		metadata := &cb.Metadata{Value: protoutil.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: env.height})}
  3434  		chain, err := consenter.HandleChain(env.support, metadata)
  3435  		if err != nil {
  3436  			t.Fatal(err)
  3437  		}
  3438  
  3439  		// start the chain, and wait for it to settle down
  3440  		chain.Start()
  3441  		select {
  3442  		case <-chain.(*chainImpl).startChan:
  3443  			logger.Debug("chain started")
  3444  		case <-time.After(shortTimeout):
  3445  			t.Fatal("chain should have started by now")
  3446  		}
  3447  
  3448  		// direct blocks to this channel
  3449  		blocks := make(chan *cb.Block, 1)
  3450  		env.support.On("WriteBlock", mock.Anything, mock.Anything).Return().Run(func(arg1 mock.Arguments) {
  3451  			blocks <- arg1.Get(0).(*cb.Block)
  3452  		})
  3453  
  3454  		// send a few messages from broker1
  3455  		fetchResponse1 := sarama.NewMockFetchResponse(t, 1)
  3456  		for i := 0; i < 5; i++ {
  3457  			fetchResponse1.SetMessage(env.topic, env.partition, env.nextOffset, env.testMsg)
  3458  			env.nextOffset++
  3459  		}
  3460  		env.broker1.SetHandlerByMap(map[string]sarama.MockResponse{
  3461  			"FetchRequest": fetchResponse1,
  3462  		})
  3463  
  3464  		logger.Debug("Waiting for messages from broker1")
  3465  		for i := 0; i < 5; i++ {
  3466  			select {
  3467  			case <-blocks:
  3468  			case <-time.After(shortTimeout):
  3469  				t.Fatalf("timed out waiting for messages (receieved %d messages)", i)
  3470  			}
  3471  		}
  3472  
  3473  		// prepare broker2 to send a few messages
  3474  		fetchResponse2 := sarama.NewMockFetchResponse(t, 1)
  3475  		for i := 0; i < 5; i++ {
  3476  			fetchResponse2.SetMessage(env.topic, env.partition, env.nextOffset, env.testMsg)
  3477  			env.nextOffset++
  3478  		}
  3479  
  3480  		env.broker2.SetHandlerByMap(map[string]sarama.MockResponse{
  3481  			"ProduceRequest": sarama.NewMockProduceResponse(t).
  3482  				SetError(env.topic, env.partition, sarama.ErrNoError),
  3483  			"FetchRequest": fetchResponse2,
  3484  		})
  3485  
  3486  		// shutdown broker1
  3487  		env.broker1.Close()
  3488  
  3489  		// prepare broker0 to respond that broker2 is now the leader
  3490  		env.broker0.SetHandlerByMap(map[string]sarama.MockResponse{
  3491  			"MetadataRequest": sarama.NewMockMetadataResponse(t).
  3492  				SetLeader(env.topic, env.partition, env.broker2.BrokerID()),
  3493  		})
  3494  
  3495  		logger.Debug("Waiting for messages from broker2")
  3496  		for i := 0; i < 5; i++ {
  3497  			select {
  3498  			case <-blocks:
  3499  			case <-time.After(shortTimeout):
  3500  				t.Fatalf("timed out waiting for messages (receieved %d messages)", i)
  3501  			}
  3502  		}
  3503  
  3504  		chain.Halt()
  3505  	})
  3506  
	// An ErrOffsetOutOfRange is non-recoverable: the chain is expected to
	// close its Errored() channel instead of consuming any further messages,
	// even when a valid message follows the error in the fetch response.
	t.Run("ErrOffsetOutOfRange", func(t *testing.T) {

		// initialize test environment
		env := newTestEnvironment(t)

		// broker cleanup
		defer env.broker2.Close()
		defer env.broker1.Close()
		defer env.broker0.Close()

		// initialize consenter
		consenter, _ := New(mockLocalConfig.Kafka, &disabled.Provider{}, &mockkafka.HealthChecker{}, nil, func(string) {})

		// initialize chain, resuming from the ledger height recorded in the environment
		metadata := &cb.Metadata{Value: protoutil.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: env.height})}
		chain, err := consenter.HandleChain(env.support, metadata)
		if err != nil {
			t.Fatal(err)
		}

		// start the chain, and wait for it to settle down
		chain.Start()
		select {
		case <-chain.(*chainImpl).startChan:
			logger.Debug("chain started")
		case <-time.After(shortTimeout):
			t.Fatal("chain should have started by now")
		}

		// direct blocks to this channel
		blocks := make(chan *cb.Block, 1)
		env.support.On("WriteBlock", mock.Anything, mock.Anything).Return().Run(func(arg1 mock.Arguments) {
			blocks <- arg1.Get(0).(*cb.Block)
		})

		// set broker1 to respond to two fetch requests:
		// - The first fetch request will get an ErrOffsetOutOfRange error response.
		// - The second fetch request will get a valid (i.e. non-error) response.
		fetchResponse := &sarama.FetchResponse{}
		fetchResponse.AddError(env.topic, env.partition, sarama.ErrOffsetOutOfRange)
		fetchResponse.AddMessage(env.topic, env.partition, nil, env.testMsg, env.nextOffset)
		env.nextOffset++
		env.broker1.SetHandlerByMap(map[string]sarama.MockResponse{
			"FetchRequest": sarama.NewMockWrapper(fetchResponse),
			// answers for CONNECT message
			"ProduceRequest": sarama.NewMockProduceResponse(t).
				SetError(env.topic, env.partition, sarama.ErrNoError),
		})

		select {
		case <-blocks:
			// the valid fetch response should not have been consumed
			t.Fatal("Did not expect new blocks")
		case <-time.After(shortTimeout):
			t.Fatal("Errored() should have closed by now")
		case <-chain.Errored():
			// expected: the non-recoverable error closed the channel
		}

		chain.Halt()
	})
  3568  
	// test chain timeout: when the deliver session keeps failing and cannot
	// re-subscribe to the topic/partition, the chain is expected to close its
	// Errored() channel within the configured read timeout.
	t.Run("DeliverSessionTimedOut", func(t *testing.T) {

		// initialize test environment
		env := newTestEnvironment(t)

		// broker cleanup
		defer env.broker2.Close()
		defer env.broker1.Close()
		defer env.broker0.Close()

		// initialize consenter
		consenter, _ := New(mockLocalConfig.Kafka, &disabled.Provider{}, &mockkafka.HealthChecker{}, nil, func(string) {})

		// initialize chain, resuming from the ledger height recorded in the environment
		metadata := &cb.Metadata{Value: protoutil.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: env.height})}
		chain, err := consenter.HandleChain(env.support, metadata)
		if err != nil {
			t.Fatal(err)
		}

		// start the chain, and wait for it to settle down
		chain.Start()
		select {
		case <-chain.(*chainImpl).startChan:
			logger.Debug("chain started")
		case <-time.After(shortTimeout):
			t.Fatal("chain should have started by now")
		}

		// direct blocks to this channel
		blocks := make(chan *cb.Block, 1)
		env.support.On("WriteBlock", mock.Anything, mock.Anything).Return().Run(func(arg1 mock.Arguments) {
			blocks <- arg1.Get(0).(*cb.Block)
		})

		// make the seed broker report that no broker is available for the partition
		metadataResponse := new(sarama.MetadataResponse)
		metadataResponse.AddTopicPartition(env.topic, env.partition, -1, []int32{}, []int32{}, sarama.ErrBrokerNotAvailable)

		// configure seed broker to return error on metadata request, otherwise the
		// consumer client will keep 'subscribing' successfully to the topic/partition
		env.broker0.SetHandlerByMap(map[string]sarama.MockResponse{
			"MetadataRequest": sarama.NewMockWrapper(metadataResponse),
		})

		// set broker1 to return an error.
		// Note that the following are not considered errors from the sarama client
		// consumer's point of view:
		// - ErrUnknownTopicOrPartition
		// - ErrNotLeaderForPartition
		// - ErrLeaderNotAvailable
		// - ErrReplicaNotAvailable:
		fetchResponse := &sarama.FetchResponse{}
		fetchResponse.AddError(env.topic, env.partition, sarama.ErrUnknown)
		env.broker1.SetHandlerByMap(map[string]sarama.MockResponse{
			"FetchRequest": sarama.NewMockWrapper(fetchResponse),
			// answers for CONNECT message
			"ProduceRequest": sarama.NewMockProduceResponse(t).
				SetError(env.topic, env.partition, sarama.ErrNoError),
		})

		// the deliver session should give up within ReadTimeout; allow a
		// little slack (shortTimeout) before declaring the test failed
		select {
		case <-blocks:
			t.Fatal("Did not expect new blocks")
		case <-time.After(mockRetryOptions.NetworkTimeouts.ReadTimeout + shortTimeout):
			t.Fatal("Errored() should have closed by now")
		case <-chain.Errored():
			t.Log("Errored() closed")
		}

		chain.Halt()
	})
  3641  
  3642  }
  3643  
  3644  func TestHealthCheck(t *testing.T) {
  3645  	gt := NewGomegaWithT(t)
  3646  	var err error
  3647  
  3648  	ch := newChannel("mockChannelFoo", defaultPartition)
  3649  	mockSyncProducer := &mockkafka.SyncProducer{}
  3650  	chain := &chainImpl{
  3651  		channel:  ch,
  3652  		producer: mockSyncProducer,
  3653  	}
  3654  
  3655  	err = chain.HealthCheck(context.Background())
  3656  	gt.Expect(err).NotTo(HaveOccurred())
  3657  	gt.Expect(mockSyncProducer.SendMessageCallCount()).To(Equal(1))
  3658  
  3659  	payload := protoutil.MarshalOrPanic(newConnectMessage())
  3660  	message := newProducerMessage(chain.channel, payload)
  3661  	gt.Expect(mockSyncProducer.SendMessageArgsForCall(0)).To(Equal(message))
  3662  
  3663  	// Only return error if the error is not for enough replicas
  3664  	mockSyncProducer.SendMessageReturns(int32(1), int64(1), sarama.ErrNotEnoughReplicas)
  3665  	chain.replicaIDs = []int32{int32(1), int32(2)}
  3666  	err = chain.HealthCheck(context.Background())
  3667  	gt.Expect(err).To(MatchError(fmt.Sprintf("[replica ids: [1 2]]: %s", sarama.ErrNotEnoughReplicas.Error())))
  3668  	gt.Expect(mockSyncProducer.SendMessageCallCount()).To(Equal(2))
  3669  
  3670  	// If another type of error is returned, it should be ignored by health check
  3671  	mockSyncProducer.SendMessageReturns(int32(1), int64(1), errors.New("error occurred"))
  3672  	err = chain.HealthCheck(context.Background())
  3673  	gt.Expect(err).NotTo(HaveOccurred())
  3674  	gt.Expect(mockSyncProducer.SendMessageCallCount()).To(Equal(3))
  3675  }
  3676  
// mockReceiver is a testify-based stand-in whose Ordered and Cut methods
// mirror the blockcutter.Receiver interface; tests program its behavior
// via the embedded mock.Mock.
type mockReceiver struct {
	mock.Mock
}
  3680  
  3681  func (r *mockReceiver) Ordered(msg *cb.Envelope) (messageBatches [][]*cb.Envelope, pending bool) {
  3682  	args := r.Called(msg)
  3683  	return args.Get(0).([][]*cb.Envelope), args.Bool(1)
  3684  }
  3685  
  3686  func (r *mockReceiver) Cut() []*cb.Envelope {
  3687  	args := r.Called()
  3688  	return args.Get(0).([]*cb.Envelope)
  3689  }
  3690  
// mockConsenterSupport is a testify-based stand-in for the consenter
// support used by the chain under test; each method delegates to the
// embedded mock.Mock so tests can program return values per call.
type mockConsenterSupport struct {
	mock.Mock
}
  3694  
// Block always returns nil; block retrieval is not exercised by these tests.
func (c *mockConsenterSupport) Block(seq uint64) *cb.Block {
	return nil
}
  3698  
// VerifyBlockSignature always succeeds; signature verification is not
// exercised by these tests.
func (c *mockConsenterSupport) VerifyBlockSignature([]*protoutil.SignedData, *cb.ConfigEnvelope) error {
	return nil
}
  3702  
  3703  func (c *mockConsenterSupport) NewSignatureHeader() (*cb.SignatureHeader, error) {
  3704  	args := c.Called()
  3705  	return args.Get(0).(*cb.SignatureHeader), args.Error(1)
  3706  }
  3707  
  3708  func (c *mockConsenterSupport) Sign(message []byte) ([]byte, error) {
  3709  	args := c.Called(message)
  3710  	return args.Get(0).([]byte), args.Error(1)
  3711  }
  3712  
  3713  func (c *mockConsenterSupport) Serialize() ([]byte, error) {
  3714  	args := c.Called()
  3715  	return args.Get(0).([]byte), args.Error(1)
  3716  }
  3717  
  3718  func (c *mockConsenterSupport) ClassifyMsg(chdr *cb.ChannelHeader) msgprocessor.Classification {
  3719  	args := c.Called(chdr)
  3720  	return args.Get(0).(msgprocessor.Classification)
  3721  }
  3722  
  3723  func (c *mockConsenterSupport) ProcessNormalMsg(env *cb.Envelope) (configSeq uint64, err error) {
  3724  	args := c.Called(env)
  3725  	return args.Get(0).(uint64), args.Error(1)
  3726  }
  3727  
  3728  func (c *mockConsenterSupport) ProcessConfigUpdateMsg(env *cb.Envelope) (config *cb.Envelope, configSeq uint64, err error) {
  3729  	args := c.Called(env)
  3730  	return args.Get(0).(*cb.Envelope), args.Get(1).(uint64), args.Error(2)
  3731  }
  3732  
  3733  func (c *mockConsenterSupport) ProcessConfigMsg(env *cb.Envelope) (config *cb.Envelope, configSeq uint64, err error) {
  3734  	args := c.Called(env)
  3735  	return args.Get(0).(*cb.Envelope), args.Get(1).(uint64), args.Error(2)
  3736  }
  3737  
  3738  func (c *mockConsenterSupport) BlockCutter() blockcutter.Receiver {
  3739  	args := c.Called()
  3740  	return args.Get(0).(blockcutter.Receiver)
  3741  }
  3742  
  3743  func (c *mockConsenterSupport) SharedConfig() channelconfig.Orderer {
  3744  	args := c.Called()
  3745  	return args.Get(0).(channelconfig.Orderer)
  3746  }
  3747  
  3748  func (c *mockConsenterSupport) ChannelConfig() channelconfig.Channel {
  3749  	args := c.Called()
  3750  	return args.Get(0).(channelconfig.Channel)
  3751  }
  3752  
  3753  func (c *mockConsenterSupport) CreateNextBlock(messages []*cb.Envelope) *cb.Block {
  3754  	args := c.Called(messages)
  3755  	return args.Get(0).(*cb.Block)
  3756  }
  3757  
  3758  func (c *mockConsenterSupport) WriteBlock(block *cb.Block, encodedMetadataValue []byte) {
  3759  	c.Called(block, encodedMetadataValue)
  3760  	return
  3761  }
  3762  
  3763  func (c *mockConsenterSupport) WriteConfigBlock(block *cb.Block, encodedMetadataValue []byte) {
  3764  	c.Called(block, encodedMetadataValue)
  3765  	return
  3766  }
  3767  
  3768  func (c *mockConsenterSupport) Sequence() uint64 {
  3769  	args := c.Called()
  3770  	return args.Get(0).(uint64)
  3771  }
  3772  
  3773  func (c *mockConsenterSupport) ChannelID() string {
  3774  	args := c.Called()
  3775  	return args.String(0)
  3776  }
  3777  
  3778  func (c *mockConsenterSupport) Height() uint64 {
  3779  	args := c.Called()
  3780  	return args.Get(0).(uint64)
  3781  }
  3782  
// Append records the appended block with the testify mock and always
// succeeds.
func (c *mockConsenterSupport) Append(block *cb.Block) error {
	c.Called(block)
	return nil
}