github.com/adnan-c/fabric_e2e_couchdb@v0.6.1-preview.0.20170228180935-21ce6b23cf91/orderer/kafka/orderer_test.go

/*
Copyright IBM Corp. 2016 All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

                 http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kafka

import (
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/Shopify/sarama"
	"github.com/hyperledger/fabric/common/configtx/tool/provisional"
	mockconfigvaluesorderer "github.com/hyperledger/fabric/common/mocks/configvalues/channel/orderer"
	"github.com/hyperledger/fabric/orderer/localconfig"
	mockblockcutter "github.com/hyperledger/fabric/orderer/mocks/blockcutter"
	mockmultichain "github.com/hyperledger/fabric/orderer/mocks/multichain"
	"github.com/hyperledger/fabric/orderer/multichain"
	cb "github.com/hyperledger/fabric/protos/common"
	ab "github.com/hyperledger/fabric/protos/orderer"
	"github.com/hyperledger/fabric/protos/utils"
)

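// cp is the Kafka chain partition (the test chain ID on the raw partition)
// that every test in this file produces to and consumes from.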
var cp = newChainPartition(provisional.TestChainID, rawPartition)

func newMockSharedConfigManager() *mockconfigvaluesorderer.SharedConfig {
	return &mockconfigvaluesorderer.SharedConfig{KafkaBrokersVal: testGenesisConf.Orderer.Kafka.Brokers}
}

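// mockConsenterImpl embeds the real consenterImpl but swaps in mock broker,
// producer, and consumer constructors. The prodDisk and consDisk channels
// stand in for the Kafka partition: whatever the mock producer sends shows up
// on prodDisk, and whatever a test forwards to consDisk is what the mock
// consumer returns to the chain.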
type mockConsenterImpl struct {
	consenterImpl
	prodDisk, consDisk chan *ab.KafkaMessage
	consumerSetUp      bool
	t                  *testing.T
}

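// mockNewConsenter returns a consenter whose broker, producer, and consumer
// factories are all backed by mocks. nextProducedOffset is the offset the mock
// producer will assign to the first blob it sends; the consumer factory panics
// if it is asked to seek to a different offset.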
func mockNewConsenter(t *testing.T, kafkaVersion sarama.KafkaVersion, retryOptions config.Retry, nextProducedOffset int64) *mockConsenterImpl {
	prodDisk := make(chan *ab.KafkaMessage)
	consDisk := make(chan *ab.KafkaMessage)

	mockTLS := config.TLS{Enabled: false}

	mockBfValue := func(brokers []string, cp ChainPartition) (Broker, error) {
		return mockNewBroker(t, cp)
	}
	mockPfValue := func(brokers []string, kafkaVersion sarama.KafkaVersion, retryOptions config.Retry, tls config.TLS) Producer {
		// The first Send on this producer will return a blob with offset #nextProducedOffset
		return mockNewProducer(t, cp, nextProducedOffset, prodDisk)
	}
	mockCfValue := func(brokers []string, kafkaVersion sarama.KafkaVersion, tls config.TLS, cp ChainPartition, lastPersistedOffset int64) (Consumer, error) {
		if lastPersistedOffset != nextProducedOffset {
			panic(fmt.Errorf("Mock objects about to be set up incorrectly (consumer to seek to %d, producer to post %d)", lastPersistedOffset, nextProducedOffset))
		}
		return mockNewConsumer(t, cp, lastPersistedOffset, consDisk)
	}

	return &mockConsenterImpl{
		consenterImpl: consenterImpl{
			kv:  kafkaVersion,
			ro:  retryOptions,
			tls: mockTLS,
			bf:  mockBfValue,
			pf:  mockPfValue,
			cf:  mockCfValue,
		},
		prodDisk: prodDisk,
		consDisk: consDisk,
		t:        t,
	}
}

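// prepareMockObjectDisks blocks until the mock producer and consumer attached
// to the given chain have completed their setup, and drains the CONNECT
// message that is posted by Start(), so that each test begins with empty
// disks.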
func prepareMockObjectDisks(t *testing.T, co *mockConsenterImpl, ch *chainImpl) {
	// Wait until the mock producer is done before messing around with its disk
	select {
	case <-ch.producer.(*mockProducerImpl).isSetup:
		// Dispense with the CONNECT message that is posted with Start()
		<-co.prodDisk
	case <-time.After(testTimePadding):
		t.Fatal("Mock producer not set up in time")
	}
	// Same for the mock consumer
	select {
	case <-ch.setupChan:
	case <-time.After(testTimePadding):
		t.Fatal("Mock consumer not set up in time")
	}
}

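// syncQueueMessage enqueues msg on the chain and then blocks until the mock
// block cutter has received it, keeping the test in lockstep with the chain's
// processing loop.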
func syncQueueMessage(msg *cb.Envelope, chain multichain.Chain, bc *mockblockcutter.Receiver) {
	chain.Enqueue(msg)
	bc.Block <- struct{}{}
}

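// waitableSyncQueueMessage enqueues env via syncQueueMessage while a helper
// goroutine shuttles messagesToPickUp messages from the producer's disk to the
// consumer's disk, simulating the Kafka round trip for every message the chain
// is expected to post.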
func waitableSyncQueueMessage(env *cb.Envelope, messagesToPickUp int, wg *sync.WaitGroup,
	co *mockConsenterImpl, cs *mockmultichain.ConsenterSupport, ch *chainImpl) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < messagesToPickUp; i++ {
			// On the first iteration of this loop, the message that will be picked up
			// is the one posted via the syncQueueMessage/Enqueue call below
			msg := <-co.prodDisk
			// Place it in the right location so that the mock consumer can read it
			co.consDisk <- msg
		}
	}()

	syncQueueMessage(env, ch, cs.BlockCutterVal)
	// By the time syncQueueMessage returns, the message has already been moved
	// to the consumer's disk, so this Wait() is strictly unnecessary; keep it
	// anyway, to be on the safe side.
	wg.Wait()
}

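// TestKafkaConsenterEmptyBatch checks that halting the chain while a single
// message is pending (no cut, no timeout) does not produce a block.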
func TestKafkaConsenterEmptyBatch(t *testing.T) {
	var wg sync.WaitGroup
	defer wg.Wait()
	cs := &mockmultichain.ConsenterSupport{
		Batches:         make(chan []*cb.Envelope),
		BlockCutterVal:  mockblockcutter.NewReceiver(),
		ChainIDVal:      provisional.TestChainID,
		SharedConfigVal: &mockconfigvaluesorderer.SharedConfig{BatchTimeoutVal: testTimePadding},
	}
	defer close(cs.BlockCutterVal.Block)

	lastPersistedOffset := testOldestOffset - 1
	nextProducedOffset := lastPersistedOffset + 1
	co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset)
	ch := newChain(co, cs, lastPersistedOffset)

	go ch.Start()
	defer ch.Halt()

	prepareMockObjectDisks(t, co, ch)

	waitableSyncQueueMessage(newTestEnvelope("one"), 1, &wg, co, cs, ch)

	// Stop the loop
	ch.Halt()

	select {
	case <-cs.Batches:
		t.Fatal("Expected no invocations of Append")
	case <-ch.haltedChan: // If we're here, we definitely had a chance to invoke Append but didn't (which is great)
	}
}

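// TestKafkaConsenterBatchTimer checks that the batch timer cuts a block when
// it expires, and that it is reset so that a later message cuts another block.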
func TestKafkaConsenterBatchTimer(t *testing.T) {
	var wg sync.WaitGroup
	defer wg.Wait()

	batchTimeout, _ := time.ParseDuration("1ms")
	cs := &mockmultichain.ConsenterSupport{
		Batches:         make(chan []*cb.Envelope),
		BlockCutterVal:  mockblockcutter.NewReceiver(),
		ChainIDVal:      provisional.TestChainID,
		SharedConfigVal: &mockconfigvaluesorderer.SharedConfig{BatchTimeoutVal: batchTimeout},
	}
	defer close(cs.BlockCutterVal.Block)

	lastPersistedOffset := testOldestOffset - 1
	nextProducedOffset := lastPersistedOffset + 1
	co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset)
	ch := newChain(co, cs, lastPersistedOffset)

	go ch.Start()
	defer ch.Halt()

	prepareMockObjectDisks(t, co, ch)

	// The second message that will be picked up is the time-to-cut message
	// that will be posted when the short timer expires
	waitableSyncQueueMessage(newTestEnvelope("one"), 2, &wg, co, cs, ch)

	select {
	case <-cs.Batches: // This is the success path
	case <-time.After(testTimePadding):
		t.Fatal("Expected block to be cut because batch timer expired")
	}

	// As above
	waitableSyncQueueMessage(newTestEnvelope("two"), 2, &wg, co, cs, ch)

	select {
	case <-cs.Batches: // This is the success path
	case <-time.After(testTimePadding):
		t.Fatal("Expected second block to be cut; the batch timer was apparently not reset")
	}

	// Stop the loop
	ch.Halt()

	select {
	case <-cs.Batches:
		t.Fatal("Expected no invocations of Append")
	case <-ch.haltedChan: // If we're here, we definitely had a chance to invoke Append but didn't (which is great)
	}
}

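// TestKafkaConsenterTimerHaltOnFilledBatch checks that cutting a block because
// the batch filled up also resets the batch timer.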
func TestKafkaConsenterTimerHaltOnFilledBatch(t *testing.T) {
	var wg sync.WaitGroup
	defer wg.Wait()

	batchTimeout, _ := time.ParseDuration("1h")
	cs := &mockmultichain.ConsenterSupport{
		Batches:         make(chan []*cb.Envelope),
		BlockCutterVal:  mockblockcutter.NewReceiver(),
		ChainIDVal:      provisional.TestChainID,
		SharedConfigVal: &mockconfigvaluesorderer.SharedConfig{BatchTimeoutVal: batchTimeout},
	}
	defer close(cs.BlockCutterVal.Block)

	lastPersistedOffset := testOldestOffset - 1
	nextProducedOffset := lastPersistedOffset + 1
	co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset)
	ch := newChain(co, cs, lastPersistedOffset)

	go ch.Start()
	defer ch.Halt()

	prepareMockObjectDisks(t, co, ch)

	waitableSyncQueueMessage(newTestEnvelope("one"), 1, &wg, co, cs, ch)

	cs.BlockCutterVal.CutNext = true

	waitableSyncQueueMessage(newTestEnvelope("two"), 1, &wg, co, cs, ch)

	select {
	case <-cs.Batches:
	case <-time.After(testTimePadding):
		t.Fatal("Expected block to be cut because the batch was filled (CutNext was set)")
	}

	// Change the batch timeout to be near instant.
	// If the timer was not reset, it will still be waiting an hour.
	ch.batchTimeout = time.Millisecond

	cs.BlockCutterVal.CutNext = false

	// The second message that will be picked up is the time-to-cut message
	// that will be posted when the short timer expires
	waitableSyncQueueMessage(newTestEnvelope("three"), 2, &wg, co, cs, ch)

	select {
	case <-cs.Batches:
	case <-time.After(testTimePadding):
		t.Fatalf("Did not cut the second block, indicating that the old timer was still running")
	}

	// Stop the loop
	ch.Halt()

	select {
	case <-cs.Batches:
		t.Fatal("Expected no invocations of Append")
	case <-ch.haltedChan: // If we're here, we definitely had a chance to invoke Append but didn't (which is great)
	}
}

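// TestKafkaConsenterConfigStyleMultiBatch checks that an isolated (config-style)
// transaction makes the block cutter return two batches and that both are
// written out as blocks.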
func TestKafkaConsenterConfigStyleMultiBatch(t *testing.T) {
	var wg sync.WaitGroup
	defer wg.Wait()

	cs := &mockmultichain.ConsenterSupport{
		Batches:         make(chan []*cb.Envelope),
		BlockCutterVal:  mockblockcutter.NewReceiver(),
		ChainIDVal:      provisional.TestChainID,
		SharedConfigVal: &mockconfigvaluesorderer.SharedConfig{BatchTimeoutVal: testTimePadding},
	}
	defer close(cs.BlockCutterVal.Block)

	lastPersistedOffset := testOldestOffset - 1
	nextProducedOffset := lastPersistedOffset + 1
	co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset)
	ch := newChain(co, cs, lastPersistedOffset)

	go ch.Start()
	defer ch.Halt()

	prepareMockObjectDisks(t, co, ch)

	waitableSyncQueueMessage(newTestEnvelope("one"), 1, &wg, co, cs, ch)

	cs.BlockCutterVal.IsolatedTx = true

	waitableSyncQueueMessage(newTestEnvelope("two"), 1, &wg, co, cs, ch)

	ch.Halt()

	select {
	case <-cs.Batches:
	case <-time.After(testTimePadding):
		t.Fatal("Expected two blocks to be cut but never got the first")
	}

	select {
	case <-cs.Batches:
	case <-time.After(testTimePadding):
		t.Fatal("Expected the config type tx to create two blocks, but only got the first")
	}

	select {
	case <-time.After(testTimePadding):
		t.Fatal("Should have exited")
	case <-ch.haltedChan:
	}
}

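// TestKafkaConsenterTimeToCutForced checks that a time-to-cut message for the
// next expected block causes the pending batch to be cut.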
func TestKafkaConsenterTimeToCutForced(t *testing.T) {
	var wg sync.WaitGroup
	defer wg.Wait()

	batchTimeout, _ := time.ParseDuration("1h")
	cs := &mockmultichain.ConsenterSupport{
		Batches:         make(chan []*cb.Envelope),
		BlockCutterVal:  mockblockcutter.NewReceiver(),
		ChainIDVal:      provisional.TestChainID,
		SharedConfigVal: &mockconfigvaluesorderer.SharedConfig{BatchTimeoutVal: batchTimeout},
	}
	defer close(cs.BlockCutterVal.Block)

	lastPersistedOffset := testOldestOffset - 1
	nextProducedOffset := lastPersistedOffset + 1
	co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset)
	ch := newChain(co, cs, lastPersistedOffset)

	go ch.Start()
	defer ch.Halt()

	prepareMockObjectDisks(t, co, ch)

	waitableSyncQueueMessage(newTestEnvelope("one"), 1, &wg, co, cs, ch)

	cs.BlockCutterVal.CutNext = true

	// This is like the waitableSyncQueueMessage routine with the difference
	// that we post a time-to-cut message instead of a test envelope.
	wg.Add(1)
	go func() {
		defer wg.Done()
		msg := <-co.prodDisk
		co.consDisk <- msg
	}()

	if err := ch.producer.Send(ch.partition, utils.MarshalOrPanic(newTimeToCutMessage(ch.lastCutBlock+1))); err != nil {
		t.Fatalf("Couldn't post to %s: %s", ch.partition, err)
	}
	wg.Wait()

	select {
	case <-cs.Batches:
	case <-time.After(testTimePadding):
		t.Fatal("Expected block to be cut because proper time-to-cut was sent")
	}

	// Stop the loop
	ch.Halt()

	select {
	case <-cs.Batches:
		t.Fatal("Expected no invocations of Append")
	case <-ch.haltedChan: // If we're here, we definitely had a chance to invoke Append but didn't (which is great)
	}
}

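// TestKafkaConsenterTimeToCutDuplicate checks that a time-to-cut message for a
// block that has already been cut is discarded.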
func TestKafkaConsenterTimeToCutDuplicate(t *testing.T) {
	var wg sync.WaitGroup
	defer wg.Wait()

	batchTimeout, _ := time.ParseDuration("1h")
	cs := &mockmultichain.ConsenterSupport{
		Batches:         make(chan []*cb.Envelope),
		BlockCutterVal:  mockblockcutter.NewReceiver(),
		ChainIDVal:      provisional.TestChainID,
		SharedConfigVal: &mockconfigvaluesorderer.SharedConfig{BatchTimeoutVal: batchTimeout},
	}
	defer close(cs.BlockCutterVal.Block)

	lastPersistedOffset := testOldestOffset - 1
	nextProducedOffset := lastPersistedOffset + 1
	co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset)
	ch := newChain(co, cs, lastPersistedOffset)

	go ch.Start()
	defer ch.Halt()

	prepareMockObjectDisks(t, co, ch)

	waitableSyncQueueMessage(newTestEnvelope("one"), 1, &wg, co, cs, ch)

	cs.BlockCutterVal.CutNext = true

	// This is like the waitableSyncQueueMessage routine with the difference
	// that we post a time-to-cut message instead of a test envelope.
	wg.Add(1)
	go func() {
		defer wg.Done()
		msg := <-co.prodDisk
		co.consDisk <- msg
	}()

	// Send a proper time-to-cut message
	if err := ch.producer.Send(ch.partition, utils.MarshalOrPanic(newTimeToCutMessage(ch.lastCutBlock+1))); err != nil {
		t.Fatalf("Couldn't post to %s: %s", ch.partition, err)
	}
	wg.Wait()

	select {
	case <-cs.Batches:
	case <-time.After(testTimePadding):
		t.Fatal("Expected block to be cut because proper time-to-cut was sent")
	}

	cs.BlockCutterVal.CutNext = false

	waitableSyncQueueMessage(newTestEnvelope("two"), 1, &wg, co, cs, ch)

	cs.BlockCutterVal.CutNext = true
	// ATTN: We set `cs.BlockCutterVal.CutNext` to true on purpose.
	// If the logic works right, the orderer should discard the
	// duplicate TTC message below, and the block cutter will only
	// be called when the hour-long batch timer expires.

	// As above
	wg.Add(1)
	go func() {
		defer wg.Done()
		msg := <-co.prodDisk
		co.consDisk <- msg
	}()

	// Send a duplicate time-to-cut message
	if err := ch.producer.Send(ch.partition, utils.MarshalOrPanic(newTimeToCutMessage(ch.lastCutBlock))); err != nil {
		t.Fatalf("Couldn't post to %s: %s", ch.partition, err)
	}
	wg.Wait()

	select {
	case <-cs.Batches:
		t.Fatal("Should have discarded duplicate time-to-cut")
	case <-time.After(testTimePadding):
		// This is the success path
	}

	// Stop the loop
	ch.Halt()

	select {
	case <-cs.Batches:
		t.Fatal("Expected no invocations of Append")
	case <-ch.haltedChan: // If we're here, we definitely had a chance to invoke Append but didn't (which is great)
	}
}

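// TestKafkaConsenterTimeToCutStale checks that a time-to-cut message lagging
// behind the next expected block is ignored.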
func TestKafkaConsenterTimeToCutStale(t *testing.T) {
	var wg sync.WaitGroup
	defer wg.Wait()

	batchTimeout, _ := time.ParseDuration("1h")
	cs := &mockmultichain.ConsenterSupport{
		Batches:         make(chan []*cb.Envelope),
		BlockCutterVal:  mockblockcutter.NewReceiver(),
		ChainIDVal:      provisional.TestChainID,
		SharedConfigVal: &mockconfigvaluesorderer.SharedConfig{BatchTimeoutVal: batchTimeout},
	}
	defer close(cs.BlockCutterVal.Block)

	lastPersistedOffset := testOldestOffset - 1
	nextProducedOffset := lastPersistedOffset + 1
	co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset)
	ch := newChain(co, cs, lastPersistedOffset)

	go ch.Start()
	defer ch.Halt()

	prepareMockObjectDisks(t, co, ch)

	waitableSyncQueueMessage(newTestEnvelope("one"), 1, &wg, co, cs, ch)

	cs.BlockCutterVal.CutNext = true

	// This is like the waitableSyncQueueMessage routine with the difference
	// that we post a time-to-cut message instead of a test envelope.
	wg.Add(1)
	go func() {
		defer wg.Done()
		msg := <-co.prodDisk
		co.consDisk <- msg
	}()

	// Send a stale time-to-cut message
	if err := ch.producer.Send(ch.partition, utils.MarshalOrPanic(newTimeToCutMessage(ch.lastCutBlock))); err != nil {
		t.Fatalf("Couldn't post to %s: %s", ch.partition, err)
	}
	wg.Wait()

	select {
	case <-cs.Batches:
		t.Fatal("Should have ignored stale time-to-cut")
	case <-time.After(testTimePadding):
		// This is the success path
	}

	// Stop the loop
	ch.Halt()

	select {
	case <-cs.Batches:
		t.Fatal("Expected no invocations of Append")
	case <-ch.haltedChan: // If we're here, we definitely had a chance to invoke Append but didn't (which is great)
	}
}

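// TestKafkaConsenterTimeToCutLarger checks that a time-to-cut message that
// skips ahead of the next expected block is ignored, and that a duplicate call
// to Halt() does not panic.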
func TestKafkaConsenterTimeToCutLarger(t *testing.T) {
	var wg sync.WaitGroup
	defer wg.Wait()

	batchTimeout, _ := time.ParseDuration("1h")
	cs := &mockmultichain.ConsenterSupport{
		Batches:         make(chan []*cb.Envelope),
		BlockCutterVal:  mockblockcutter.NewReceiver(),
		ChainIDVal:      provisional.TestChainID,
		SharedConfigVal: &mockconfigvaluesorderer.SharedConfig{BatchTimeoutVal: batchTimeout},
	}
	defer close(cs.BlockCutterVal.Block)

	lastPersistedOffset := testOldestOffset - 1
	nextProducedOffset := lastPersistedOffset + 1
	co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset)
	ch := newChain(co, cs, lastPersistedOffset)

	go ch.Start()
	defer ch.Halt()

	prepareMockObjectDisks(t, co, ch)

	waitableSyncQueueMessage(newTestEnvelope("one"), 1, &wg, co, cs, ch)

	cs.BlockCutterVal.CutNext = true

	// This is like the waitableSyncQueueMessage routine with the difference
	// that we post a time-to-cut message instead of a test envelope.
	wg.Add(1)
	go func() {
		defer wg.Done()
		msg := <-co.prodDisk
		co.consDisk <- msg
	}()

	// Send a time-to-cut message that skips ahead of the next expected block
	if err := ch.producer.Send(ch.partition, utils.MarshalOrPanic(newTimeToCutMessage(ch.lastCutBlock+2))); err != nil {
		t.Fatalf("Couldn't post to %s: %s", ch.partition, err)
	}
	wg.Wait()

	select {
	case <-cs.Batches:
		t.Fatal("Should have ignored a larger-than-expected time-to-cut")
	case <-time.After(testTimePadding):
		// This is the success path
	}

	// Halt() is also deferred at the top of this test, so the chain ends up
	// being halted twice; this is a good test to see whether a second
	// invocation of Halt() panics. (It shouldn't.)
	defer func() {
		if r := recover(); r != nil {
			t.Fatal("Expected duplicate call to Halt to succeed")
		}
	}()

	ch.Halt()

	select {
	case <-cs.Batches:
		t.Fatal("Expected no invocations of Append")
	case <-ch.haltedChan: // If we're here, we definitely had a chance to invoke Append but didn't (which is great)
	}
}

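// The TestGetLastOffsetPersisted* tests cover getLastOffsetPersisted: with
// empty metadata it falls back to sarama.OffsetOldest - 1, and with valid
// Kafka metadata it returns the recorded offset.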
func TestGetLastOffsetPersistedEmpty(t *testing.T) {
	expected := sarama.OffsetOldest - 1
	actual := getLastOffsetPersisted(&cb.Metadata{})
	if actual != expected {
		t.Fatalf("Expected last offset %d, got %d", expected, actual)
	}
}

func TestGetLastOffsetPersistedRight(t *testing.T) {
	expected := int64(100)
	actual := getLastOffsetPersisted(&cb.Metadata{Value: utils.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: expected})})
	if actual != expected {
		t.Fatalf("Expected last offset %d, got %d", expected, actual)
	}
}

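// TestKafkaConsenterRestart simulates an orderer restart: it cuts a block,
// halts the chain, recovers the last persisted offset from the block's
// metadata, and checks that a new chain resumes producing at the next offset.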
func TestKafkaConsenterRestart(t *testing.T) {
	var wg sync.WaitGroup
	defer wg.Wait()

	batchTimeout, _ := time.ParseDuration("1ms")
	cs := &mockmultichain.ConsenterSupport{
		Batches:         make(chan []*cb.Envelope),
		BlockCutterVal:  mockblockcutter.NewReceiver(),
		ChainIDVal:      provisional.TestChainID,
		SharedConfigVal: &mockconfigvaluesorderer.SharedConfig{BatchTimeoutVal: batchTimeout},
	}
	defer close(cs.BlockCutterVal.Block)

	lastPersistedOffset := testOldestOffset - 1
	nextProducedOffset := lastPersistedOffset + 1
	co := mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset)
	ch := newChain(co, cs, lastPersistedOffset)

	go ch.Start()
	defer ch.Halt()

	prepareMockObjectDisks(t, co, ch)

	// The second message that will be picked up is the time-to-cut message
	// that will be posted when the short timer expires
	waitableSyncQueueMessage(newTestEnvelope("one"), 2, &wg, co, cs, ch)

	select {
	case <-cs.Batches: // This is the success path
	case <-time.After(testTimePadding):
		t.Fatal("Expected block to be cut because batch timer expired")
	}

	// Stop the loop
	ch.Halt()

	select {
	case <-cs.Batches:
		t.Fatal("Expected no invocations of Append")
	case <-ch.haltedChan: // If we're here, we definitely had a chance to invoke Append but didn't (which is great)
	}

	lastBlock := cs.WriteBlockVal
	metadata, err := utils.GetMetadataFromBlock(lastBlock, cb.BlockMetadataIndex_ORDERER)
	if err != nil {
		t.Fatalf("Error extracting orderer metadata for chain %s: %s", cs.ChainIDVal, err)
	}

	lastPersistedOffset = getLastOffsetPersisted(metadata)
	nextProducedOffset = lastPersistedOffset + 1

	co = mockNewConsenter(t, testConf.Kafka.Version, testConf.Kafka.Retry, nextProducedOffset)
	ch = newChain(co, cs, lastPersistedOffset)
	go ch.Start()
	prepareMockObjectDisks(t, co, ch)

	actual := ch.producer.(*mockProducerImpl).producedOffset
	if actual != nextProducedOffset {
		t.Fatalf("Restarted orderer post-connect should have been at offset %d, got %d instead", nextProducedOffset, actual)
	}
}