/*
Copyright IBM Corp. 2016 All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

		 http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kafka

import (
	"fmt"
	"testing"

	"github.com/Shopify/sarama/mocks"
	"github.com/golang/protobuf/proto"
	ab "github.com/hyperledger/fabric/protos/orderer"
	"github.com/hyperledger/fabric/protos/utils"
)

// mockProducerImpl is a test double for the Producer interface, backed by
// sarama's mock SyncProducer. Messages it "sends" are unmarshaled and pushed
// onto the disk channel, which stands in for the broker's storage.
type mockProducerImpl struct {
	producer *mocks.SyncProducer // sarama mock standing in for a real Kafka sync producer
	checker  mocks.ValueChecker  // checker registered with the mock before each send (nil = no check)

	// This simulates the broker's "disk" where the producer's
	// blobs for a certain chain partition eventually end up.
	disk           chan *ab.KafkaMessage
	producedOffset int64         // offset assigned to the most recently sent message
	isSetup        chan struct{} // closed once the producer has been seeded up to the requested offset
	t              *testing.T    // test context used to fail fast on mock misbehavior
}

// Create a new producer whose next "Send" on ChainPartition gives you blob #offset.
42 func mockNewProducer(t *testing.T, cp ChainPartition, offset int64, disk chan *ab.KafkaMessage) Producer { 43 mp := &mockProducerImpl{ 44 producer: mocks.NewSyncProducer(t, nil), 45 checker: nil, 46 disk: disk, 47 producedOffset: 0, 48 isSetup: make(chan struct{}), 49 t: t, 50 } 51 mp.init(cp, offset) 52 53 if mp.producedOffset == offset-1 { 54 close(mp.isSetup) 55 } else { 56 mp.t.Fatal("Mock producer failed to initialize itself properly") 57 } 58 59 return mp 60 } 61 62 func (mp *mockProducerImpl) Send(cp ChainPartition, payload []byte) error { 63 mp.producer.ExpectSendMessageWithCheckerFunctionAndSucceed(mp.checker) 64 mp.producedOffset++ // This is the offset that will be assigned to the sent message 65 if _, ofs, err := mp.producer.SendMessage(newProducerMessage(cp, payload)); err != nil || ofs != mp.producedOffset { 66 // We do NOT check the assigned partition because the mock 67 // producer always posts to partition 0 no matter what. 68 // This is a deficiency of the Kafka library that we use. 69 mp.t.Fatal("Mock producer not functioning as expected") 70 } 71 msg := new(ab.KafkaMessage) 72 if err := proto.Unmarshal(payload, msg); err != nil { 73 mp.t.Fatalf("Failed to unmarshal message that reached producer's disk: %s", err) 74 } 75 mp.disk <- msg // Reaches the cluster's disk for that chain partition 76 return nil 77 } 78 79 func (mp *mockProducerImpl) Close() error { 80 return mp.producer.Close() 81 } 82 83 // Initializes the mock producer by setting up the offsets. 84 func (mp *mockProducerImpl) init(cp ChainPartition, offset int64) { 85 if offset >= testOldestOffset && offset <= (testNewestOffset-1) { 86 // Prepare the producer so that the next Send 87 // on that chain partition gives you blob #offset. 
88 mp.testFillWithBlocks(cp, offset-1) 89 } else { 90 panic(fmt.Errorf("Out of range offset (seek number) given to producer: %d", offset)) 91 } 92 } 93 94 func (mp *mockProducerImpl) testFillWithBlocks(cp ChainPartition, offset int64) { 95 dieChan := make(chan struct{}) 96 deadChan := make(chan struct{}) 97 98 go func() { // This goroutine is meant to read only the "fill-in" blocks 99 for { 100 select { 101 case <-mp.disk: 102 case <-dieChan: 103 close(deadChan) 104 return 105 } 106 } 107 }() 108 109 for i := int64(1); i <= offset; i++ { 110 mp.Send(cp, utils.MarshalOrPanic(newRegularMessage(utils.MarshalOrPanic(newTestEnvelope(fmt.Sprintf("producer fill-in %d", i)))))) 111 } 112 113 close(dieChan) 114 <-deadChan 115 116 return 117 }