github.com/leonlxy/hyperledger@v1.0.0-alpha.0.20170427033203-34922035d248/orderer/kafka/util.go

/*
Copyright IBM Corp. 2016 All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

                 http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kafka

import (
	"crypto/tls"
	"crypto/x509"
	"strconv"

	"github.com/Shopify/sarama"
	"github.com/hyperledger/fabric/orderer/localconfig"
	ab "github.com/hyperledger/fabric/protos/orderer"
)

// newBrokerConfig returns the sarama configuration that the orderer uses
// when connecting to the Kafka cluster.
func newBrokerConfig(kafkaVersion sarama.KafkaVersion, chosenStaticPartition int32, tlsConfig config.TLS) *sarama.Config {
	brokerConfig := sarama.NewConfig()

	brokerConfig.Consumer.Return.Errors = true

	brokerConfig.Net.TLS.Enable = tlsConfig.Enabled
	if brokerConfig.Net.TLS.Enable {
		// Create the public/private key pair structure
		keyPair, err := tls.X509KeyPair([]byte(tlsConfig.Certificate), []byte(tlsConfig.PrivateKey))
		if err != nil {
			logger.Panicf("Unable to decode public/private key pair: %s", err)
		}
		// Create the root CA pool
		rootCAs := x509.NewCertPool()
		for _, certificate := range tlsConfig.RootCAs {
			if !rootCAs.AppendCertsFromPEM([]byte(certificate)) {
				logger.Panic("Unable to parse the root certificate authority certificates (Kafka.TLS.RootCAs)")
			}
		}
		brokerConfig.Net.TLS.Config = &tls.Config{
			Certificates: []tls.Certificate{keyPair},
			RootCAs:      rootCAs,
			MinVersion:   0, // TLS 1.0 is the minimum accepted version (no SSL support)
			MaxVersion:   0, // Latest supported TLS version
		}
	}

	// Set the equivalent of the Kafka producer config max.request.bytes to the
	// default value of a Kafka broker's socket.request.max.bytes property (100 MiB).
	brokerConfig.Producer.MaxMessageBytes = int(sarama.MaxRequestSize)
	// A partitioner is actually not needed the way we do things now,
	// but we add it anyway to allow for flexibility in the future.
	brokerConfig.Producer.Partitioner = newStaticPartitioner(chosenStaticPartition)
	// Set the level of acknowledgement reliability needed from the broker.
	// WaitForAll means that the partition leader will wait until all in-sync
	// replicas (ISRs) have received the message before sending an ACK back
	// to the sender.
	brokerConfig.Producer.RequiredAcks = sarama.WaitForAll
	// An esoteric setting required by the sarama library, see:
	// https://github.com/Shopify/sarama/issues/816
	brokerConfig.Producer.Return.Successes = true

	brokerConfig.Version = kafkaVersion

	return brokerConfig
}

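// exampleProducer is a hypothetical sketch (not part of the original file):
// it shows how the configuration built above is typically consumed, i.e.
// handed to sarama when creating a synchronous producer. The Kafka version,
// partition number, and broker address are assumed placeholder values.
func exampleProducer(tlsConfig config.TLS) (sarama.SyncProducer, error) {
	brokerConfig := newBrokerConfig(sarama.V0_9_0_1, 0, tlsConfig)
	// NewSyncProducer blocks until it can fetch metadata from the cluster.
	return sarama.NewSyncProducer([]string{"127.0.0.1:9092"}, brokerConfig)
}
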
// newConnectMessage returns a Kafka message of type CONNECT; its payload
// is empty.
func newConnectMessage() *ab.KafkaMessage {
	return &ab.KafkaMessage{
		Type: &ab.KafkaMessage_Connect{
			Connect: &ab.KafkaMessageConnect{
				Payload: nil,
			},
		},
	}
}

// newRegularMessage wraps the given payload into a Kafka message of type
// REGULAR.
func newRegularMessage(payload []byte) *ab.KafkaMessage {
	return &ab.KafkaMessage{
		Type: &ab.KafkaMessage_Regular{
			Regular: &ab.KafkaMessageRegular{
				Payload: payload,
			},
		},
	}
}

// newTimeToCutMessage returns a Kafka message of type TIMETOCUT carrying
// the given block number.
func newTimeToCutMessage(blockNumber uint64) *ab.KafkaMessage {
	return &ab.KafkaMessage{
		Type: &ab.KafkaMessage_TimeToCut{
			TimeToCut: &ab.KafkaMessageTimeToCut{
				BlockNumber: blockNumber,
			},
		},
	}
}

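// classifyMessage is a hypothetical sketch (not part of the original file)
// illustrating how the oneof wrapper set by the constructors above is
// typically unpacked on the receiving side via a type switch.
func classifyMessage(msg *ab.KafkaMessage) string {
	switch msg.Type.(type) {
	case *ab.KafkaMessage_Connect:
		return "CONNECT"
	case *ab.KafkaMessage_Regular:
		return "REGULAR"
	case *ab.KafkaMessage_TimeToCut:
		return "TIMETOCUT"
	default:
		return "unknown"
	}
}
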
// newProducerMessage wraps an already-marshaled payload into the sarama
// type that the producer expects, keyed by the chain's partition.
func newProducerMessage(cp ChainPartition, payload []byte) *sarama.ProducerMessage {
	return &sarama.ProducerMessage{
		Topic: cp.Topic(),
		Key:   sarama.StringEncoder(strconv.Itoa(int(cp.Partition()))), // TODO Consider writing an IntEncoder?
		Value: sarama.ByteEncoder(payload),
	}
}

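// sendExample is a hypothetical sketch (not part of the original file): it
// pairs newProducerMessage with a sarama synchronous producer. The producer
// and payload arguments are assumed to be supplied by the caller.
func sendExample(producer sarama.SyncProducer, cp ChainPartition, payload []byte) error {
	// SendMessage returns the partition and offset the message landed on;
	// only the error is of interest here.
	_, _, err := producer.SendMessage(newProducerMessage(cp, payload))
	return err
}
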
// newOffsetReq creates an offset request for the given chain partition.
func newOffsetReq(cp ChainPartition, offset int64) *sarama.OffsetRequest {
	req := &sarama.OffsetRequest{}
	// If offset (seek) == -1 (sarama.OffsetNewest), ask for the offset that
	// will be assigned to the next new message.
	// If offset (seek) == -2 (sarama.OffsetOldest), ask for the earliest
	// available offset.
	// The last parameter in the AddBlock call is the maximum number of
	// offsets to return; we only need one.
	// From the Kafka folks themselves: "We agree that this API is slightly funky."
	// https://mail-archives.apache.org/mod_mbox/kafka-users/201411.mbox/%3Cc159383825e04129b77253ffd6c448aa@BY2PR02MB505.namprd02.prod.outlook.com%3E
	req.AddBlock(cp.Topic(), cp.Partition(), offset, 1)
	return req
}
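
// fetchOffsetExample is a hypothetical sketch (not part of the original
// file): it issues the request built above against an already-connected
// broker and reads back the single offset that was asked for.
func fetchOffsetExample(broker *sarama.Broker, cp ChainPartition) (int64, error) {
	resp, err := broker.GetAvailableOffsets(newOffsetReq(cp, sarama.OffsetNewest))
	if err != nil {
		return 0, err
	}
	// Because AddBlock above requests at most one offset, the reply block
	// carries a single entry.
	return resp.GetBlock(cp.Topic(), cp.Partition()).Offsets[0], nil
}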