github.com/rohankumardubey/aresdb@v0.0.2-0.20190517170215-e54e3ca06b9c/subscriber/common/consumer/kafka/kafka_confluent_test.go

//  Copyright (c) 2017-2018 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kafka

import (
	"os"

	kafkaConfluent "github.com/confluentinc/confluent-kafka-go/kafka"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/uber-go/tally"
	"github.com/uber/aresdb/client"
	"github.com/uber/aresdb/subscriber/common/rules"
	"github.com/uber/aresdb/subscriber/common/tools"
	"github.com/uber/aresdb/subscriber/config"
	"github.com/uber/aresdb/utils"
	"go.uber.org/zap"
)

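// KafkaConsumer tests cover the confluent-kafka-go based consumer wrapper:
// construction from a job config, metadata accessors, the consume loop,
// message processing with tally metrics, and close semantics.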
var _ = Describe("KafkaConsumer", func() {
	serviceConfig := config.ServiceConfig{
		Environment: utils.EnvironmentContext{
			Deployment:         "test",
			RuntimeEnvironment: "test",
			Zone:               "local",
		},
		Logger: zap.NewNop(),
		Scope:  tally.NoopScope,
	}
	serviceConfig.ActiveJobs = []string{"job1"}
	sinkConfig := config.SinkConfig{
		SinkModeStr:           "kafka",
		AresDBConnectorConfig: client.ConnectorConfig{Address: "localhost:8888"},
	}
	serviceConfig.ActiveAresClusters = map[string]config.SinkConfig{
		"dev01": sinkConfig,
	}

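	// Change into the module root so the local job config files read by
	// AddLocalJobConfig resolve, then load the rules for the active jobs.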
	rootPath := tools.GetModulePath("")
	if err := os.Chdir(rootPath); err != nil {
		panic("Failed to chdir to module root")
	}
	jobConfigs := make(rules.JobConfigs)
	err := rules.AddLocalJobConfig(serviceConfig, jobConfigs)
	if err != nil {
		panic("Failed to AddLocalJobConfig")
	}
	if jobConfigs["job1"]["dev01"] == nil {
		panic("Failed to get jobConfigs[\"job1\"][\"dev01\"]")
	} else {
		jobConfigs["job1"]["dev01"].AresTableConfig.Cluster = "dev01"
	}

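	// Exercises the full consumer lifecycle: construct, inspect metadata,
	// start the consume loop, process one message, and close twice.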
	It("KafkaConsumer functions", func() {
		kc, err := NewKafkaConsumer(jobConfigs["job1"]["dev01"], serviceConfig)
		Ω(err).Should(BeNil())
		Ω(kc).ShouldNot(BeNil())

		groupId := kc.Name()
		Ω(groupId).Should(Equal("ares-subscriber_test_job1_dev01_streaming"))

		topics := kc.Topics()
		Ω(len(topics)).Should(Equal(1))
		Ω(topics[0]).Should(Equal("job1-topic"))

		errCh := kc.Errors()
		Ω(errCh).ShouldNot(BeNil())

		msgCh := kc.Messages()
		Ω(msgCh).ShouldNot(BeNil())

		closeCh := kc.Closed()
		Ω(closeCh).ShouldNot(BeNil())

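		// Run the consume loop in a background goroutine; the test then
		// drives processMsg directly instead of relying on a live broker.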
		go kc.(*KafkaConsumer).startConsuming()

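		// Build a synthetic confluent-kafka-go message to feed through
		// processMsg.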
		topic := "topic"
		msg := &kafkaConfluent.Message{
			TopicPartition: kafkaConfluent.TopicPartition{
				Topic:     &topic,
				Partition: int32(0),
				Offset:    0,
			},
			Value: []byte("value"),
			Key:   []byte("key"),
		}
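		// Per-topic, per-partition tally counters and gauges that processMsg
		// updates with message counts, byte counts, offsets, and lag.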
		msgCounter := map[string]map[int32]tally.Counter{
			"topic": make(map[int32]tally.Counter),
		}
		msgByteCounter := map[string]map[int32]tally.Counter{
			"topic": make(map[int32]tally.Counter),
		}
		msgOffsetGauge := map[string]map[int32]tally.Gauge{
			"topic": make(map[int32]tally.Gauge),
		}
		msgLagGauge := map[string]map[int32]tally.Gauge{
			"topic": make(map[int32]tally.Gauge),
		}
		kc.(*KafkaConsumer).processMsg(msg, msgCounter, msgByteCounter, msgOffsetGauge, msgLagGauge)

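		// The first Close should succeed; closing an already-closed consumer
		// should report an error.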
		err = kc.(*KafkaConsumer).Close()
		Ω(err).Should(BeNil())

		err = kc.(*KafkaConsumer).Close()
		Ω(err).ShouldNot(BeNil())
	})

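	// Exercises the KafkaMessage wrapper: accessors that delegate to the
	// underlying confluent message, plus Ack/Nack with and without a consumer.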
	It("KafkaMessage functions", func() {
		topic := "topic"
		message := &KafkaMessage{
			&kafkaConfluent.Message{
				TopicPartition: kafkaConfluent.TopicPartition{
					Topic:     &topic,
					Partition: int32(0),
					Offset:    0,
				},
				Value: []byte("value"),
				Key:   []byte("key"),
			},
			nil,
			"kafka-cluster1",
		}

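		// The accessors should surface the fields of the wrapped message.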
		key := message.Key()
		Ω(string(key)).Should(Equal("key"))

		value := message.Value()
		Ω(string(value)).Should(Equal("value"))

		cluster := message.Cluster()
		Ω(cluster).Should(Equal("kafka-cluster1"))

		topic = message.Topic()
		Ω(topic).Should(Equal("topic"))

		offset := message.Offset()
		Ω(offset).Should(Equal(int64(0)))

		partition := message.Partition()
		Ω(partition).Should(Equal(int32(0)))

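		// Ack with no consumer attached should be a no-op; after attaching a
		// real consumer, Ack and Nack are exercised again before closing it.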
		message.Ack()
		message.Consumer, _ = NewKafkaConsumer(jobConfigs["job1"]["dev01"], serviceConfig)

		message.Ack()

		message.Nack()

		message.Consumer.Close()
	})
})