github.com/johnnyeven/libtools@v0.0.0-20191126065708-61829c1adf46/kafka/consumergroup/consumergroup_integration_test.go

package consumergroup

import (
	"fmt"
	"log"
	"os"
	"os/signal"
	"strings"
	"testing"
	"time"

	"github.com/wvanbergen/kazoo-go"
	"gopkg.in/Shopify/sarama.v1"
)

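// Topics the integration tests expect to exist on the test cluster; the
// numeric suffix mirrors the number of partitions each topic should have.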
const (
	TopicWithSinglePartition    = "test.1"
	TopicWithMultiplePartitions = "test.4"
)

var (
	// By default, assume we're using Sarama's vagrant cluster when running tests
	zookeeperPeers = []string{"192.168.100.67:2181", "192.168.100.67:2182", "192.168.100.67:2183", "192.168.100.67:2184", "192.168.100.67:2185"}
	kafkaPeers     = []string{"192.168.100.67:9091", "192.168.100.67:9092", "192.168.100.67:9093", "192.168.100.67:9094", "192.168.100.67:9095"}
)

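// init lets the default peer lists above be overridden through the
// ZOOKEEPER_PEERS and KAFKA_PEERS environment variables (comma-separated
// host:port pairs) and enables Sarama's logger when DEBUG is non-empty,
// for example:
//
//	ZOOKEEPER_PEERS=localhost:2181 KAFKA_PEERS=localhost:9092 DEBUG=1 go test -v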
func init() {
	if zookeeperPeersEnv := os.Getenv("ZOOKEEPER_PEERS"); zookeeperPeersEnv != "" {
		zookeeperPeers = strings.Split(zookeeperPeersEnv, ",")
	}
	if kafkaPeersEnv := os.Getenv("KAFKA_PEERS"); kafkaPeersEnv != "" {
		kafkaPeers = strings.Split(kafkaPeersEnv, ",")
	}

	if os.Getenv("DEBUG") != "" {
		sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
	}

	fmt.Printf("Using Zookeeper cluster at %v\n", zookeeperPeers)
	fmt.Printf("Using Kafka cluster at %v\n", kafkaPeers)
}

////////////////////////////////////////////////////////////////////
// Examples
////////////////////////////////////////////////////////////////////

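// Skip_ExampleConsumerGroup shows the intended usage pattern: join a consumer
// group via Zookeeper, process messages from Messages(), and acknowledge each
// one with CommitUpto. The Skip_ prefix keeps it from being run as a testable
// example, since it needs a live cluster.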
func Skip_ExampleConsumerGroup() {
	consumer, consumerErr := JoinConsumerGroup(
		"ExampleConsumerGroup",
		[]string{TopicWithSinglePartition, TopicWithMultiplePartitions},
		zookeeperPeers,
		nil)

	if consumerErr != nil {
		log.Fatalln(consumerErr)
	}

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		<-c
		consumer.Close()
	}()

	eventCount := 0

	for event := range consumer.Messages() {
		// Process event
		log.Println(string(event.Value))
		eventCount++

		// Ack event
		consumer.CommitUpto(event)
	}

	log.Printf("Processed %d events.", eventCount)
}

////////////////////////////////////////////////////////////////////
// Integration tests
////////////////////////////////////////////////////////////////////

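// Skip_TestIntegrationMultipleTopicsSingleConsumer checks that a single
// consumer group member receives every message produced to two topics with
// different partition counts.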
func Skip_TestIntegrationMultipleTopicsSingleConsumer(t *testing.T) {
	consumerGroup := "TestIntegrationMultipleTopicsSingleConsumer"
	setupZookeeper(t, consumerGroup, TopicWithSinglePartition, 1)
	setupZookeeper(t, consumerGroup, TopicWithMultiplePartitions, 4)

	// Produce the events we will consume: 100 on the single-partition topic
	// and 200 on the multi-partition topic.
	go produceEvents(t, consumerGroup, TopicWithSinglePartition, 100)
	go produceEvents(t, consumerGroup, TopicWithMultiplePartitions, 200)

	consumer, err := JoinConsumerGroup(consumerGroup, []string{TopicWithSinglePartition, TopicWithMultiplePartitions}, zookeeperPeers, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer consumer.Close()

	offsets := make(OffsetMap)
	assertEvents(t, consumer, 300, offsets)
}

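// Skip_TestIntegrationSingleTopicParallelConsumers starts two consumers in the
// same group on one multi-partition topic and checks that the work is divided:
// both consumers must receive events, offsets must be consecutive per
// partition, and all 200 produced messages must arrive.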
func Skip_TestIntegrationSingleTopicParallelConsumers(t *testing.T) {
	consumerGroup := "TestIntegrationSingleTopicParallelConsumers"
	setupZookeeper(t, consumerGroup, TopicWithMultiplePartitions, 4)
	go produceEvents(t, consumerGroup, TopicWithMultiplePartitions, 200)

	consumer1, err := JoinConsumerGroup(consumerGroup, []string{TopicWithMultiplePartitions}, zookeeperPeers, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer consumer1.Close()

	consumer2, err := JoinConsumerGroup(consumerGroup, []string{TopicWithMultiplePartitions}, zookeeperPeers, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer consumer2.Close()

	var eventCount1, eventCount2 int64
	offsets := make(map[int32]int64)

	events1 := consumer1.Messages()
	events2 := consumer2.Messages()

	handleEvent := func(message *sarama.ConsumerMessage, ok bool) {
		if !ok {
			t.Fatal("Event stream closed prematurely")
		}

		if offsets[message.Partition] != 0 && offsets[message.Partition]+1 != message.Offset {
			t.Fatalf("Unexpected offset on partition %d. Expected %d, got %d.", message.Partition, offsets[message.Partition]+1, message.Offset)
		}

		offsets[message.Partition] = message.Offset
	}

	for eventCount1+eventCount2 < 200 {
		select {
		case <-time.After(15 * time.Second):
			t.Fatalf("Consumer timeout; read %d instead of %d messages", eventCount1+eventCount2, 200)

		case event1, ok1 := <-events1:
			handleEvent(event1, ok1)
			eventCount1++
			consumer1.CommitUpto(event1)

		case event2, ok2 := <-events2:
			handleEvent(event2, ok2)
			eventCount2++
			consumer2.CommitUpto(event2)
		}
	}

	if eventCount1 == 0 || eventCount2 == 0 {
		t.Error("Expected events to be consumed by both consumers!")
	} else {
		t.Logf("Successfully read %d and %d messages, closing!", eventCount1, eventCount2)
	}
}

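// Skip_TestSingleTopicSequentialConsumer consumes half of the produced
// messages with one consumer, closes it, and then verifies that a second
// consumer in the same group resumes exactly where the first one committed.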
func Skip_TestSingleTopicSequentialConsumer(t *testing.T) {
	consumerGroup := "TestSingleTopicSequentialConsumer"
	setupZookeeper(t, consumerGroup, TopicWithSinglePartition, 1)
	go produceEvents(t, consumerGroup, TopicWithSinglePartition, 20)

	offsets := make(OffsetMap)

	// If the channel is buffered, the consumer will enqueue more events in the channel,
	// which assertEvents will simply skip. When consumer 2 starts it will skip a bunch of
	// events because of this. Transactional processing will fix this.
	config := NewConfig()
	config.ChannelBufferSize = 0

	consumer1, err := JoinConsumerGroup(consumerGroup, []string{TopicWithSinglePartition}, zookeeperPeers, config)
	if err != nil {
		t.Fatal(err)
	}

	assertEvents(t, consumer1, 10, offsets)
	consumer1.Close()

	consumer2, err := JoinConsumerGroup(consumerGroup, []string{TopicWithSinglePartition}, zookeeperPeers, nil)
	if err != nil {
		t.Fatal(err)
	}

	assertEvents(t, consumer2, 10, offsets)
	consumer2.Close()
}

////////////////////////////////////////////////////////////////////
// Helper functions and types
////////////////////////////////////////////////////////////////////

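// OffsetMap records the last seen offset per topic and partition so that
// assertEvents can detect gaps or duplicates, including across consumer
// handovers.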
type OffsetMap map[string]map[int32]int64

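// assertEvents reads count messages from the consumer group, committing each
// one and failing the test if the stream closes, stalls for 5 seconds, or
// delivers a non-consecutive offset for a previously seen partition.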
func assertEvents(t *testing.T, cg *ConsumerGroup, count int64, offsets OffsetMap) {
	var processed int64
	for processed < count {
		select {
		case <-time.After(5 * time.Second):
			t.Fatalf("Reader timeout after %d events!", processed)

		case message, ok := <-cg.Messages():
			if !ok {
				t.Fatal("Event stream closed prematurely")
			}

			if offsets != nil {
				if offsets[message.Topic] == nil {
					offsets[message.Topic] = make(map[int32]int64)
				}
				if offsets[message.Topic][message.Partition] != 0 && offsets[message.Topic][message.Partition]+1 != message.Offset {
					t.Fatalf("Unexpected offset on %s/%d. Expected %d, got %d.", message.Topic, message.Partition, offsets[message.Topic][message.Partition]+1, message.Offset)
				}

				processed++
				offsets[message.Topic][message.Partition] = message.Offset

				if os.Getenv("DEBUG") != "" {
					log.Printf("Consumed %d from %s/%d\n", message.Offset, message.Topic, message.Partition)
				}

				cg.CommitUpto(message)
			}
		}
	}
	t.Logf("Successfully asserted %d events.", count)
}

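// saramaClient returns a Sarama client for the configured Kafka peers,
// panicking on failure since the helpers below cannot run without one.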
func saramaClient() sarama.Client {
	client, err := sarama.NewClient(kafkaPeers, nil)
	if err != nil {
		panic(err)
	}
	return client
}

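// produceEvents synchronously publishes amount string messages to the given
// topic so the tests have a known number of events to consume. The tests call
// it in a goroutine, so its error return is effectively ignored.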
func produceEvents(t *testing.T, consumerGroup string, topic string, amount int64) error {
	producer, err := sarama.NewSyncProducer(kafkaPeers, nil)
	if err != nil {
		return err
	}
	defer producer.Close()

	for i := int64(1); i <= amount; i++ {
		msg := &sarama.ProducerMessage{Topic: topic, Value: sarama.StringEncoder(fmt.Sprintf("testing %d", i))}
		partition, offset, err := producer.SendMessage(msg)
		if err != nil {
			return err
		}

		if os.Getenv("DEBUG") != "" {
			log.Printf("Produced message %d to %s/%d.\n", offset, msg.Topic, partition)
		}
	}

	return nil
}

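// setupZookeeper commits the current head offset of every partition to the
// consumer group in Zookeeper, so the tests only consume messages they produce
// themselves rather than any backlog already present on the topic.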
func setupZookeeper(t *testing.T, consumerGroup string, topic string, partitions int32) {
	client := saramaClient()
	defer client.Close()

	// Connect to zookeeper to commit the last seen offset.
	// This way we should only consume events that we produce ourselves in this test.
	kz, err := kazoo.NewKazoo(zookeeperPeers, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer kz.Close()

	group := kz.Consumergroup(consumerGroup)
	for partition := int32(0); partition < partitions; partition++ {
		// Retrieve the offset that Sarama will use for the next message on the topic/partition.
		nextOffset, offsetErr := client.GetOffset(topic, partition, sarama.OffsetNewest)
		if offsetErr != nil {
			t.Fatal(offsetErr)
		}
		t.Logf("Next offset for %s/%d = %d", topic, partition, nextOffset)

		if err := group.CommitOffset(topic, partition, nextOffset); err != nil {
			t.Fatal(err)
		}
	}
}