github.phpd.cn/cilium/cilium@v1.6.12/test/runtime/kafka.go

// Copyright 2017 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package RuntimeTest

import (
	"context"
	"fmt"

	. "github.com/cilium/cilium/test/ginkgo-ext"
	"github.com/cilium/cilium/test/helpers"
	"github.com/cilium/cilium/test/helpers/constants"

	. "github.com/onsi/gomega"
)

var _ = Describe("RuntimeKafka", func() {

	var (
		vm          *helpers.SSHMeta
		monitorStop = func() error { return nil }

		allowedTopic  = "allowedTopic"
		disallowTopic = "disallowTopic"
		topicTest     = "test-topic"
		listTopicsCmd = "/opt/kafka/bin/kafka-topics.sh --list --zookeeper zook:2181"
		MaxMessages   = 5
		client        = "client"
	)

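	// containers creates ("create") or removes ("delete") the Zookeeper,
	// Kafka client and Kafka broker containers used by this suite.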
	containers := func(mode string) {

		images := map[string]string{
			"zook":   constants.ZookeeperImage,
			"client": constants.KafkaClientImage,
		}

		switch mode {
		case "create":
			for k, v := range images {
				vm.ContainerCreate(k, v, helpers.CiliumDockerNetwork, fmt.Sprintf("-l id.%s", k))
			}
			zook, err := vm.ContainerInspectNet("zook")
			Expect(err).Should(BeNil())

			vm.ContainerCreate("kafka", constants.KafkaImage, helpers.CiliumDockerNetwork, fmt.Sprintf(
				"-l id.kafka -e KAFKA_ZOOKEEPER_CONNECT=%s:2181 -e KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS=60000 -e KAFKA_LISTENERS=PLAINTEXT://:9092 -e KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS=60000", zook["IPv4"]))

		case "delete":
			for k := range images {
				vm.ContainerRm(k)
			}
			vm.ContainerRm("kafka")
		}
	}

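	// createTopicCmd returns the kafka-topics.sh command line that creates
	// the given topic with one partition and a replication factor of 1.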
	createTopicCmd := func(topic string) string {
		return fmt.Sprintf("/opt/kafka/bin/kafka-topics.sh --create --zookeeper zook:2181 "+
			"--replication-factor 1 --partitions 1 --topic %s", topic)
	}

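	// createTopic runs createTopicCmd inside the client container and
	// expects the topic creation to succeed.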
	createTopic := func(topic string) {
		logger.Infof("Creating new kafka topic %s", topic)
		res := vm.ContainerExec(client, createTopicCmd(topic))
		res.ExpectSuccess("Unable to create topic %s", topic)
	}

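	// consumerCmd returns the kafka-console-consumer.sh command line that
	// reads maxMsg messages from the given topic, starting at the beginning.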
	consumerCmd := func(topic string, maxMsg int) string {
		return fmt.Sprintf("/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server "+
			"kafka:9092 --topic %s --max-messages %d --timeout-ms 300000 --from-beginning",
			topic, maxMsg)
	}

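	// consumer runs consumerCmd inside the client container and returns the
	// command result.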
	consumer := func(topic string, maxMsg int) *helpers.CmdRes {
		return vm.ContainerExec(client, consumerCmd(topic, maxMsg))
	}

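	// producer publishes a single message on the given topic by piping it
	// into kafka-console-producer.sh via `docker exec` on the runtime VM.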
	producer := func(topic string, message string) {
		cmd := fmt.Sprintf(
			"echo %s | docker exec -i %s /opt/kafka/bin/kafka-console-producer.sh "+
				"--broker-list kafka:9092 --topic %s",
			message, client, topic)
		vm.Exec(cmd)
	}

	// waitForKafkaBroker waits for the Kafka broker to become ready by
	// executing the given command repeatedly until it succeeds or a
	// timeout occurs.
	waitForKafkaBroker := func(pod string, cmd string) error {
		body := func() bool {
			res := vm.ContainerExec(pod, cmd)
			return res.WasSuccessful()
		}
		err := helpers.WithTimeout(body, "Kafka Broker not ready", &helpers.TimeoutConfig{Timeout: helpers.HelperTimeout})
		return err
	}

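	// BeforeAll enables the ConntrackLocal option, starts the Zookeeper,
	// Kafka and client containers, waits for the broker to come up and
	// pre-creates the topics used by the tests.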
	BeforeAll(func() {
		vm = helpers.InitRuntimeHelper(helpers.Runtime, logger)
		ExpectCiliumReady(vm)

		status := vm.ExecCilium(fmt.Sprintf("config %s=true",
			helpers.OptionConntrackLocal))
		status.ExpectSuccess()

		containers("create")
		epsReady := vm.WaitEndpointsReady()
		Expect(epsReady).Should(BeTrue(), "Endpoints are not ready after timeout")

		err := waitForKafkaBroker(client, createTopicCmd(topicTest))
		Expect(err).To(BeNil(), "Kafka broker failed to come up")

		By("Creating kafka topics")
		createTopic(allowedTopic)
		createTopic(disallowTopic)

		By("Listing created Kafka topics")
		res := vm.ContainerExec(client, listTopicsCmd)
		res.ExpectSuccess("Cannot list kafka topics")
	})

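	// Remove all imported policies after each test so that the tests stay
	// independent of each other.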
	AfterEach(func() {
		vm.PolicyDelAll()
	})

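	// AfterAll removes the containers, restores the conntrack configuration
	// and closes the SSH connection to the runtime VM.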
	AfterAll(func() {
		containers("delete")

		status := vm.ExecCilium(fmt.Sprintf("config %s=false",
			helpers.OptionConntrackLocal))
		status.ExpectSuccess()

		vm.CloseSSHClient()
	})

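	// Capture `cilium monitor` output for the duration of each test.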
	JustBeforeEach(func() {
		monitorStop = vm.MonitorStart()
	})

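	// After each test, verify that no errors appeared in the Cilium logs and
	// stop the monitor started above.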
	JustAfterEach(func() {
		vm.ValidateNoErrorsInLogs(CurrentGinkgoTestDescription().Duration)
		Expect(monitorStop()).To(BeNil(), "cannot stop monitor command")
	})

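	// On failure, dump the imported policies to aid debugging.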
	AfterFailed(func() {
		vm.ReportFailed("cilium policy get")
	})

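	// Imports an L7 Kafka policy (Policies-kafka.json) that permits access
	// to allowedTopic, then verifies that produce/consume requests on
	// allowedTopic succeed while consuming from disallowTopic is rejected.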
	It("Kafka Policy Ingress", func() {
		_, err := vm.PolicyImportAndWait(vm.GetFullPath("Policies-kafka.json"), helpers.HelperTimeout)
		Expect(err).Should(BeNil())

		endPoints, err := vm.PolicyEndpointsSummary()
		Expect(err).Should(BeNil(), "Cannot get endpoint list")
		Expect(endPoints[helpers.Enabled]).To(Equal(1),
			"Check number of endpoints with policy enforcement enabled")
		Expect(endPoints[helpers.Disabled]).To(Equal(2),
			"Check number of endpoints with policy enforcement disabled")

		By("Allowed topic")

		By("Sending produce request on kafka topic `allowedTopic`")
		for i := 1; i <= MaxMessages; i++ {
			producer(allowedTopic, fmt.Sprintf("Message %d", i))
		}

		By("Sending consume request on kafka topic `allowedTopic`")
		res := consumer(allowedTopic, MaxMessages)
		res.ExpectSuccess("Failed to consume messages from kafka topic `allowedTopic`")
		Expect(res.CombineOutput().String()).
			Should(ContainSubstring("Processed a total of %d messages", MaxMessages),
				"Kafka did not process the expected number of messages")

		By("Disallowed topic")
		res = consumer(disallowTopic, MaxMessages)
		res.ExpectFail("Kafka consumer was able to access disallowTopic")
	})

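	// Same checks as above, but with the role-based Kafka policy
	// (Policies-kafka-Role.json): allowedTopic remains reachable while
	// consuming from disallowTopic must fail with an authorization error.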
	It("Kafka Policy Role Ingress", func() {
		_, err := vm.PolicyImportAndWait(vm.GetFullPath("Policies-kafka-Role.json"), helpers.HelperTimeout)
		Expect(err).Should(BeNil(), "Failed to import policy Policies-kafka-Role.json: %s", err)

		endPoints, err := vm.PolicyEndpointsSummary()
		Expect(err).Should(BeNil(), "Failed to apply policy on all endpoints: %s", err)
		Expect(endPoints[helpers.Enabled]).To(Equal(1), "Expected 1 endpoint to have policy enforcement enabled")
		Expect(endPoints[helpers.Disabled]).To(Equal(2), "Expected 2 endpoints to have policy enforcement disabled")

		By("Sending produce request on kafka topic `allowedTopic`")
		for i := 1; i <= MaxMessages; i++ {
			producer(allowedTopic, fmt.Sprintf("Message %d", i))
		}

		By("Sending consume request on kafka topic `allowedTopic`")
		res := consumer(allowedTopic, MaxMessages)
		res.ExpectSuccess("Failed to consume messages from kafka topic `allowedTopic`")
		Expect(res.CombineOutput().String()).
			Should(ContainSubstring("Processed a total of %d messages", MaxMessages),
				"Kafka did not process the expected number of messages")

		By("Disallowed topic")
		// The consumer timeout did not work reliably here, so instead run the
		// consumer in the background and verify that the authorization
		// failure for disallowTopic shows up in its output.
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		res = vm.ExecInBackground(ctx, fmt.Sprintf(
			"docker exec -i %s %s", client, consumerCmd(disallowTopic, MaxMessages)))
		err = res.WaitUntilMatch("{disallowTopic=TOPIC_AUTHORIZATION_FAILED}")
		Expect(err).To(BeNil(), "No authorization failure observed; traffic to disallowTopic appears to be allowed")
	})
})