github.com/confluentinc/confluent-kafka-go@v1.9.2/kafka/handle.go

/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
	"fmt"
	"strings"
	"sync"
	"time"
	"unsafe"
)

/*
#include "select_rdkafka.h"
#include <stdlib.h>
*/
import "C"

// OAuthBearerToken represents the data to be transmitted
// to a broker during SASL/OAUTHBEARER authentication.
type OAuthBearerToken struct {
	// Token value, often (but not necessarily) a JWS compact serialization
	// as per https://tools.ietf.org/html/rfc7515#section-3.1; it must meet
	// the regular expression for a SASL/OAUTHBEARER value defined at
	// https://tools.ietf.org/html/rfc7628#section-3.1
	TokenValue string
	// Metadata about the token indicating when it expires (local time);
	// it must represent a time in the future
	Expiration time.Time
	// Metadata about the token indicating the Kafka principal name
	// to which it applies (for example, "admin")
	Principal string
	// SASL extensions, if any, to be communicated to the broker during
	// authentication (all keys and values of which must meet the regular
	// expressions defined at https://tools.ietf.org/html/rfc7628#section-3.1,
	// and it must not contain the reserved "auth" key)
	Extensions map[string]string
}
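
// For illustration only, a caller might populate a token like this before
// handing it to Handle.SetOAuthBearerToken; the literal token, expiry and
// extension values below are assumptions, not part of this package:
//
//	token := kafka.OAuthBearerToken{
//		TokenValue: "eyJhbGciOiJub25lIn0.eyJzdWIiOiJhZG1pbiJ9.", // unsecured JWS, testing only
//		Expiration: time.Now().Add(5 * time.Minute),
//		Principal:  "admin",
//		Extensions: map[string]string{"traceId": "abc123"},
//	}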

// Handle represents a generic client handle containing common parts for
// both Producer and Consumer.
type Handle interface {
	// SetOAuthBearerToken sets the data to be transmitted
	// to a broker during SASL/OAUTHBEARER authentication. It will return nil
	// on success, otherwise an error if:
	// 1) the token data is invalid (meaning an expiration time in the past
	// or either a token value or an extension key or value that does not meet
	// the regular expression requirements as per
	// https://tools.ietf.org/html/rfc7628#section-3.1);
	// 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
	// 3) SASL/OAUTHBEARER is supported but is not configured as the client's
	// authentication mechanism.
	SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error

	// SetOAuthBearerTokenFailure sets the error message describing why token
	// retrieval/setting failed; it also schedules a new token refresh event for 10
	// seconds later so the attempt may be retried. It will return nil on
	// success, otherwise an error if:
	// 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
	// 2) SASL/OAUTHBEARER is supported but is not configured as the client's
	// authentication mechanism.
	SetOAuthBearerTokenFailure(errstr string) error

	// gethandle() returns the internal handle struct pointer
	gethandle() *handle
}
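
// A minimal sketch of the token-refresh flow, assuming a Producer p
// configured with the OAUTHBEARER SASL mechanism; fetchToken is a
// hypothetical helper that obtains an OAuthBearerToken from the caller's
// identity provider:
//
//	for ev := range p.Events() {
//		switch e := ev.(type) {
//		case kafka.OAuthBearerTokenRefresh:
//			token, err := fetchToken(e.Config) // hypothetical helper
//			if err != nil {
//				p.SetOAuthBearerTokenFailure(err.Error())
//				continue
//			}
//			p.SetOAuthBearerToken(token)
//		}
//	}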

// Common instance handle for both Producer and Consumer
type handle struct {
	rk  *C.rd_kafka_t
	rkq *C.rd_kafka_queue_t

	// Forward logs from librdkafka log queue to logs channel.
	logs          chan LogEvent
	logq          *C.rd_kafka_queue_t
	closeLogsChan bool

	// Topic <-> rkt caches
	rktCacheLock sync.Mutex
	// topic name -> rkt cache
	rktCache map[string]*C.rd_kafka_topic_t
	// rkt -> topic name cache
	rktNameCache map[*C.rd_kafka_topic_t]string

	// Cached instance name to avoid CGo call in String()
	name string

	//
	// cgo map
	// Maps cgoids received in C callbacks back to their Go objects
	cgoLock   sync.Mutex
	cgoidNext uintptr
	cgomap    map[int]cgoif

	//
	// producer
	//
	p *Producer

	// Forward delivery reports on Producer.Events channel
	fwdDr bool

	// Enabled message fields for delivery reports and consumed messages.
	msgFields *messageFields

	//
	// consumer
	//
	c *Consumer

	// WaitGroup to wait for spawned go-routines to finish.
	waitGroup sync.WaitGroup
}

func (h *handle) String() string {
	return h.name
}

func (h *handle) setup() {
	h.rktCache = make(map[string]*C.rd_kafka_topic_t)
	h.rktNameCache = make(map[*C.rd_kafka_topic_t]string)
	h.cgomap = make(map[int]cgoif)
	h.name = C.GoString(C.rd_kafka_name(h.rk))
	if h.msgFields == nil {
		h.msgFields = newMessageFields()
	}
}

func (h *handle) cleanup() {
	if h.logs != nil {
		C.rd_kafka_queue_destroy(h.logq)
		if h.closeLogsChan {
			close(h.logs)
		}
	}

	for _, crkt := range h.rktCache {
		C.rd_kafka_topic_destroy(crkt)
	}

	if h.rkq != nil {
		C.rd_kafka_queue_destroy(h.rkq)
	}
}

func (h *handle) setupLogQueue(logsChan chan LogEvent, termChan chan bool) {
	if logsChan == nil {
		logsChan = make(chan LogEvent, 10000)
		h.closeLogsChan = true
	}

	h.logs = logsChan

	// Let librdkafka forward logs to our log queue instead of the main queue
	h.logq = C.rd_kafka_queue_new(h.rk)
	C.rd_kafka_set_log_queue(h.rk, h.logq)

	// Start a polling goroutine to consume the log queue
	h.waitGroup.Add(1)
	go func() {
		h.pollLogEvents(h.logs, 100, termChan)
		h.waitGroup.Done()
	}()
}
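
// A minimal sketch of consuming the forwarded logs from the client side,
// assuming a Producer p created with "go.logs.channel.enable": true (the
// format string is illustrative only):
//
//	go func() {
//		for logEv := range p.Logs() {
//			fmt.Printf("%v %s [%d] %s: %s\n",
//				logEv.Timestamp, logEv.Tag, logEv.Level, logEv.Name, logEv.Message)
//		}
//	}()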

// getRkt0 finds or creates and returns a C topic_t object from the local cache.
func (h *handle) getRkt0(topic string, ctopic *C.char, doLock bool) (crkt *C.rd_kafka_topic_t) {
	if doLock {
		h.rktCacheLock.Lock()
		defer h.rktCacheLock.Unlock()
	}
	crkt, ok := h.rktCache[topic]
	if ok {
		return crkt
	}

	if ctopic == nil {
		ctopic = C.CString(topic)
		defer C.free(unsafe.Pointer(ctopic))
	}

	crkt = C.rd_kafka_topic_new(h.rk, ctopic, nil)
	if crkt == nil {
		panic(fmt.Sprintf("Unable to create new C topic \"%s\": %s",
			topic, C.GoString(C.rd_kafka_err2str(C.rd_kafka_last_error()))))
	}

	h.rktCache[topic] = crkt
	h.rktNameCache[crkt] = topic

	return crkt
}

// getRkt finds or creates and returns a C topic_t object from the local cache.
func (h *handle) getRkt(topic string) (crkt *C.rd_kafka_topic_t) {
	return h.getRkt0(topic, nil, true)
}
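
// For illustration, repeated lookups for the same topic return the same
// cached C topic object (internal sketch; "mytopic" is a placeholder name):
//
//	rkt1 := h.getRkt("mytopic")
//	rkt2 := h.getRkt("mytopic")
//	// rkt1 == rkt2: the rkt is created once and then served from rktCache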

// getTopicNameFromRkt returns the topic name for a C topic_t object, preferably
// using the local cache to avoid a cgo call.
func (h *handle) getTopicNameFromRkt(crkt *C.rd_kafka_topic_t) (topic string) {
	h.rktCacheLock.Lock()
	defer h.rktCacheLock.Unlock()

	topic, ok := h.rktNameCache[crkt]
	if ok {
		return topic
	}

	// we need our own copy/refcount of the crkt
	ctopic := C.rd_kafka_topic_name(crkt)
	topic = C.GoString(ctopic)

	crkt = h.getRkt0(topic, ctopic, false /* don't lock */)

	return topic
}

// cgoif is a generic interface for holding Go state passed as an opaque
// value to the C code.
// Since pointers to complex Go types cannot be passed to C, we instead create
// a cgoif object, generate a unique id that is added to the cgomap,
// and then pass that id to the C code. When the C code callback is called, we
// use the id to look up the cgoif object in the cgomap.
type cgoif interface{}

// delivery report cgoif container
type cgoDr struct {
	deliveryChan chan Event
	opaque       interface{}
}

// cgoPut adds object cg to the handle's cgo map and returns a
// unique id for the added entry.
// Thread-safe.
// FIXME: the uniqueness of the id is questionable over time.
func (h *handle) cgoPut(cg cgoif) (cgoid int) {
	h.cgoLock.Lock()
	defer h.cgoLock.Unlock()

	h.cgoidNext++
	if h.cgoidNext == 0 {
		// skip 0 on wraparound; cgoGet treats id 0 as "not found"
		h.cgoidNext++
	}
	cgoid = (int)(h.cgoidNext)
	h.cgomap[cgoid] = cg
	return cgoid
}

// cgoGet looks up cgoid in the cgo map, deletes the reference from the map
// and returns the object, if found. Else returns nil, false.
// Thread-safe.
func (h *handle) cgoGet(cgoid int) (cg cgoif, found bool) {
	if cgoid == 0 {
		return nil, false
	}

	h.cgoLock.Lock()
	defer h.cgoLock.Unlock()
	cg, found = h.cgomap[cgoid]
	if found {
		delete(h.cgomap, cgoid)
	}

	return cg, found
}
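
// For illustration, the round trip through the cgo map looks roughly like
// this (internal sketch; ch and ev are placeholders, not package members):
//
//	id := h.cgoPut(cgoDr{deliveryChan: ch}) // before handing id to C
//	// ... later, in the callback invoked from C ...
//	if cg, found := h.cgoGet(id); found {
//		cg.(cgoDr).deliveryChan <- ev
//	}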

// setOAuthBearerToken - see rd_kafka_oauthbearer_set_token()
func (h *handle) setOAuthBearerToken(oauthBearerToken OAuthBearerToken) error {
	cTokenValue := C.CString(oauthBearerToken.TokenValue)
	defer C.free(unsafe.Pointer(cTokenValue))

	cPrincipal := C.CString(oauthBearerToken.Principal)
	defer C.free(unsafe.Pointer(cPrincipal))

	cErrstrSize := C.size_t(512)
	cErrstr := (*C.char)(C.malloc(cErrstrSize))
	defer C.free(unsafe.Pointer(cErrstr))

	cExtensions := make([]*C.char, 2*len(oauthBearerToken.Extensions))
	extensionSize := 0
	for key, value := range oauthBearerToken.Extensions {
		cExtensions[extensionSize] = C.CString(key)
		defer C.free(unsafe.Pointer(cExtensions[extensionSize]))
		extensionSize++
		cExtensions[extensionSize] = C.CString(value)
		defer C.free(unsafe.Pointer(cExtensions[extensionSize]))
		extensionSize++
	}

	var cExtensionsToUse **C.char
	if extensionSize > 0 {
		cExtensionsToUse = (**C.char)(unsafe.Pointer(&cExtensions[0]))
	}

	// librdkafka expects the expiration as milliseconds since the Unix epoch
	cErr := C.rd_kafka_oauthbearer_set_token(h.rk, cTokenValue,
		C.int64_t(oauthBearerToken.Expiration.UnixNano()/(1000*1000)), cPrincipal,
		cExtensionsToUse, C.size_t(extensionSize), cErrstr, cErrstrSize)
	if cErr == C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil
	}
	return newErrorFromCString(cErr, cErrstr)
}

// setOAuthBearerTokenFailure - see rd_kafka_oauthbearer_set_token_failure()
func (h *handle) setOAuthBearerTokenFailure(errstr string) error {
	cerrstr := C.CString(errstr)
	defer C.free(unsafe.Pointer(cerrstr))
	cErr := C.rd_kafka_oauthbearer_set_token_failure(h.rk, cerrstr)
	if cErr == C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil
	}
	return newError(cErr)
}

// messageFields controls which fields are made available for
// producer delivery reports and consumed messages.
// true values indicate that the field should be included.
type messageFields struct {
	Key     bool
	Value   bool
	Headers bool
}

// disableAll disables all fields
func (mf *messageFields) disableAll() {
	mf.Key = false
	mf.Value = false
	mf.Headers = false
}

// newMessageFields returns a new messageFields with all fields enabled
func newMessageFields() *messageFields {
	return &messageFields{
		Key:     true,
		Value:   true,
		Headers: true,
	}
}

// newMessageFieldsFrom constructs a new messageFields from the given configuration value
func newMessageFieldsFrom(v ConfigValue) (*messageFields, error) {
	msgFields := newMessageFields()
	switch v {
	case "all":
		// nothing to do
	case "", "none":
		msgFields.disableAll()
	default:
		msgFields.disableAll()
		for _, value := range strings.Split(v.(string), ",") {
			switch value {
			case "key":
				msgFields.Key = true
			case "value":
				msgFields.Value = true
			case "headers":
				msgFields.Headers = true
			default:
				return nil, fmt.Errorf("unknown message field: %s", value)
			}
		}
	}
	return msgFields, nil
}
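
// For illustration, parsing a comma-separated field list enables only the
// named fields; e.g. "key,headers" enables Key and Headers but not Value:
//
//	mf, err := newMessageFieldsFrom("key,headers")
//	// err == nil; mf.Key == true, mf.Value == false, mf.Headers == true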