github.com/Heebron/moby@v0.0.0-20221111184709-6eab4f55faf7/daemon/logger/splunk/splunk.go (about)

     1  // Package splunk provides the log driver for forwarding server logs to
     2  // Splunk HTTP Event Collector endpoint.
     3  package splunk // import "github.com/docker/docker/daemon/logger/splunk"
     4  
     5  import (
     6  	"bytes"
     7  	"compress/gzip"
     8  	"context"
     9  	"crypto/tls"
    10  	"crypto/x509"
    11  	"encoding/json"
    12  	"fmt"
    13  	"io"
    14  	"net/http"
    15  	"net/url"
    16  	"os"
    17  	"strconv"
    18  	"strings"
    19  	"sync"
    20  	"time"
    21  
    22  	"github.com/docker/docker/daemon/logger"
    23  	"github.com/docker/docker/daemon/logger/loggerutils"
    24  	"github.com/docker/docker/pkg/pools"
    25  	"github.com/google/uuid"
    26  	"github.com/sirupsen/logrus"
    27  )
    28  
const (
	// driverName is the name under which this driver registers itself.
	driverName = "splunk"
	// Endpoint and authentication log-opts.
	splunkURLKey   = "splunk-url"
	splunkTokenKey = "splunk-token"
	// Event metadata log-opts (copied into every posted event).
	splunkSourceKey     = "splunk-source"
	splunkSourceTypeKey = "splunk-sourcetype"
	splunkIndexKey      = "splunk-index"
	// TLS configuration log-opts.
	splunkCAPathKey             = "splunk-capath"
	splunkCANameKey             = "splunk-caname"
	splunkInsecureSkipVerifyKey = "splunk-insecureskipverify"
	// Behavior log-opts.
	splunkFormatKey               = "splunk-format"
	splunkVerifyConnectionKey     = "splunk-verify-connection"
	splunkGzipCompressionKey      = "splunk-gzip"
	splunkGzipCompressionLevelKey = "splunk-gzip-level"
	splunkIndexAcknowledgment     = "splunk-index-acknowledgment"
	// Generic log-opts shared with other logging drivers.
	envKey         = "env"
	envRegexKey    = "env-regex"
	labelsKey      = "labels"
	labelsRegexKey = "labels-regex"
	tagKey         = "tag"
)
    50  
// Defaults for the batching behavior; the first four can be overridden at
// daemon startup via the SPLUNK_LOGGING_DRIVER_* environment variables below.
const (
	// How often do we send messages (if we are not reaching batch size)
	defaultPostMessagesFrequency = 5 * time.Second
	// How big can be batch of messages
	defaultPostMessagesBatchSize = 1000
	// Maximum number of messages we can store in buffer
	defaultBufferMaximum = 10 * defaultPostMessagesBatchSize
	// Number of messages allowed to be queued in the channel
	defaultStreamChannelSize = 4 * defaultPostMessagesBatchSize
	// maxResponseSize is the max amount that will be read from an http response
	maxResponseSize = 1024
)
    63  
// Environment variables that tune the advanced batching options; read once
// in New via getAdvancedOptionDuration / getAdvancedOptionInt.
const (
	envVarPostMessagesFrequency = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY"
	envVarPostMessagesBatchSize = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE"
	envVarBufferMaximum         = "SPLUNK_LOGGING_DRIVER_BUFFER_MAX"
	envVarStreamChannelSize     = "SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE"
)

// batchSendTimeout bounds one whole postMessages run (all batches).
// Declared as a var rather than a const — presumably so tests can override
// it; confirm before changing.
var batchSendTimeout = 30 * time.Second
    72  
// splunkLoggerInterface is implemented by all three format variants; it
// extends logger.Logger with the background worker entry point that New
// starts in its own goroutine.
type splunkLoggerInterface interface {
	logger.Logger
	worker()
}
    77  
// splunkLogger is the shared core embedded by all three format-specific
// loggers. It owns the HTTP client used to reach the HEC endpoint and the
// channel/worker pair that batches outgoing events.
type splunkLogger struct {
	client    *http.Client
	transport *http.Transport // retained so the worker can CloseIdleConnections on shutdown

	url         string         // full HEC event-collector URL
	auth        string         // value for the Authorization header ("Splunk <token>")
	nullMessage *splunkMessage // template cloned for every outgoing event

	// http compression
	gzipCompression      bool
	gzipCompressionLevel int

	// Advanced options
	postMessagesFrequency time.Duration
	postMessagesBatchSize int
	bufferMaximum         int
	indexAck              bool

	// For synchronization between background worker and logger.
	// We use channel to send messages to worker go routine.
	// All other variables for blocking Close call before we flush all messages to HEC
	stream     chan *splunkMessage
	lock       sync.RWMutex
	closed     bool       // set by the worker once the final flush completed
	closedCond *sync.Cond // non-nil once Close has started; doubles as the "closing" flag
}
   104  
// splunkLoggerInline sends each log line as a structured "inline" event
// (line, source, tag, attrs).
type splunkLoggerInline struct {
	*splunkLogger

	nullEvent *splunkMessageEvent // template event cloned per message
}

// splunkLoggerJSON behaves like splunkLoggerInline, but embeds the line as
// raw JSON when it parses as valid JSON.
type splunkLoggerJSON struct {
	*splunkLoggerInline
}

// splunkLoggerRaw sends the log line as plain text, prefixed with the tag
// and attributes rendered once in New.
type splunkLoggerRaw struct {
	*splunkLogger

	prefix []byte // "tag key=value ... " built at construction time
}
   120  
// splunkMessage is the JSON envelope posted to the HEC event endpoint.
type splunkMessage struct {
	Event      interface{} `json:"event"`
	Time       string      `json:"time"` // fractional seconds since the Unix epoch
	Host       string      `json:"host"`
	Source     string      `json:"source,omitempty"`
	SourceType string      `json:"sourcetype,omitempty"`
	Index      string      `json:"index,omitempty"`
}

// splunkMessageEvent is the "event" payload used by the inline and json
// formats. Line holds either a string or a *json.RawMessage.
type splunkMessageEvent struct {
	Line   interface{}       `json:"line"`
	Source string            `json:"source"`
	Tag    string            `json:"tag,omitempty"`
	Attrs  map[string]string `json:"attrs,omitempty"`
}
   136  
// Supported values for the splunk-format log-opt.
const (
	splunkFormatRaw    = "raw"
	splunkFormatJSON   = "json"
	splunkFormatInline = "inline"
)
   142  
// init registers the driver and its option validator with the logger
// registry. Registration failure is a programmer error (duplicate name),
// hence panic.
func init() {
	if err := logger.RegisterLogDriver(driverName, New); err != nil {
		panic(err)
	}
	if err := logger.RegisterLogOptValidator(driverName, ValidateLogOpt); err != nil {
		panic(err)
	}
}
   151  
   152  // New creates splunk logger driver using configuration passed in context
   153  func New(info logger.Info) (logger.Logger, error) {
   154  	hostname, err := info.Hostname()
   155  	if err != nil {
   156  		return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName)
   157  	}
   158  
   159  	// Parse and validate Splunk URL
   160  	splunkURL, err := parseURL(info)
   161  	if err != nil {
   162  		return nil, err
   163  	}
   164  
   165  	// Splunk Token is required parameter
   166  	splunkToken, ok := info.Config[splunkTokenKey]
   167  	if !ok {
   168  		return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey)
   169  	}
   170  
   171  	// FIXME set minimum TLS version for splunk (see https://github.com/moby/moby/issues/42443)
   172  	tlsConfig := &tls.Config{} //nolint: gosec // G402: TLS MinVersion too low.
   173  
   174  	// Splunk is using autogenerated certificates by default,
   175  	// allow users to trust them with skipping verification
   176  	if insecureSkipVerifyStr, ok := info.Config[splunkInsecureSkipVerifyKey]; ok {
   177  		insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr)
   178  		if err != nil {
   179  			return nil, err
   180  		}
   181  		tlsConfig.InsecureSkipVerify = insecureSkipVerify
   182  	}
   183  
   184  	// If path to the root certificate is provided - load it
   185  	if caPath, ok := info.Config[splunkCAPathKey]; ok {
   186  		caCert, err := os.ReadFile(caPath)
   187  		if err != nil {
   188  			return nil, err
   189  		}
   190  		caPool := x509.NewCertPool()
   191  		caPool.AppendCertsFromPEM(caCert)
   192  		tlsConfig.RootCAs = caPool
   193  	}
   194  
   195  	if caName, ok := info.Config[splunkCANameKey]; ok {
   196  		tlsConfig.ServerName = caName
   197  	}
   198  
   199  	gzipCompression := false
   200  	if gzipCompressionStr, ok := info.Config[splunkGzipCompressionKey]; ok {
   201  		gzipCompression, err = strconv.ParseBool(gzipCompressionStr)
   202  		if err != nil {
   203  			return nil, err
   204  		}
   205  	}
   206  
   207  	gzipCompressionLevel := gzip.DefaultCompression
   208  	if gzipCompressionLevelStr, ok := info.Config[splunkGzipCompressionLevelKey]; ok {
   209  		var err error
   210  		gzipCompressionLevel64, err := strconv.ParseInt(gzipCompressionLevelStr, 10, 32)
   211  		if err != nil {
   212  			return nil, err
   213  		}
   214  		gzipCompressionLevel = int(gzipCompressionLevel64)
   215  		if gzipCompressionLevel < gzip.DefaultCompression || gzipCompressionLevel > gzip.BestCompression {
   216  			err := fmt.Errorf("not supported level '%s' for %s (supported values between %d and %d)",
   217  				gzipCompressionLevelStr, splunkGzipCompressionLevelKey, gzip.DefaultCompression, gzip.BestCompression)
   218  			return nil, err
   219  		}
   220  	}
   221  
   222  	indexAck := false
   223  	if indexAckStr, ok := info.Config[splunkIndexAcknowledgment]; ok {
   224  		indexAck, err = strconv.ParseBool(indexAckStr)
   225  		if err != nil {
   226  			return nil, err
   227  		}
   228  	}
   229  
   230  	transport := &http.Transport{
   231  		TLSClientConfig: tlsConfig,
   232  		Proxy:           http.ProxyFromEnvironment,
   233  	}
   234  	client := &http.Client{
   235  		Transport: transport,
   236  	}
   237  
   238  	source := info.Config[splunkSourceKey]
   239  	sourceType := info.Config[splunkSourceTypeKey]
   240  	index := info.Config[splunkIndexKey]
   241  
   242  	var nullMessage = &splunkMessage{
   243  		Host:       hostname,
   244  		Source:     source,
   245  		SourceType: sourceType,
   246  		Index:      index,
   247  	}
   248  
   249  	// Allow user to remove tag from the messages by setting tag to empty string
   250  	tag := ""
   251  	if tagTemplate, ok := info.Config[tagKey]; !ok || tagTemplate != "" {
   252  		tag, err = loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate)
   253  		if err != nil {
   254  			return nil, err
   255  		}
   256  	}
   257  
   258  	attrs, err := info.ExtraAttributes(nil)
   259  	if err != nil {
   260  		return nil, err
   261  	}
   262  
   263  	var (
   264  		postMessagesFrequency = getAdvancedOptionDuration(envVarPostMessagesFrequency, defaultPostMessagesFrequency)
   265  		postMessagesBatchSize = getAdvancedOptionInt(envVarPostMessagesBatchSize, defaultPostMessagesBatchSize)
   266  		bufferMaximum         = getAdvancedOptionInt(envVarBufferMaximum, defaultBufferMaximum)
   267  		streamChannelSize     = getAdvancedOptionInt(envVarStreamChannelSize, defaultStreamChannelSize)
   268  	)
   269  
   270  	logger := &splunkLogger{
   271  		client:                client,
   272  		transport:             transport,
   273  		url:                   splunkURL.String(),
   274  		auth:                  "Splunk " + splunkToken,
   275  		nullMessage:           nullMessage,
   276  		gzipCompression:       gzipCompression,
   277  		gzipCompressionLevel:  gzipCompressionLevel,
   278  		stream:                make(chan *splunkMessage, streamChannelSize),
   279  		postMessagesFrequency: postMessagesFrequency,
   280  		postMessagesBatchSize: postMessagesBatchSize,
   281  		bufferMaximum:         bufferMaximum,
   282  		indexAck:              indexAck,
   283  	}
   284  
   285  	// By default we verify connection, but we allow use to skip that
   286  	verifyConnection := true
   287  	if verifyConnectionStr, ok := info.Config[splunkVerifyConnectionKey]; ok {
   288  		var err error
   289  		verifyConnection, err = strconv.ParseBool(verifyConnectionStr)
   290  		if err != nil {
   291  			return nil, err
   292  		}
   293  	}
   294  	if verifyConnection {
   295  		err = verifySplunkConnection(logger)
   296  		if err != nil {
   297  			return nil, err
   298  		}
   299  	}
   300  
   301  	var splunkFormat string
   302  	if splunkFormatParsed, ok := info.Config[splunkFormatKey]; ok {
   303  		switch splunkFormatParsed {
   304  		case splunkFormatInline:
   305  		case splunkFormatJSON:
   306  		case splunkFormatRaw:
   307  		default:
   308  			return nil, fmt.Errorf("Unknown format specified %s, supported formats are inline, json and raw", splunkFormat)
   309  		}
   310  		splunkFormat = splunkFormatParsed
   311  	} else {
   312  		splunkFormat = splunkFormatInline
   313  	}
   314  
   315  	var loggerWrapper splunkLoggerInterface
   316  
   317  	switch splunkFormat {
   318  	case splunkFormatInline:
   319  		nullEvent := &splunkMessageEvent{
   320  			Tag:   tag,
   321  			Attrs: attrs,
   322  		}
   323  
   324  		loggerWrapper = &splunkLoggerInline{logger, nullEvent}
   325  	case splunkFormatJSON:
   326  		nullEvent := &splunkMessageEvent{
   327  			Tag:   tag,
   328  			Attrs: attrs,
   329  		}
   330  
   331  		loggerWrapper = &splunkLoggerJSON{&splunkLoggerInline{logger, nullEvent}}
   332  	case splunkFormatRaw:
   333  		var prefix bytes.Buffer
   334  		if tag != "" {
   335  			prefix.WriteString(tag)
   336  			prefix.WriteString(" ")
   337  		}
   338  		for key, value := range attrs {
   339  			prefix.WriteString(key)
   340  			prefix.WriteString("=")
   341  			prefix.WriteString(value)
   342  			prefix.WriteString(" ")
   343  		}
   344  
   345  		loggerWrapper = &splunkLoggerRaw{logger, prefix.Bytes()}
   346  	default:
   347  		return nil, fmt.Errorf("Unexpected format %s", splunkFormat)
   348  	}
   349  
   350  	go loggerWrapper.worker()
   351  
   352  	return loggerWrapper, nil
   353  }
   354  
   355  func (l *splunkLoggerInline) Log(msg *logger.Message) error {
   356  	message := l.createSplunkMessage(msg)
   357  
   358  	event := *l.nullEvent
   359  	event.Line = string(msg.Line)
   360  	event.Source = msg.Source
   361  
   362  	message.Event = &event
   363  	logger.PutMessage(msg)
   364  	return l.queueMessageAsync(message)
   365  }
   366  
   367  func (l *splunkLoggerJSON) Log(msg *logger.Message) error {
   368  	message := l.createSplunkMessage(msg)
   369  	event := *l.nullEvent
   370  
   371  	var rawJSONMessage json.RawMessage
   372  	if err := json.Unmarshal(msg.Line, &rawJSONMessage); err == nil {
   373  		event.Line = &rawJSONMessage
   374  	} else {
   375  		event.Line = string(msg.Line)
   376  	}
   377  
   378  	event.Source = msg.Source
   379  
   380  	message.Event = &event
   381  	logger.PutMessage(msg)
   382  	return l.queueMessageAsync(message)
   383  }
   384  
   385  func (l *splunkLoggerRaw) Log(msg *logger.Message) error {
   386  	// empty or whitespace-only messages are not accepted by HEC
   387  	if strings.TrimSpace(string(msg.Line)) == "" {
   388  		return nil
   389  	}
   390  
   391  	message := l.createSplunkMessage(msg)
   392  
   393  	message.Event = string(append(l.prefix, msg.Line...))
   394  	logger.PutMessage(msg)
   395  	return l.queueMessageAsync(message)
   396  }
   397  
   398  func (l *splunkLogger) queueMessageAsync(message *splunkMessage) error {
   399  	l.lock.RLock()
   400  	defer l.lock.RUnlock()
   401  	if l.closedCond != nil {
   402  		return fmt.Errorf("%s: driver is closed", driverName)
   403  	}
   404  	l.stream <- message
   405  	return nil
   406  }
   407  
   408  func (l *splunkLogger) worker() {
   409  	timer := time.NewTicker(l.postMessagesFrequency)
   410  	var messages []*splunkMessage
   411  	for {
   412  		select {
   413  		case message, open := <-l.stream:
   414  			if !open {
   415  				l.postMessages(messages, true)
   416  				l.lock.Lock()
   417  				defer l.lock.Unlock()
   418  				l.transport.CloseIdleConnections()
   419  				l.closed = true
   420  				l.closedCond.Signal()
   421  				return
   422  			}
   423  			messages = append(messages, message)
   424  			// Only sending when we get exactly to the batch size,
   425  			// This also helps not to fire postMessages on every new message,
   426  			// when previous try failed.
   427  			if len(messages)%l.postMessagesBatchSize == 0 {
   428  				messages = l.postMessages(messages, false)
   429  			}
   430  		case <-timer.C:
   431  			messages = l.postMessages(messages, false)
   432  		}
   433  	}
   434  }
   435  
// postMessages attempts to deliver messages to HEC in batches of
// postMessagesBatchSize and returns the slice of messages that were NOT
// delivered, to be retried on the next call. When the undelivered backlog
// reaches bufferMaximum, or lastChance is set (driver is closing), the
// affected messages are dumped to the daemon log instead of being retained.
func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) []*splunkMessage {
	messagesLen := len(messages)

	// A single timeout covers the entire run, across all batches.
	ctx, cancel := context.WithTimeout(context.Background(), batchSendTimeout)
	defer cancel()

	for i := 0; i < messagesLen; i += l.postMessagesBatchSize {
		upperBound := i + l.postMessagesBatchSize
		if upperBound > messagesLen {
			upperBound = messagesLen
		}

		if err := l.tryPostMessages(ctx, messages[i:upperBound]); err != nil {
			logrus.WithError(err).WithField("module", "logger/splunk").Warn("Error while sending logs")
			if messagesLen-i >= l.bufferMaximum || lastChance {
				// If this is last chance - print them all to the daemon log
				if lastChance {
					upperBound = messagesLen
				}
				// Not all sent, but buffer has got to its maximum, let's log all messages
				// we could not send and return buffer minus one batch size
				for j := i; j < upperBound; j++ {
					if jsonEvent, err := json.Marshal(messages[j]); err != nil {
						logrus.Error(err)
					} else {
						logrus.Error(fmt.Errorf("Failed to send a message '%s'", string(jsonEvent)))
					}
				}
				// Keep only what was neither sent nor dumped.
				return messages[upperBound:messagesLen]
			}
			// Not all sent, returning buffer from where we have not sent messages
			return messages[i:messagesLen]
		}
	}
	// All sent, return empty buffer
	return messages[:0]
}
   473  
   474  func (l *splunkLogger) tryPostMessages(ctx context.Context, messages []*splunkMessage) error {
   475  	if len(messages) == 0 {
   476  		return nil
   477  	}
   478  	var buffer bytes.Buffer
   479  	var writer io.Writer
   480  	var gzipWriter *gzip.Writer
   481  	var err error
   482  	// If gzip compression is enabled - create gzip writer with specified compression
   483  	// level. If gzip compression is disabled, use standard buffer as a writer
   484  	if l.gzipCompression {
   485  		gzipWriter, err = gzip.NewWriterLevel(&buffer, l.gzipCompressionLevel)
   486  		if err != nil {
   487  			return err
   488  		}
   489  		writer = gzipWriter
   490  	} else {
   491  		writer = &buffer
   492  	}
   493  	for _, message := range messages {
   494  		jsonEvent, err := json.Marshal(message)
   495  		if err != nil {
   496  			return err
   497  		}
   498  		if _, err := writer.Write(jsonEvent); err != nil {
   499  			return err
   500  		}
   501  	}
   502  	// If gzip compression is enabled, tell it, that we are done
   503  	if l.gzipCompression {
   504  		err = gzipWriter.Close()
   505  		if err != nil {
   506  			return err
   507  		}
   508  	}
   509  	req, err := http.NewRequest(http.MethodPost, l.url, bytes.NewBuffer(buffer.Bytes()))
   510  	if err != nil {
   511  		return err
   512  	}
   513  	req = req.WithContext(ctx)
   514  	req.Header.Set("Authorization", l.auth)
   515  	// Tell if we are sending gzip compressed body
   516  	if l.gzipCompression {
   517  		req.Header.Set("Content-Encoding", "gzip")
   518  	}
   519  	// Set the correct header if index acknowledgment is enabled
   520  	if l.indexAck {
   521  		requestChannel, err := uuid.NewRandom()
   522  		if err != nil {
   523  			return err
   524  		}
   525  		req.Header.Set("X-Splunk-Request-Channel", requestChannel.String())
   526  	}
   527  	resp, err := l.client.Do(req)
   528  	if err != nil {
   529  		return err
   530  	}
   531  	defer func() {
   532  		pools.Copy(io.Discard, resp.Body)
   533  		resp.Body.Close()
   534  	}()
   535  	if resp.StatusCode != http.StatusOK {
   536  		rdr := io.LimitReader(resp.Body, maxResponseSize)
   537  		body, err := io.ReadAll(rdr)
   538  		if err != nil {
   539  			return err
   540  		}
   541  		return fmt.Errorf("%s: failed to send event - %s - %s", driverName, resp.Status, string(body))
   542  	}
   543  	return nil
   544  }
   545  
// Close shuts the driver down. Under the write lock it closes the stream
// (telling the worker to flush and exit) and waits on closedCond until the
// worker confirms completion by setting closed. closedCond also serves as
// the "closing has started" flag checked by queueMessageAsync, so repeated
// Close calls are no-ops.
func (l *splunkLogger) Close() error {
	l.lock.Lock()
	defer l.lock.Unlock()
	if l.closedCond == nil {
		l.closedCond = sync.NewCond(&l.lock)
		close(l.stream)
		for !l.closed {
			l.closedCond.Wait()
		}
	}
	return nil
}
   558  
// Name returns the name of the log driver ("splunk").
func (l *splunkLogger) Name() string {
	return driverName
}
   562  
   563  func (l *splunkLogger) createSplunkMessage(msg *logger.Message) *splunkMessage {
   564  	message := *l.nullMessage
   565  	message.Time = fmt.Sprintf("%f", float64(msg.Timestamp.UnixNano())/float64(time.Second))
   566  	return &message
   567  }
   568  
   569  // ValidateLogOpt looks for all supported by splunk driver options
   570  func ValidateLogOpt(cfg map[string]string) error {
   571  	for key := range cfg {
   572  		switch key {
   573  		case splunkURLKey:
   574  		case splunkTokenKey:
   575  		case splunkSourceKey:
   576  		case splunkSourceTypeKey:
   577  		case splunkIndexKey:
   578  		case splunkCAPathKey:
   579  		case splunkCANameKey:
   580  		case splunkInsecureSkipVerifyKey:
   581  		case splunkFormatKey:
   582  		case splunkVerifyConnectionKey:
   583  		case splunkGzipCompressionKey:
   584  		case splunkGzipCompressionLevelKey:
   585  		case splunkIndexAcknowledgment:
   586  		case envKey:
   587  		case envRegexKey:
   588  		case labelsKey:
   589  		case labelsRegexKey:
   590  		case tagKey:
   591  		default:
   592  			return fmt.Errorf("unknown log opt '%s' for %s log driver", key, driverName)
   593  		}
   594  	}
   595  	return nil
   596  }
   597  
   598  func parseURL(info logger.Info) (*url.URL, error) {
   599  	splunkURLStr, ok := info.Config[splunkURLKey]
   600  	if !ok {
   601  		return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey)
   602  	}
   603  
   604  	splunkURL, err := url.Parse(splunkURLStr)
   605  	if err != nil {
   606  		return nil, fmt.Errorf("%s: failed to parse %s as url value in %s", driverName, splunkURLStr, splunkURLKey)
   607  	}
   608  
   609  	if !splunkURL.IsAbs() ||
   610  		(splunkURL.Scheme != "http" && splunkURL.Scheme != "https") ||
   611  		(splunkURL.Path != "" && splunkURL.Path != "/") ||
   612  		splunkURL.RawQuery != "" ||
   613  		splunkURL.Fragment != "" {
   614  		return nil, fmt.Errorf("%s: expected format scheme://dns_name_or_ip:port for %s", driverName, splunkURLKey)
   615  	}
   616  
   617  	splunkURL.Path = "/services/collector/event/1.0"
   618  
   619  	return splunkURL, nil
   620  }
   621  
   622  func verifySplunkConnection(l *splunkLogger) error {
   623  	req, err := http.NewRequest(http.MethodOptions, l.url, nil)
   624  	if err != nil {
   625  		return err
   626  	}
   627  	resp, err := l.client.Do(req)
   628  	if err != nil {
   629  		return err
   630  	}
   631  	defer func() {
   632  		pools.Copy(io.Discard, resp.Body)
   633  		resp.Body.Close()
   634  	}()
   635  
   636  	if resp.StatusCode != http.StatusOK {
   637  		rdr := io.LimitReader(resp.Body, maxResponseSize)
   638  		body, err := io.ReadAll(rdr)
   639  		if err != nil {
   640  			return err
   641  		}
   642  		return fmt.Errorf("%s: failed to verify connection - %s - %s", driverName, resp.Status, string(body))
   643  	}
   644  	return nil
   645  }
   646  
   647  func getAdvancedOptionDuration(envName string, defaultValue time.Duration) time.Duration {
   648  	valueStr := os.Getenv(envName)
   649  	if valueStr == "" {
   650  		return defaultValue
   651  	}
   652  	parsedValue, err := time.ParseDuration(valueStr)
   653  	if err != nil {
   654  		logrus.Error(fmt.Sprintf("Failed to parse value of %s as duration. Using default %v. %v", envName, defaultValue, err))
   655  		return defaultValue
   656  	}
   657  	return parsedValue
   658  }
   659  
   660  func getAdvancedOptionInt(envName string, defaultValue int) int {
   661  	valueStr := os.Getenv(envName)
   662  	if valueStr == "" {
   663  		return defaultValue
   664  	}
   665  	parsedValue, err := strconv.ParseInt(valueStr, 10, 32)
   666  	if err != nil {
   667  		logrus.Error(fmt.Sprintf("Failed to parse value of %s as integer. Using default %d. %v", envName, defaultValue, err))
   668  		return defaultValue
   669  	}
   670  	return int(parsedValue)
   671  }