gopkg.in/dotcloud/docker.v1@v1.13.1/daemon/logger/splunk/splunk.go (about)

     1  // Package splunk provides the log driver for forwarding server logs to
     2  // Splunk HTTP Event Collector endpoint.
     3  package splunk
     4  
     5  import (
     6  	"bytes"
     7  	"compress/gzip"
     8  	"crypto/tls"
     9  	"crypto/x509"
    10  	"encoding/json"
    11  	"fmt"
    12  	"io"
    13  	"io/ioutil"
    14  	"net/http"
    15  	"net/url"
    16  	"os"
    17  	"strconv"
    18  	"sync"
    19  	"time"
    20  
    21  	"github.com/Sirupsen/logrus"
    22  	"github.com/docker/docker/daemon/logger"
    23  	"github.com/docker/docker/daemon/logger/loggerutils"
    24  	"github.com/docker/docker/pkg/urlutil"
    25  )
    26  
// Names of the configuration options accepted by this driver
// (supplied via `--log-opt key=value`), plus the generic env/labels/tag keys.
const (
	driverName                    = "splunk"
	splunkURLKey                  = "splunk-url"
	splunkTokenKey                = "splunk-token"
	splunkSourceKey               = "splunk-source"
	splunkSourceTypeKey           = "splunk-sourcetype"
	splunkIndexKey                = "splunk-index"
	splunkCAPathKey               = "splunk-capath"
	splunkCANameKey               = "splunk-caname"
	splunkInsecureSkipVerifyKey   = "splunk-insecureskipverify"
	splunkFormatKey               = "splunk-format"
	splunkVerifyConnectionKey     = "splunk-verify-connection"
	splunkGzipCompressionKey      = "splunk-gzip"
	splunkGzipCompressionLevelKey = "splunk-gzip-level"
	envKey                        = "env"
	labelsKey                     = "labels"
	tagKey                        = "tag"
)
    45  
// Default values for the advanced batching options (overridable via the
// environment variables below).
const (
	// How often buffered messages are flushed when the batch size is not reached.
	defaultPostMessagesFrequency = 5 * time.Second
	// Maximum number of messages sent in a single HTTP request.
	defaultPostMessagesBatchSize = 1000
	// Maximum number of unsent messages kept for retry before they are
	// dumped to the daemon log and dropped (see postMessages).
	defaultBufferMaximum = 10 * defaultPostMessagesBatchSize
	// Capacity of the channel between Log callers and the background worker.
	defaultStreamChannelSize = 4 * defaultPostMessagesBatchSize
)
    56  
// Environment variables that tune the advanced batching options above;
// read once per logger creation in New.
const (
	envVarPostMessagesFrequency = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY"
	envVarPostMessagesBatchSize = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE"
	envVarBufferMaximum         = "SPLUNK_LOGGING_DRIVER_BUFFER_MAX"
	envVarStreamChannelSize     = "SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE"
)
    63  
// splunkLoggerInterface extends logger.Logger with the background worker
// goroutine that batches and posts messages; New starts it before
// returning the wrapper.
type splunkLoggerInterface interface {
	logger.Logger
	worker()
}
    68  
// splunkLogger is the shared core of the three format variants. It holds
// the HTTP client/transport, HEC endpoint and auth header, the static
// message template, and the batching/shutdown machinery.
type splunkLogger struct {
	client    *http.Client
	transport *http.Transport

	url         string
	auth        string // value for the Authorization header ("Splunk <token>")
	nullMessage *splunkMessage

	// http compression
	gzipCompression      bool
	gzipCompressionLevel int

	// Advanced options (see the SPLUNK_LOGGING_DRIVER_* env vars)
	postMessagesFrequency time.Duration
	postMessagesBatchSize int
	bufferMaximum         int

	// For synchronization between background worker and logger.
	// We use channel to send messages to worker go routine.
	// All other variables for blocking Close call before we flush all messages to HEC.
	// closedCond is nil until Close is called; it doubles as the "closed" flag
	// checked by queueMessageAsync.
	stream     chan *splunkMessage
	lock       sync.RWMutex
	closed     bool
	closedCond *sync.Cond
}
    94  
// splunkLoggerInline sends each log line as a structured event with the
// line carried as a plain string; nullEvent is the template holding the
// constant tag/attrs.
type splunkLoggerInline struct {
	*splunkLogger

	nullEvent *splunkMessageEvent
}
   100  
// splunkLoggerJSON behaves like splunkLoggerInline, but embeds the log
// line as raw JSON when it parses as valid JSON.
type splunkLoggerJSON struct {
	*splunkLoggerInline
}
   104  
// splunkLoggerRaw sends each log line as a plain string event, prefixed
// with the pre-rendered tag and attributes.
type splunkLoggerRaw struct {
	*splunkLogger

	prefix []byte
}
   110  
// splunkMessage is the JSON envelope posted to the HEC event endpoint.
// Time is an epoch timestamp string; Event is either a plain string (raw
// format) or a *splunkMessageEvent (inline/json formats).
type splunkMessage struct {
	Event      interface{} `json:"event"`
	Time       string      `json:"time"`
	Host       string      `json:"host"`
	Source     string      `json:"source,omitempty"`
	SourceType string      `json:"sourcetype,omitempty"`
	Index      string      `json:"index,omitempty"`
}
   119  
// splunkMessageEvent is the structured event payload used by the inline
// and json formats. Line is a string or a json.RawMessage; Source is the
// stream name taken from the logger.Message.
type splunkMessageEvent struct {
	Line   interface{}       `json:"line"`
	Source string            `json:"source"`
	Tag    string            `json:"tag,omitempty"`
	Attrs  map[string]string `json:"attrs,omitempty"`
}
   126  
// Accepted values for the splunk-format option; inline is the default.
const (
	splunkFormatRaw    = "raw"
	splunkFormatJSON   = "json"
	splunkFormatInline = "inline"
)
   132  
// init registers the splunk driver and its option validator with the
// daemon's logger registry; a registration failure aborts the daemon.
func init() {
	if err := logger.RegisterLogDriver(driverName, New); err != nil {
		logrus.Fatal(err)
	}
	if err := logger.RegisterLogOptValidator(driverName, ValidateLogOpt); err != nil {
		logrus.Fatal(err)
	}
}
   141  
   142  // New creates splunk logger driver using configuration passed in context
   143  func New(ctx logger.Context) (logger.Logger, error) {
   144  	hostname, err := ctx.Hostname()
   145  	if err != nil {
   146  		return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName)
   147  	}
   148  
   149  	// Parse and validate Splunk URL
   150  	splunkURL, err := parseURL(ctx)
   151  	if err != nil {
   152  		return nil, err
   153  	}
   154  
   155  	// Splunk Token is required parameter
   156  	splunkToken, ok := ctx.Config[splunkTokenKey]
   157  	if !ok {
   158  		return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey)
   159  	}
   160  
   161  	tlsConfig := &tls.Config{}
   162  
   163  	// Splunk is using autogenerated certificates by default,
   164  	// allow users to trust them with skipping verification
   165  	if insecureSkipVerifyStr, ok := ctx.Config[splunkInsecureSkipVerifyKey]; ok {
   166  		insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr)
   167  		if err != nil {
   168  			return nil, err
   169  		}
   170  		tlsConfig.InsecureSkipVerify = insecureSkipVerify
   171  	}
   172  
   173  	// If path to the root certificate is provided - load it
   174  	if caPath, ok := ctx.Config[splunkCAPathKey]; ok {
   175  		caCert, err := ioutil.ReadFile(caPath)
   176  		if err != nil {
   177  			return nil, err
   178  		}
   179  		caPool := x509.NewCertPool()
   180  		caPool.AppendCertsFromPEM(caCert)
   181  		tlsConfig.RootCAs = caPool
   182  	}
   183  
   184  	if caName, ok := ctx.Config[splunkCANameKey]; ok {
   185  		tlsConfig.ServerName = caName
   186  	}
   187  
   188  	gzipCompression := false
   189  	if gzipCompressionStr, ok := ctx.Config[splunkGzipCompressionKey]; ok {
   190  		gzipCompression, err = strconv.ParseBool(gzipCompressionStr)
   191  		if err != nil {
   192  			return nil, err
   193  		}
   194  	}
   195  
   196  	gzipCompressionLevel := gzip.DefaultCompression
   197  	if gzipCompressionLevelStr, ok := ctx.Config[splunkGzipCompressionLevelKey]; ok {
   198  		var err error
   199  		gzipCompressionLevel64, err := strconv.ParseInt(gzipCompressionLevelStr, 10, 32)
   200  		if err != nil {
   201  			return nil, err
   202  		}
   203  		gzipCompressionLevel = int(gzipCompressionLevel64)
   204  		if gzipCompressionLevel < gzip.DefaultCompression || gzipCompressionLevel > gzip.BestCompression {
   205  			err := fmt.Errorf("Not supported level '%s' for %s (supported values between %d and %d).",
   206  				gzipCompressionLevelStr, splunkGzipCompressionLevelKey, gzip.DefaultCompression, gzip.BestCompression)
   207  			return nil, err
   208  		}
   209  	}
   210  
   211  	transport := &http.Transport{
   212  		TLSClientConfig: tlsConfig,
   213  	}
   214  	client := &http.Client{
   215  		Transport: transport,
   216  	}
   217  
   218  	source := ctx.Config[splunkSourceKey]
   219  	sourceType := ctx.Config[splunkSourceTypeKey]
   220  	index := ctx.Config[splunkIndexKey]
   221  
   222  	var nullMessage = &splunkMessage{
   223  		Host:       hostname,
   224  		Source:     source,
   225  		SourceType: sourceType,
   226  		Index:      index,
   227  	}
   228  
   229  	// Allow user to remove tag from the messages by setting tag to empty string
   230  	tag := ""
   231  	if tagTemplate, ok := ctx.Config[tagKey]; !ok || tagTemplate != "" {
   232  		tag, err = loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate)
   233  		if err != nil {
   234  			return nil, err
   235  		}
   236  	}
   237  
   238  	attrs := ctx.ExtraAttributes(nil)
   239  
   240  	var (
   241  		postMessagesFrequency = getAdvancedOptionDuration(envVarPostMessagesFrequency, defaultPostMessagesFrequency)
   242  		postMessagesBatchSize = getAdvancedOptionInt(envVarPostMessagesBatchSize, defaultPostMessagesBatchSize)
   243  		bufferMaximum         = getAdvancedOptionInt(envVarBufferMaximum, defaultBufferMaximum)
   244  		streamChannelSize     = getAdvancedOptionInt(envVarStreamChannelSize, defaultStreamChannelSize)
   245  	)
   246  
   247  	logger := &splunkLogger{
   248  		client:                client,
   249  		transport:             transport,
   250  		url:                   splunkURL.String(),
   251  		auth:                  "Splunk " + splunkToken,
   252  		nullMessage:           nullMessage,
   253  		gzipCompression:       gzipCompression,
   254  		gzipCompressionLevel:  gzipCompressionLevel,
   255  		stream:                make(chan *splunkMessage, streamChannelSize),
   256  		postMessagesFrequency: postMessagesFrequency,
   257  		postMessagesBatchSize: postMessagesBatchSize,
   258  		bufferMaximum:         bufferMaximum,
   259  	}
   260  
   261  	// By default we verify connection, but we allow use to skip that
   262  	verifyConnection := true
   263  	if verifyConnectionStr, ok := ctx.Config[splunkVerifyConnectionKey]; ok {
   264  		var err error
   265  		verifyConnection, err = strconv.ParseBool(verifyConnectionStr)
   266  		if err != nil {
   267  			return nil, err
   268  		}
   269  	}
   270  	if verifyConnection {
   271  		err = verifySplunkConnection(logger)
   272  		if err != nil {
   273  			return nil, err
   274  		}
   275  	}
   276  
   277  	var splunkFormat string
   278  	if splunkFormatParsed, ok := ctx.Config[splunkFormatKey]; ok {
   279  		switch splunkFormatParsed {
   280  		case splunkFormatInline:
   281  		case splunkFormatJSON:
   282  		case splunkFormatRaw:
   283  		default:
   284  			return nil, fmt.Errorf("Unknown format specified %s, supported formats are inline, json and raw", splunkFormat)
   285  		}
   286  		splunkFormat = splunkFormatParsed
   287  	} else {
   288  		splunkFormat = splunkFormatInline
   289  	}
   290  
   291  	var loggerWrapper splunkLoggerInterface
   292  
   293  	switch splunkFormat {
   294  	case splunkFormatInline:
   295  		nullEvent := &splunkMessageEvent{
   296  			Tag:   tag,
   297  			Attrs: attrs,
   298  		}
   299  
   300  		loggerWrapper = &splunkLoggerInline{logger, nullEvent}
   301  	case splunkFormatJSON:
   302  		nullEvent := &splunkMessageEvent{
   303  			Tag:   tag,
   304  			Attrs: attrs,
   305  		}
   306  
   307  		loggerWrapper = &splunkLoggerJSON{&splunkLoggerInline{logger, nullEvent}}
   308  	case splunkFormatRaw:
   309  		var prefix bytes.Buffer
   310  		if tag != "" {
   311  			prefix.WriteString(tag)
   312  			prefix.WriteString(" ")
   313  		}
   314  		for key, value := range attrs {
   315  			prefix.WriteString(key)
   316  			prefix.WriteString("=")
   317  			prefix.WriteString(value)
   318  			prefix.WriteString(" ")
   319  		}
   320  
   321  		loggerWrapper = &splunkLoggerRaw{logger, prefix.Bytes()}
   322  	default:
   323  		return nil, fmt.Errorf("Unexpected format %s", splunkFormat)
   324  	}
   325  
   326  	go loggerWrapper.worker()
   327  
   328  	return loggerWrapper, nil
   329  }
   330  
   331  func (l *splunkLoggerInline) Log(msg *logger.Message) error {
   332  	message := l.createSplunkMessage(msg)
   333  
   334  	event := *l.nullEvent
   335  	event.Line = string(msg.Line)
   336  	event.Source = msg.Source
   337  
   338  	message.Event = &event
   339  
   340  	return l.queueMessageAsync(message)
   341  }
   342  
   343  func (l *splunkLoggerJSON) Log(msg *logger.Message) error {
   344  	message := l.createSplunkMessage(msg)
   345  	event := *l.nullEvent
   346  
   347  	var rawJSONMessage json.RawMessage
   348  	if err := json.Unmarshal(msg.Line, &rawJSONMessage); err == nil {
   349  		event.Line = &rawJSONMessage
   350  	} else {
   351  		event.Line = string(msg.Line)
   352  	}
   353  
   354  	event.Source = msg.Source
   355  
   356  	message.Event = &event
   357  
   358  	return l.queueMessageAsync(message)
   359  }
   360  
   361  func (l *splunkLoggerRaw) Log(msg *logger.Message) error {
   362  	message := l.createSplunkMessage(msg)
   363  
   364  	message.Event = string(append(l.prefix, msg.Line...))
   365  
   366  	return l.queueMessageAsync(message)
   367  }
   368  
// queueMessageAsync hands a message to the background worker via the
// stream channel. The read lock excludes Close, which (under the write
// lock) sets closedCond before closing the channel — so a non-nil
// closedCond here means the channel is (or is about to be) closed, and
// checking it prevents a send on a closed channel.
func (l *splunkLogger) queueMessageAsync(message *splunkMessage) error {
	l.lock.RLock()
	defer l.lock.RUnlock()
	if l.closedCond != nil {
		return fmt.Errorf("%s: driver is closed", driverName)
	}
	l.stream <- message
	return nil
}
   378  
   379  func (l *splunkLogger) worker() {
   380  	timer := time.NewTicker(l.postMessagesFrequency)
   381  	var messages []*splunkMessage
   382  	for {
   383  		select {
   384  		case message, open := <-l.stream:
   385  			if !open {
   386  				l.postMessages(messages, true)
   387  				l.lock.Lock()
   388  				defer l.lock.Unlock()
   389  				l.transport.CloseIdleConnections()
   390  				l.closed = true
   391  				l.closedCond.Signal()
   392  				return
   393  			}
   394  			messages = append(messages, message)
   395  			// Only sending when we get exactly to the batch size,
   396  			// This also helps not to fire postMessages on every new message,
   397  			// when previous try failed.
   398  			if len(messages)%l.postMessagesBatchSize == 0 {
   399  				messages = l.postMessages(messages, false)
   400  			}
   401  		case <-timer.C:
   402  			messages = l.postMessages(messages, false)
   403  		}
   404  	}
   405  }
   406  
// postMessages attempts to deliver the buffered messages to HEC in chunks
// of postMessagesBatchSize, returning the slice of messages that remain
// unsent so the caller can retry them later. When the unsent backlog has
// reached bufferMaximum — or when lastChance is set during shutdown — the
// undeliverable messages are written to the daemon log and dropped.
func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) []*splunkMessage {
	messagesLen := len(messages)
	for i := 0; i < messagesLen; i += l.postMessagesBatchSize {
		upperBound := i + l.postMessagesBatchSize
		if upperBound > messagesLen {
			upperBound = messagesLen
		}
		if err := l.tryPostMessages(messages[i:upperBound]); err != nil {
			logrus.Error(err)
			if messagesLen-i >= l.bufferMaximum || lastChance {
				// If this is last chance - print them all to the daemon log
				if lastChance {
					upperBound = messagesLen
				}
				// Not all sent, but buffer has got to its maximum, let's log all messages
				// we could not send and return buffer minus one batch size
				for j := i; j < upperBound; j++ {
					if jsonEvent, err := json.Marshal(messages[j]); err != nil {
						logrus.Error(err)
					} else {
						logrus.Error(fmt.Errorf("Failed to send a message '%s'", string(jsonEvent)))
					}
				}
				return messages[upperBound:messagesLen]
			}
			// Not all sent, returning buffer from where we have not sent messages
			return messages[i:messagesLen]
		}
	}
	// All sent, return empty buffer (keeps the backing array for reuse)
	return messages[:0]
}
   439  
// tryPostMessages serializes the given messages into a single POST body
// (concatenated JSON objects, optionally gzip-compressed) and sends it to
// the HEC endpoint. A non-200 response is converted into an error carrying
// the response status and body. A nil return means all messages in the
// slice were accepted.
func (l *splunkLogger) tryPostMessages(messages []*splunkMessage) error {
	if len(messages) == 0 {
		return nil
	}
	var buffer bytes.Buffer
	var writer io.Writer
	var gzipWriter *gzip.Writer
	var err error
	// If gzip compression is enabled - create gzip writer with specified compression
	// level. If gzip compression is disabled, use standard buffer as a writer
	if l.gzipCompression {
		gzipWriter, err = gzip.NewWriterLevel(&buffer, l.gzipCompressionLevel)
		if err != nil {
			return err
		}
		writer = gzipWriter
	} else {
		writer = &buffer
	}
	for _, message := range messages {
		jsonEvent, err := json.Marshal(message)
		if err != nil {
			return err
		}
		if _, err := writer.Write(jsonEvent); err != nil {
			return err
		}
	}
	// If gzip compression is enabled, tell it, that we are done
	// (Close flushes the remaining compressed data into buffer)
	if l.gzipCompression {
		err = gzipWriter.Close()
		if err != nil {
			return err
		}
	}
	req, err := http.NewRequest("POST", l.url, bytes.NewBuffer(buffer.Bytes()))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", l.auth)
	// Tell if we are sending gzip compressed body
	if l.gzipCompression {
		req.Header.Set("Content-Encoding", "gzip")
	}
	res, err := l.client.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		var body []byte
		body, err = ioutil.ReadAll(res.Body)
		if err != nil {
			return err
		}
		return fmt.Errorf("%s: failed to send event - %s - %s", driverName, res.Status, body)
	}
	// Drain the body so the transport can reuse the connection.
	io.Copy(ioutil.Discard, res.Body)
	return nil
}
   500  
// Close shuts the driver down: it creates closedCond (which makes further
// queueMessageAsync calls fail), closes the stream channel so the worker
// drains and exits, and blocks on closedCond until the worker has set
// closed. Subsequent calls are no-ops because closedCond is already set.
func (l *splunkLogger) Close() error {
	l.lock.Lock()
	defer l.lock.Unlock()
	if l.closedCond == nil {
		l.closedCond = sync.NewCond(&l.lock)
		close(l.stream)
		for !l.closed {
			l.closedCond.Wait()
		}
	}
	return nil
}
   513  
// Name returns the name of the log driver ("splunk").
func (l *splunkLogger) Name() string {
	return driverName
}
   517  
   518  func (l *splunkLogger) createSplunkMessage(msg *logger.Message) *splunkMessage {
   519  	message := *l.nullMessage
   520  	message.Time = fmt.Sprintf("%f", float64(msg.Timestamp.UnixNano())/float64(time.Second))
   521  	return &message
   522  }
   523  
   524  // ValidateLogOpt looks for all supported by splunk driver options
   525  func ValidateLogOpt(cfg map[string]string) error {
   526  	for key := range cfg {
   527  		switch key {
   528  		case splunkURLKey:
   529  		case splunkTokenKey:
   530  		case splunkSourceKey:
   531  		case splunkSourceTypeKey:
   532  		case splunkIndexKey:
   533  		case splunkCAPathKey:
   534  		case splunkCANameKey:
   535  		case splunkInsecureSkipVerifyKey:
   536  		case splunkFormatKey:
   537  		case splunkVerifyConnectionKey:
   538  		case splunkGzipCompressionKey:
   539  		case splunkGzipCompressionLevelKey:
   540  		case envKey:
   541  		case labelsKey:
   542  		case tagKey:
   543  		default:
   544  			return fmt.Errorf("unknown log opt '%s' for %s log driver", key, driverName)
   545  		}
   546  	}
   547  	return nil
   548  }
   549  
   550  func parseURL(ctx logger.Context) (*url.URL, error) {
   551  	splunkURLStr, ok := ctx.Config[splunkURLKey]
   552  	if !ok {
   553  		return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey)
   554  	}
   555  
   556  	splunkURL, err := url.Parse(splunkURLStr)
   557  	if err != nil {
   558  		return nil, fmt.Errorf("%s: failed to parse %s as url value in %s", driverName, splunkURLStr, splunkURLKey)
   559  	}
   560  
   561  	if !urlutil.IsURL(splunkURLStr) ||
   562  		!splunkURL.IsAbs() ||
   563  		(splunkURL.Path != "" && splunkURL.Path != "/") ||
   564  		splunkURL.RawQuery != "" ||
   565  		splunkURL.Fragment != "" {
   566  		return nil, fmt.Errorf("%s: expected format scheme://dns_name_or_ip:port for %s", driverName, splunkURLKey)
   567  	}
   568  
   569  	splunkURL.Path = "/services/collector/event/1.0"
   570  
   571  	return splunkURL, nil
   572  }
   573  
   574  func verifySplunkConnection(l *splunkLogger) error {
   575  	req, err := http.NewRequest(http.MethodOptions, l.url, nil)
   576  	if err != nil {
   577  		return err
   578  	}
   579  	res, err := l.client.Do(req)
   580  	if err != nil {
   581  		return err
   582  	}
   583  	if res.Body != nil {
   584  		defer res.Body.Close()
   585  	}
   586  	if res.StatusCode != http.StatusOK {
   587  		var body []byte
   588  		body, err = ioutil.ReadAll(res.Body)
   589  		if err != nil {
   590  			return err
   591  		}
   592  		return fmt.Errorf("%s: failed to verify connection - %s - %s", driverName, res.Status, body)
   593  	}
   594  	return nil
   595  }
   596  
   597  func getAdvancedOptionDuration(envName string, defaultValue time.Duration) time.Duration {
   598  	valueStr := os.Getenv(envName)
   599  	if valueStr == "" {
   600  		return defaultValue
   601  	}
   602  	parsedValue, err := time.ParseDuration(valueStr)
   603  	if err != nil {
   604  		logrus.Error(fmt.Sprintf("Failed to parse value of %s as duration. Using default %v. %v", envName, defaultValue, err))
   605  		return defaultValue
   606  	}
   607  	return parsedValue
   608  }
   609  
   610  func getAdvancedOptionInt(envName string, defaultValue int) int {
   611  	valueStr := os.Getenv(envName)
   612  	if valueStr == "" {
   613  		return defaultValue
   614  	}
   615  	parsedValue, err := strconv.ParseInt(valueStr, 10, 32)
   616  	if err != nil {
   617  		logrus.Error(fmt.Sprintf("Failed to parse value of %s as integer. Using default %d. %v", envName, defaultValue, err))
   618  		return defaultValue
   619  	}
   620  	return int(parsedValue)
   621  }