github.com/pf-qiu/concourse/v6@v6.7.3-0.20201207032516-1f455d73275f/atc/metric/emitter/newrelic.go (about)

     1  package emitter
     2  
     3  import (
     4  	"bytes"
     5  	"compress/gzip"
     6  	"encoding/json"
     7  	"fmt"
     8  	"io"
     9  	"io/ioutil"
    10  	"net/http"
    11  	"strings"
    12  	"time"
    13  
    14  	"code.cloudfoundry.org/lager"
    15  	"github.com/pf-qiu/concourse/v6/atc/metric"
    16  	"github.com/pkg/errors"
    17  )
    18  
type (
	// stats accumulates the most recently observed created/deleted counter
	// values for one resource family (checks, containers, volumes) so they
	// can be emitted later as a single consolidated event.
	stats struct {
		created interface{} // last "created" value seen; nil until first event
		deleted interface{} // last "deleted" value seen; nil until first event
	}

	// NewRelicEmitter batches transformed metric events and posts them to
	// the New Relic Insights insert API.
	NewRelicEmitter struct {
		Client             *http.Client
		Url                string // fully-formed insert endpoint, account ID included
		apikey             string
		prefix             string // optional prefix applied to every eventType
		checks             *stats
		containers         *stats
		volumes            *stats
		BatchSize          int           // flush once this many events are buffered
		BatchDuration      time.Duration // flush at least this often regardless of size
		DisableCompression bool          // when true, send plain JSON instead of gzip
		LastEmitTime       time.Time
		NewRelicBatch      []NewRelicEvent
	}

	// NewRelicConfig carries the command-line flags for this emitter; it is
	// registered with the metrics registry in init.
	NewRelicConfig struct {
		AccountID          string        `long:"newrelic-account-id" description:"New Relic Account ID"`
		APIKey             string        `long:"newrelic-api-key" description:"New Relic Insights API Key"`
		Url                string        `long:"newrelic-insights-api-url" default:"https://insights-collector.newrelic.com" description:"Base Url for insights Insert API"`
		ServicePrefix      string        `long:"newrelic-service-prefix" default:"" description:"An optional prefix for emitted New Relic events"`
		BatchSize          uint64        `long:"newrelic-batch-size" default:"2000" description:"Number of events to batch together before emitting"`
		BatchDuration      time.Duration `long:"newrelic-batch-duration" default:"60s" description:"Length of time to wait between emitting until all currently batched events are emitted"`
		DisableCompression bool          `long:"newrelic-batch-disable-compression" description:"Disables compression of the batch before sending it"`
	}

	// NewRelicEvent is the flat key/value payload shape New Relic accepts.
	NewRelicEvent map[string]interface{}
)
    52  
// init registers this emitter's config with the global metrics registry so
// it can be selected and configured from the command line.
func init() {
	metric.Metrics.RegisterEmitter(&NewRelicConfig{})
}
    56  
    57  func (config *NewRelicConfig) Description() string { return "NewRelic" }
    58  func (config *NewRelicConfig) IsConfigured() bool {
    59  	return config.AccountID != "" && config.APIKey != ""
    60  }
    61  
    62  func (config *NewRelicConfig) NewEmitter() (metric.Emitter, error) {
    63  	client := &http.Client{
    64  		Transport: &http.Transport{Proxy: http.ProxyFromEnvironment},
    65  		Timeout:   time.Minute,
    66  	}
    67  
    68  	return &NewRelicEmitter{
    69  		Client:             client,
    70  		Url:                fmt.Sprintf("%s/v1/accounts/%s/events", config.Url, config.AccountID),
    71  		apikey:             config.APIKey,
    72  		prefix:             config.ServicePrefix,
    73  		containers:         new(stats),
    74  		volumes:            new(stats),
    75  		checks:             new(stats),
    76  		BatchSize:          int(config.BatchSize),
    77  		BatchDuration:      config.BatchDuration,
    78  		DisableCompression: config.DisableCompression,
    79  		LastEmitTime:       time.Now(),
    80  		NewRelicBatch:      make([]NewRelicEvent, 0),
    81  	}, nil
    82  }
    83  
    84  func (emitter *NewRelicEmitter) Emit(logger lager.Logger, event metric.Event) {
    85  	logger = logger.Session("new-relic")
    86  
    87  	switch event.Name {
    88  
    89  	// These are the simple ones that only need a small name transformation
    90  	case "build started",
    91  		"build finished",
    92  		"checks finished",
    93  		"checks started",
    94  		"checks enqueued",
    95  		"checks queue size",
    96  		"worker containers",
    97  		"worker volumes",
    98  		"concurrent requests",
    99  		"concurrent requests limit hit",
   100  		"http response time",
   101  		"database queries",
   102  		"database connections",
   103  		"worker unknown containers",
   104  		"worker unknown volumes",
   105  		"volumes streamed":
   106  		emitter.NewRelicBatch = append(emitter.NewRelicBatch, emitter.transformToNewRelicEvent(event, ""))
   107  
   108  	// These are periodic metrics that are consolidated and only emitted once
   109  	// per cycle (the emit trigger is chosen because it's currently last in the
   110  	// periodic list, so we should have a coherent view). We do this because
   111  	// new relic has a hard limit on the total number of metrics in a 24h
   112  	// period, so batching similar data where possible makes sense.
   113  	case "checks deleted":
   114  		emitter.checks.deleted = event.Value
   115  	case "containers deleted":
   116  		emitter.containers.deleted = event.Value
   117  	case "containers created":
   118  		emitter.containers.created = event.Value
   119  	case "failed containers":
   120  		singleEvent := emitter.transformToNewRelicEvent(event, "containers")
   121  		singleEvent["failed"] = singleEvent["value"]
   122  		singleEvent["created"] = emitter.containers.created
   123  		singleEvent["deleted"] = emitter.containers.deleted
   124  		delete(singleEvent, "value")
   125  		emitter.NewRelicBatch = append(emitter.NewRelicBatch, singleEvent)
   126  
   127  	case "volumes deleted":
   128  		emitter.volumes.deleted = event.Value
   129  	case "volumes created":
   130  		emitter.volumes.created = event.Value
   131  	case "failed volumes":
   132  		singleEvent := emitter.transformToNewRelicEvent(event, "volumes")
   133  		singleEvent["failed"] = singleEvent["value"]
   134  		singleEvent["created"] = emitter.volumes.created
   135  		singleEvent["deleted"] = emitter.volumes.deleted
   136  		delete(singleEvent, "value")
   137  		emitter.NewRelicBatch = append(emitter.NewRelicBatch, singleEvent)
   138  
   139  	// And a couple that need a small rename (new relic doesn't like some chars)
   140  	case "scheduling: full duration (ms)":
   141  		emitter.NewRelicBatch = append(emitter.NewRelicBatch, emitter.transformToNewRelicEvent(event,
   142  			"scheduling_full_duration_ms"))
   143  	case "scheduling: loading versions duration (ms)":
   144  		emitter.NewRelicBatch = append(emitter.NewRelicBatch, emitter.transformToNewRelicEvent(event,
   145  			"scheduling_load_duration_ms"))
   146  	case "scheduling: job duration (ms)":
   147  		emitter.NewRelicBatch = append(emitter.NewRelicBatch, emitter.transformToNewRelicEvent(event,
   148  			"scheduling_job_duration_ms"))
   149  	default:
   150  		// Ignore the rest
   151  	}
   152  
   153  	duration := time.Since(emitter.LastEmitTime)
   154  	if len(emitter.NewRelicBatch) >= emitter.BatchSize || duration >= emitter.BatchDuration {
   155  		logger.Debug("pre-emit-batch", lager.Data{
   156  			"batch-size":         emitter.BatchSize,
   157  			"current-batch-size": len(emitter.NewRelicBatch),
   158  			"batch-duration":     emitter.BatchDuration,
   159  			"current-duration":   duration,
   160  		})
   161  		emitter.submitBatch(logger)
   162  	}
   163  }
   164  
   165  // NewRelic has strict requirements around the structure of the events
   166  // Keys must be alphanumeric and can contain hyphens or underscores
   167  // Values must be sting, int, or unix timestamps. No maps/arrays.
   168  func (emitter *NewRelicEmitter) transformToNewRelicEvent(event metric.Event, nameOverride string) NewRelicEvent {
   169  	name := nameOverride
   170  	if name == "" {
   171  		name = strings.Replace(event.Name, " ", "_", -1)
   172  	}
   173  
   174  	eventType := emitter.prefix + name
   175  
   176  	payload := NewRelicEvent{
   177  		"eventType": eventType,
   178  		"value":     event.Value,
   179  		"host":      event.Host,
   180  		"timestamp": event.Time.Unix(),
   181  	}
   182  
   183  	for k, v := range event.Attributes {
   184  		payload["_"+k] = v
   185  	}
   186  	return payload
   187  }
   188  
   189  func (emitter *NewRelicEmitter) submitBatch(logger lager.Logger) {
   190  	batchToSubmit := make([]NewRelicEvent, len(emitter.NewRelicBatch))
   191  	copy(batchToSubmit, emitter.NewRelicBatch)
   192  	emitter.NewRelicBatch = make([]NewRelicEvent, 0)
   193  	emitter.LastEmitTime = time.Now()
   194  	go emitter.emitBatch(logger, batchToSubmit)
   195  }
   196  
   197  func (emitter *NewRelicEmitter) emitBatch(logger lager.Logger, payload []NewRelicEvent) {
   198  	batch, err := emitter.marshalJSON(logger, payload)
   199  	if err != nil {
   200  		logger.Error("failed-to-marshal-batch", err)
   201  		return
   202  	}
   203  
   204  	req, err := http.NewRequest("POST", emitter.Url, batch)
   205  	if err != nil {
   206  		logger.Error("failed-to-construct-request", err)
   207  		return
   208  	}
   209  
   210  	req.Header.Add("Content-Type", "application/json")
   211  	req.Header.Add("X-Insert-Key", emitter.apikey)
   212  	if !emitter.DisableCompression {
   213  		req.Header.Add("Content-Encoding", "gzip")
   214  	}
   215  
   216  	resp, err := emitter.Client.Do(req)
   217  	defer resp.Body.Close()
   218  	if err != nil {
   219  		logger.Error("failed-to-send-request",
   220  			errors.Wrap(metric.ErrFailedToEmit, err.Error()))
   221  		return
   222  	}
   223  
   224  	if resp.StatusCode < 200 || resp.StatusCode > 299 {
   225  		bodyBytes, err := ioutil.ReadAll(resp.Body)
   226  		if err != nil {
   227  			logger.Info("failed-to-read-response-body",
   228  				lager.Data{"error": err.Error(), "status-code": resp.StatusCode})
   229  			return
   230  		}
   231  		logger.Info("received-non-2xx-response-status-code",
   232  			lager.Data{"response-body": string(bodyBytes), "status-code": resp.StatusCode})
   233  		return
   234  	}
   235  }
   236  
   237  func (emitter *NewRelicEmitter) marshalJSON(logger lager.Logger, batch []NewRelicEvent) (io.Reader, error) {
   238  	var batchJson *bytes.Buffer
   239  	if emitter.DisableCompression {
   240  		marshaled, err := json.Marshal(batch)
   241  		if err != nil {
   242  			logger.Error("failed-to-serialize-payload", err)
   243  			return nil, err
   244  		}
   245  		batchJson = bytes.NewBuffer(marshaled)
   246  	} else {
   247  		batchJson = bytes.NewBuffer([]byte{})
   248  		encoder := gzip.NewWriter(batchJson)
   249  		defer encoder.Close()
   250  		err := json.NewEncoder(encoder).Encode(batch)
   251  		if err != nil {
   252  			logger.Error("failed-to-compress-and-serialize-payload", err)
   253  			return nil, err
   254  		}
   255  	}
   256  	return batchJson, nil
   257  }