github.com/google/cloudprober@v0.11.3/surfacers/postgres/postgres.go (about)

     1  // Copyright 2018 The Cloudprober Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //      http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  /*
    16  Package postgres implements "postgres" surfacer. This surfacer type is in
    17  experimental phase right now.
    18  
    19  To use this surfacer, add a stanza similar to the following to your
    20  cloudprober config:
    21  
    22  surfacer {
    23    type: POSTGRES
    24  	postgres_surfacer {
    25  	  connection_string: "postgresql://root:root@localhost/cloudprober?sslmode=disable"
    26  	  metrics_table_name: "metrics"
    27    }
    28  }
    29  */
    30  package postgres
    31  
    32  import (
    33  	"context"
    34  
    35  	"github.com/google/cloudprober/logger"
    36  	"github.com/google/cloudprober/metrics"
    37  
    38  	"database/sql"
    39  	"encoding/json"
    40  	"strconv"
    41  	"time"
    42  
    43  	"github.com/lib/pq"
    44  
    45  	configpb "github.com/google/cloudprober/surfacers/postgres/proto"
    46  )
    47  
// pgMetric represents a single metric and corresponds to a single row in the
// metrics table.
type pgMetric struct {
	time       time.Time         // sample timestamp, taken from the EventMetrics
	metricName string            // metric name, possibly suffixed (e.g. "_sum", "_bucket")
	value      string            // metric value, pre-formatted as a string
	labels     map[string]string // label key/value pairs; serialized to jsonb on insert
}
    56  
    57  func updateLabelMap(labels map[string]string, extraLabels ...[2]string) map[string]string {
    58  	if len(extraLabels) == 0 {
    59  		return labels
    60  	}
    61  	labelsCopy := make(map[string]string)
    62  	for k, v := range labels {
    63  		labelsCopy[k] = v
    64  	}
    65  	for _, extraLabel := range extraLabels {
    66  		labelsCopy[extraLabel[0]] = extraLabel[1]
    67  	}
    68  	return labelsCopy
    69  }
    70  
    71  // labelsJSON takes the labels array and formats it for insertion into
    72  // postgres jsonb labels column, storing each label as k,v json object
    73  func labelsJSON(labels map[string]string) (string, error) {
    74  	bs, err := json.Marshal(labels)
    75  	if err != nil {
    76  		return "", err
    77  	}
    78  
    79  	return string(bs), nil
    80  }
    81  
    82  func newPGMetric(t time.Time, metricName, val string, labels map[string]string) pgMetric {
    83  	return pgMetric{
    84  		time:       t,
    85  		metricName: metricName,
    86  		value:      val,
    87  		labels:     labels,
    88  	}
    89  }
    90  
    91  func distToPGMetrics(d *metrics.DistributionData, metricName string, labels map[string]string, t time.Time) []pgMetric {
    92  	pgMerics := []pgMetric{
    93  		newPGMetric(t, metricName+"_sum", strconv.FormatFloat(d.Sum, 'f', -1, 64), labels),
    94  		newPGMetric(t, metricName+"_count", strconv.FormatInt(d.Count, 10), labels),
    95  	}
    96  
    97  	// Create and format all metrics for each bucket in this distribution. Each
    98  	// bucket is assigned a metric name suffixed with "_bucket" and labeled with
    99  	// the corresponding bucket as "le: {bucket}"
   100  	var val int64
   101  	for i := range d.LowerBounds {
   102  		val += d.BucketCounts[i]
   103  		var lb string
   104  		if i == len(d.LowerBounds)-1 {
   105  			lb = "+Inf"
   106  		} else {
   107  			lb = strconv.FormatFloat(d.LowerBounds[i+1], 'f', -1, 64)
   108  		}
   109  		labelsWithBucket := updateLabelMap(labels, [2]string{"le", lb})
   110  		pgMerics = append(pgMerics, newPGMetric(t, metricName+"_bucket", strconv.FormatInt(val, 10), labelsWithBucket))
   111  	}
   112  
   113  	return pgMerics
   114  }
   115  
   116  // emToPGMetrics converts an EventMetrics struct into a list of pgMetrics.
   117  func emToPGMetrics(em *metrics.EventMetrics) []pgMetric {
   118  	baseLabels := make(map[string]string)
   119  	for _, k := range em.LabelsKeys() {
   120  		baseLabels[k] = em.Label(k)
   121  	}
   122  
   123  	pgMerics := []pgMetric{}
   124  	for _, metricName := range em.MetricsKeys() {
   125  		val := em.Metric(metricName)
   126  
   127  		// Map metric
   128  		if mapVal, ok := val.(*metrics.Map); ok {
   129  			for _, k := range mapVal.Keys() {
   130  				labels := updateLabelMap(baseLabels, [2]string{mapVal.MapName, k})
   131  				pgMerics = append(pgMerics, newPGMetric(em.Timestamp, metricName, mapVal.GetKey(k).String(), labels))
   132  			}
   133  			continue
   134  		}
   135  
   136  		// Distribution metric
   137  		if distVal, ok := val.(*metrics.Distribution); ok {
   138  			pgMerics = append(pgMerics, distToPGMetrics(distVal.Data(), metricName, baseLabels, em.Timestamp)...)
   139  			continue
   140  		}
   141  
   142  		// Convert string metrics to a numeric metric by moving metric value to
   143  		// the "val" label and setting the metric value to 1.
   144  		// For example: version="1.11" becomes version{val="1.11"}=1
   145  		if _, ok := val.(metrics.String); ok {
   146  			labels := updateLabelMap(baseLabels, [2]string{"val", val.String()})
   147  			pgMerics = append(pgMerics, newPGMetric(em.Timestamp, metricName, "1", labels))
   148  			continue
   149  		}
   150  
   151  		pgMerics = append(pgMerics, newPGMetric(em.Timestamp, metricName, val.String(), baseLabels))
   152  	}
   153  	return pgMerics
   154  }
   155  
// Surfacer structures for writing to postgres.
type Surfacer struct {
	// Configuration
	c *configpb.SurfacerConf

	// Channel for incoming data. Write() pushes into this channel; the
	// goroutine started by init() drains it.
	writeChan chan *metrics.EventMetrics

	// Cloud logger
	l *logger.Logger

	// openDB is a seam for tests to substitute the database connection;
	// production code uses sql.Open with the lib/pq driver.
	openDB func(connectionString string) (*sql.DB, error)
	db     *sql.DB
}
   170  
   171  // New initializes a Postgres surfacer. Postgres surfacer inserts probe results
   172  // into a postgres database.
   173  func New(ctx context.Context, config *configpb.SurfacerConf, l *logger.Logger) (*Surfacer, error) {
   174  	s := &Surfacer{
   175  		c: config,
   176  		l: l,
   177  		openDB: func(cs string) (*sql.DB, error) {
   178  			return sql.Open("postgres", cs)
   179  		},
   180  	}
   181  	return s, s.init(ctx)
   182  }
   183  
   184  // writeMetrics parses events metrics into postgres rows, starts a transaction
   185  // and inserts all discreet metric rows represented by the EventMetrics
   186  func (s *Surfacer) writeMetrics(em *metrics.EventMetrics) error {
   187  	// Begin a transaction.
   188  	txn, err := s.db.Begin()
   189  	if err != nil {
   190  		return err
   191  	}
   192  
   193  	// Prepare a statement to COPY table from the STDIN.
   194  	stmt, err := txn.Prepare(pq.CopyIn(s.c.GetMetricsTableName(), "time", "metric_name", "value", "labels"))
   195  	if err != nil {
   196  		return err
   197  	}
   198  
   199  	for _, pgMetric := range emToPGMetrics(em) {
   200  		var s string
   201  		if s, err = labelsJSON(pgMetric.labels); err != nil {
   202  			return err
   203  		}
   204  		if _, err = stmt.Exec(pgMetric.time, pgMetric.metricName, pgMetric.value, s); err != nil {
   205  			return err
   206  		}
   207  	}
   208  
   209  	if _, err = stmt.Exec(); err != nil {
   210  		return err
   211  	}
   212  	if err = stmt.Close(); err != nil {
   213  		return err
   214  	}
   215  
   216  	return txn.Commit()
   217  }
   218  
   219  // init connects to postgres
   220  func (s *Surfacer) init(ctx context.Context) error {
   221  	var err error
   222  
   223  	if s.db, err = s.openDB(s.c.GetConnectionString()); err != nil {
   224  		return err
   225  	}
   226  	if err = s.db.Ping(); err != nil {
   227  		return err
   228  	}
   229  	s.writeChan = make(chan *metrics.EventMetrics, s.c.GetMetricsBufferSize())
   230  
   231  	// Start a goroutine to run forever, polling on the writeChan. Allows
   232  	// for the surfacer to write asynchronously to the serial port.
   233  	go func() {
   234  		defer s.db.Close()
   235  
   236  		for {
   237  			select {
   238  			case <-ctx.Done():
   239  				s.l.Infof("Context canceled, stopping the surfacer write loop")
   240  				return
   241  			case em := <-s.writeChan:
   242  				if em.Kind != metrics.CUMULATIVE && em.Kind != metrics.GAUGE {
   243  					continue
   244  				}
   245  				// Note: we may want to batch calls to writeMetrics, as each call results in
   246  				// a database transaction.
   247  				if err := s.writeMetrics(em); err != nil {
   248  					s.l.Warningf("Error while writing metrics: %v", err)
   249  				}
   250  			}
   251  		}
   252  	}()
   253  
   254  	return nil
   255  }
   256  
   257  // Write takes the data to be written
   258  func (s *Surfacer) Write(ctx context.Context, em *metrics.EventMetrics) {
   259  	select {
   260  	case s.writeChan <- em:
   261  	default:
   262  		s.l.Errorf("Surfacer's write channel is full, dropping new data.")
   263  	}
   264  }