github.com/muhammadn/cortex@v1.9.1-0.20220510110439-46bb7000d03d/pkg/chunk/chunk.go

package chunk

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"
	"strconv"
	"strings"
	"sync"

	"github.com/golang/snappy"
	jsoniter "github.com/json-iterator/go"
	"github.com/pkg/errors"
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/pkg/labels"
	errs "github.com/weaveworks/common/errors"

	prom_chunk "github.com/cortexproject/cortex/pkg/chunk/encoding"
	"github.com/cortexproject/cortex/pkg/prom1/storage/metric"
)

const (
	ErrInvalidChecksum = errs.Error("invalid chunk checksum")
	ErrWrongMetadata   = errs.Error("wrong chunk metadata")
	ErrMetadataLength  = errs.Error("chunk metadata wrong length")
	ErrDataLength      = errs.Error("chunk data wrong length")
	ErrSliceOutOfRange = errs.Error("chunk can't be sliced out of its data range")
)

var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)

func errInvalidChunkID(s string) error {
	return errors.Errorf("invalid chunk ID %q", s)
}

// Chunk contains encoded timeseries data
type Chunk struct {
	// These two fields will be missing from older chunks (as will the hash).
	// On fetch we will initialise these fields from the DynamoDB key.
	Fingerprint model.Fingerprint `json:"fingerprint"`
	UserID      string            `json:"userID"`

	// These fields will be in all chunks, including old ones.
	From    model.Time    `json:"from"`
	Through model.Time    `json:"through"`
	Metric  labels.Labels `json:"metric"`

	// The hash is not written to the external storage either.  We use
	// crc32, Castagnoli table.  See http://www.evanjones.ca/crc32c.html.
	// For old chunks, ChecksumSet will be false.
	ChecksumSet bool   `json:"-"`
	Checksum    uint32 `json:"-"`

	// We never use Delta encoding (the zero value), so if this entry is
	// missing, we default to DoubleDelta.
	Encoding prom_chunk.Encoding `json:"encoding"`
	Data     prom_chunk.Chunk    `json:"-"`

	// The encoded version of the chunk, held so we don't need to re-encode it
	encoded []byte
}

// NewChunk creates a new chunk
func NewChunk(userID string, fp model.Fingerprint, metric labels.Labels, c prom_chunk.Chunk, from, through model.Time) Chunk {
	return Chunk{
		Fingerprint: fp,
		UserID:      userID,
		From:        from,
		Through:     through,
		Metric:      metric,
		Encoding:    c.Encoding(),
		Data:        c,
	}
}

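// A minimal construction sketch (the fingerprint, labels and time values below
// are hypothetical, chosen only for illustration):
//
//	data, err := prom_chunk.NewForEncoding(prom_chunk.DoubleDelta)
//	if err != nil {
//		panic(err)
//	}
//	lbls := labels.FromStrings(model.MetricNameLabel, "http_requests_total")
//	c := NewChunk("user-1", model.Fingerprint(0x1c7f), lbls, data, model.Time(0), model.Time(3600000))
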
// ParseExternalKey is used to construct a partially-populated chunk from the
// key in DynamoDB.  This chunk can then be used to calculate the key needed
// to fetch the Chunk data from Memcache/S3, and then fully populate the chunk
// with Decode().
//
// Pre-checksums, the keys written to DynamoDB looked like
// `<fingerprint>:<start time>:<end time>` (aka the ID), and the key for
// memcache and S3 was `<user id>/<fingerprint>:<start time>:<end time>`.
// Fingerprints and times were written in base-10.
//
// Post-checksums, external keys become the same across DynamoDB, Memcache
// and S3.  Numbers become hex encoded.  Keys look like:
// `<user id>/<fingerprint>:<start time>:<end time>:<checksum>`.
func ParseExternalKey(userID, externalKey string) (Chunk, error) {
	if !strings.Contains(externalKey, "/") {
		return parseLegacyChunkID(userID, externalKey)
	}
	chunk, err := parseNewExternalKey(externalKey)
	if err != nil {
		return Chunk{}, err
	}
	if chunk.UserID != userID {
		return Chunk{}, errors.WithStack(ErrWrongMetadata)
	}
	return chunk, nil
}

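// As a usage sketch (the key below is hypothetical), parsing a post-checksum
// key yields a partially-populated chunk: the key fields and checksum are set,
// but Data stays nil until the bytes are fetched and passed to Decode:
//
//	c, err := ParseExternalKey("user-1", "user-1/1c7f:4e94914f0000:4e949e6f8000:a1b2c3d4")
//	if err != nil {
//		panic(err)
//	}
//	// c.Fingerprint, c.From, c.Through and c.Checksum are now set; c.Data is nil.
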
func parseLegacyChunkID(userID, key string) (Chunk, error) {
	parts := strings.Split(key, ":")
	if len(parts) != 3 {
		return Chunk{}, errInvalidChunkID(key)
	}
	fingerprint, err := strconv.ParseUint(parts[0], 10, 64)
	if err != nil {
		return Chunk{}, err
	}
	from, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return Chunk{}, err
	}
	through, err := strconv.ParseInt(parts[2], 10, 64)
	if err != nil {
		return Chunk{}, err
	}
	return Chunk{
		UserID:      userID,
		Fingerprint: model.Fingerprint(fingerprint),
		From:        model.Time(from),
		Through:     model.Time(through),
	}, nil
}

func parseNewExternalKey(key string) (Chunk, error) {
	parts := strings.Split(key, "/")
	if len(parts) != 2 {
		return Chunk{}, errInvalidChunkID(key)
	}
	userID := parts[0]
	hexParts := strings.Split(parts[1], ":")
	if len(hexParts) != 4 {
		return Chunk{}, errInvalidChunkID(key)
	}
	fingerprint, err := strconv.ParseUint(hexParts[0], 16, 64)
	if err != nil {
		return Chunk{}, err
	}
	from, err := strconv.ParseInt(hexParts[1], 16, 64)
	if err != nil {
		return Chunk{}, err
	}
	through, err := strconv.ParseInt(hexParts[2], 16, 64)
	if err != nil {
		return Chunk{}, err
	}
	checksum, err := strconv.ParseUint(hexParts[3], 16, 32)
	if err != nil {
		return Chunk{}, err
	}
	return Chunk{
		UserID:      userID,
		Fingerprint: model.Fingerprint(fingerprint),
		From:        model.Time(from),
		Through:     model.Time(through),
		Checksum:    uint32(checksum),
		ChecksumSet: true,
	}, nil
}

// ExternalKey returns the key you can use to fetch this chunk from external
// storage. For newer chunks, this key includes a checksum.
func (c *Chunk) ExternalKey() string {
	// Some chunks have a checksum stored in DynamoDB, some do not.  We must
	// generate keys appropriately.
	if c.ChecksumSet {
		// This is the inverse of parseNewExternalKey.
		return fmt.Sprintf("%s/%x:%x:%x:%x", c.UserID, uint64(c.Fingerprint), int64(c.From), int64(c.Through), c.Checksum)
	}
	// This is the inverse of parseLegacyChunkID, with "<user id>/" prepended.
	// Legacy chunks had the user ID prefix on S3/Memcache, but not in DynamoDB.
	// See the comment on ParseExternalKey.
	return fmt.Sprintf("%s/%d:%d:%d", c.UserID, uint64(c.Fingerprint), int64(c.From), int64(c.Through))
}

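// Round-trip sketch (c is a hypothetical chunk with ChecksumSet): for
// checksummed chunks, ExternalKey and ParseExternalKey are inverses, which is
// what the metadata check in Decode relies on:
//
//	key := c.ExternalKey()
//	c2, err := ParseExternalKey(c.UserID, key)
//	// on success, equalByKey(c, c2) holds.
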
var writerPool = sync.Pool{
	New: func() interface{} { return snappy.NewBufferedWriter(nil) },
}

// Encode writes the chunk into a buffer, and calculates the checksum.
func (c *Chunk) Encode() error {
	return c.EncodeTo(nil)
}

// EncodeTo is like Encode but you can provide your own buffer to use.
func (c *Chunk) EncodeTo(buf *bytes.Buffer) error {
	if buf == nil {
		buf = bytes.NewBuffer(nil)
	}
	// Write 4 empty bytes first - we will come back and put the len in here.
	metadataLenBytes := [4]byte{}
	if _, err := buf.Write(metadataLenBytes[:]); err != nil {
		return err
	}

	// Encode chunk metadata into snappy-compressed buffer
	writer := writerPool.Get().(*snappy.Writer)
	defer writerPool.Put(writer)
	writer.Reset(buf)
	json := jsoniter.ConfigFastest
	if err := json.NewEncoder(writer).Encode(c); err != nil {
		return err
	}
	writer.Close()

	// Write the metadata length back at the start of the buffer.
	// (note this length includes the 4 bytes for the length itself)
	metadataLen := buf.Len()
	binary.BigEndian.PutUint32(metadataLenBytes[:], uint32(metadataLen))
	copy(buf.Bytes(), metadataLenBytes[:])

	// Write another 4 empty bytes - we will come back and put the len in here.
	dataLenBytes := [4]byte{}
	if _, err := buf.Write(dataLenBytes[:]); err != nil {
		return err
	}

	// And now the chunk data
	if err := c.Data.Marshal(buf); err != nil {
		return err
	}

	// Now write the data len back into the buf.
	binary.BigEndian.PutUint32(dataLenBytes[:], uint32(buf.Len()-metadataLen-4))
	copy(buf.Bytes()[metadataLen:], dataLenBytes[:])

	// Now work out the checksum
	c.encoded = buf.Bytes()
	c.ChecksumSet = true
	c.Checksum = crc32.Checksum(c.encoded, castagnoliTable)
	return nil
}

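// For reference, the byte layout produced by EncodeTo (and expected by Decode)
// is:
//
//	[4-byte big-endian metadata length][snappy-compressed JSON metadata]
//	[4-byte big-endian data length][chunk data]
//
// where the metadata length counts its own 4 bytes plus the compressed
// metadata, and the data length counts only the chunk data after it.
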
// Encoded returns the buffer created by Encode()
func (c *Chunk) Encoded() ([]byte, error) {
	if c.encoded == nil {
		if err := c.Encode(); err != nil {
			return nil, err
		}
	}
	return c.encoded, nil
}

// DecodeContext holds data that can be re-used between decodes of different chunks
type DecodeContext struct {
	reader *snappy.Reader
}

// NewDecodeContext creates a new, blank, DecodeContext
func NewDecodeContext() *DecodeContext {
	return &DecodeContext{
		reader: snappy.NewReader(nil),
	}
}

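// A minimal reuse sketch (fetchedChunks and bufs are hypothetical): sharing
// one DecodeContext across a batch of decodes avoids allocating a fresh
// snappy reader per chunk:
//
//	ctx := NewDecodeContext()
//	for i := range fetchedChunks {
//		if err := fetchedChunks[i].Decode(ctx, bufs[i]); err != nil {
//			return err
//		}
//	}
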
// Decode the chunk from the given buffer, and confirm the chunk is the one we
// expected.
func (c *Chunk) Decode(decodeContext *DecodeContext, input []byte) error {
	// First, calculate the checksum of the chunk and confirm it matches
	// what we expected.
	if c.ChecksumSet && c.Checksum != crc32.Checksum(input, castagnoliTable) {
		return errors.WithStack(ErrInvalidChecksum)
	}

	// Now unmarshal the chunk metadata.
	r := bytes.NewReader(input)
	var metadataLen uint32
	if err := binary.Read(r, binary.BigEndian, &metadataLen); err != nil {
		return errors.Wrap(err, "when reading metadata length from chunk")
	}
	var tempMetadata Chunk
	decodeContext.reader.Reset(r)
	json := jsoniter.ConfigFastest
	err := json.NewDecoder(decodeContext.reader).Decode(&tempMetadata)
	if err != nil {
		return errors.Wrap(err, "when decoding chunk metadata")
	}
	metadataRead := len(input) - r.Len()
	// Older versions of Cortex included the initial length word; newer versions do not.
	if !(metadataRead == int(metadataLen) || metadataRead == int(metadataLen)+4) {
		return errors.Wrapf(ErrMetadataLength, "expected %d, got %d", metadataLen, metadataRead)
	}

	// Next, confirm the chunk matches what we expected.  The easiest way to do
	// this is to compare what the decoded data thinks its external ID would be,
	// but we don't write the checksum to S3, so we have to copy the checksum in.
	if c.ChecksumSet {
		tempMetadata.Checksum, tempMetadata.ChecksumSet = c.Checksum, c.ChecksumSet
		if !equalByKey(*c, tempMetadata) {
			return errors.WithStack(ErrWrongMetadata)
		}
	}
	*c = tempMetadata

	// Older chunks always used DoubleDelta and did not write Encoding
	// to JSON, so override if it has the zero value (Delta)
	if c.Encoding == prom_chunk.Delta {
		c.Encoding = prom_chunk.DoubleDelta
	}

	// Finally, unmarshal the actual chunk data.
	c.Data, err = prom_chunk.NewForEncoding(c.Encoding)
	if err != nil {
		return errors.Wrap(err, "when creating new chunk")
	}

	var dataLen uint32
	if err := binary.Read(r, binary.BigEndian, &dataLen); err != nil {
		return errors.Wrap(err, "when reading data length from chunk")
	}

	c.encoded = input
	remainingData := input[len(input)-r.Len():]
	if int(dataLen) != len(remainingData) {
		return ErrDataLength
	}

	return c.Data.UnmarshalFromBuf(remainingData[:int(dataLen)])
}

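// Putting the pieces together (fetchFromStore is hypothetical): the partially-
// populated chunk from ParseExternalKey carries the expected checksum and key
// fields that Decode verifies against the fetched bytes:
//
//	c, err := ParseExternalKey(userID, key)
//	if err != nil {
//		return err
//	}
//	buf := fetchFromStore(c.ExternalKey()) // hypothetical fetch from Memcache/S3
//	if err := c.Decode(NewDecodeContext(), buf); err != nil {
//		return err
//	}
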
func equalByKey(a, b Chunk) bool {
	return a.UserID == b.UserID && a.Fingerprint == b.Fingerprint &&
		a.From == b.From && a.Through == b.Through && a.Checksum == b.Checksum
}

// Samples returns the SamplePairs in the chunk that fall within the given
// time range (inclusive).
func (c *Chunk) Samples(from, through model.Time) ([]model.SamplePair, error) {
	it := c.Data.NewIterator(nil)
	interval := metric.Interval{OldestInclusive: from, NewestInclusive: through}
	return prom_chunk.RangeValues(it, interval)
}

// Slice builds a new, smaller chunk with data only from the given time range (inclusive)
func (c *Chunk) Slice(from, through model.Time) (*Chunk, error) {
	// There should be at least some overlap between the chunk interval and the slice interval.
	if from > c.Through || through < c.From {
		return nil, ErrSliceOutOfRange
	}

	pc, err := c.Data.Rebound(from, through)
	if err != nil {
		return nil, err
	}

	nc := NewChunk(c.UserID, c.Fingerprint, c.Metric, pc, from, through)
	return &nc, nil
}

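// A small sketch (times are hypothetical): slicing a chunk spanning
// [0, 1000] down to [250, 750] re-encodes only the overlapping samples:
//
//	sub, err := c.Slice(model.Time(250), model.Time(750))
//	if err != nil {
//		return err
//	}
//	// sub.From == 250, sub.Through == 750; sub keeps c's user ID and metric.
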
func intervalsOverlap(interval1, interval2 model.Interval) bool {
	return interval1.Start <= interval2.End && interval2.Start <= interval1.End
}