github.com/yankunsam/loki/v2@v2.6.3-0.20220817130409-389df5235c27/pkg/storage/stores/series/series_store_write.go

package series

import (
	"context"
	"fmt"

	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"

	"github.com/grafana/loki/pkg/storage/chunk"
	"github.com/grafana/loki/pkg/storage/chunk/cache"
	"github.com/grafana/loki/pkg/storage/chunk/client"
	"github.com/grafana/loki/pkg/storage/chunk/fetcher"
	"github.com/grafana/loki/pkg/storage/config"
	"github.com/grafana/loki/pkg/storage/stores/series/index"
	"github.com/grafana/loki/pkg/util/spanlogger"
)

var (
	DedupedChunksTotal = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "loki",
		Name:      "chunk_store_deduped_chunks_total",
		Help:      "Count of chunks which were not stored because they have already been stored by another replica.",
	})

	IndexEntriesPerChunk = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "loki",
		Name:      "chunk_store_index_entries_per_chunk",
		Help:      "Number of entries written to storage per chunk.",
		Buckets:   prometheus.ExponentialBuckets(1, 2, 5),
	})
)

// IndexWriter writes batches of index entries to the index store.
type IndexWriter interface {
	NewWriteBatch() index.WriteBatch
	BatchWrite(context.Context, index.WriteBatch) error
}

// SchemaWrites computes the index entries and dedupe-cache keys that a chunk
// requires under the configured schema.
type SchemaWrites interface {
	GetChunkWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]index.Entry, error)
	GetCacheKeysAndLabelWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]string, [][]index.Entry, error)
}

// Writer stores chunks and their index entries, deduplicating writes via the
// chunk cache and the write dedupe cache.
type Writer struct {
	writeDedupeCache          cache.Cache
	schemaCfg                 config.SchemaConfig
	DisableIndexDeduplication bool

	indexWriter IndexWriter
	schema      SchemaWrites
	fetcher     *fetcher.Fetcher
}

func NewWriter(fetcher *fetcher.Fetcher, schemaCfg config.SchemaConfig, indexWriter IndexWriter, schema SchemaWrites, writeDedupeCache cache.Cache, disableIndexDeduplication bool) *Writer {
	return &Writer{
		writeDedupeCache:          writeDedupeCache,
		schemaCfg:                 schemaCfg,
		DisableIndexDeduplication: disableIndexDeduplication,
		fetcher:                   fetcher,
		indexWriter:               indexWriter,
		schema:                    schema,
	}
}

// Put implements Store
func (c *Writer) Put(ctx context.Context, chunks []chunk.Chunk) error {
	for _, chunk := range chunks {
		if err := c.PutOne(ctx, chunk.From, chunk.Through, chunk); err != nil {
			return err
		}
	}
	return nil
}

// PutOne implements Store
func (c *Writer) PutOne(ctx context.Context, from, through model.Time, chk chunk.Chunk) error {
	log, ctx := spanlogger.New(ctx, "SeriesStore.PutOne")
	defer log.Finish()
	writeChunk := true

	// If this chunk is in the chunk cache it must already be in the database,
	// so we don't need to write it again.
	found, _, _, _ := c.fetcher.Cache().Fetch(ctx, []string{c.schemaCfg.ExternalKey(chk.ChunkRef)})

	if len(found) > 0 {
		writeChunk = false
		DedupedChunksTotal.Inc()
	}

	// If we don't have to write the chunk and DisableIndexDeduplication is false, we do not have to do anything.
	// If we don't have to write the chunk and DisableIndexDeduplication is true, we have to write the index but not the chunk.
	// Otherwise write both the index and the chunk.
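	//
	// In table form, the same three cases:
	//
	//   writeChunk | DisableIndexDeduplication | action
	//   -----------+---------------------------+-----------------------------
	//   false      | false                     | nothing to do; return early
	//   false      | true                      | write the index entries only
	//   true       | false or true             | write both chunk and index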
	if !writeChunk && !c.DisableIndexDeduplication {
		return nil
	}

	chunks := []chunk.Chunk{chk}

	writeReqs, keysToCache, err := c.calculateIndexEntries(ctx, from, through, chk)
	if err != nil {
		return err
	}

	if oic, ok := c.fetcher.Client().(client.ObjectAndIndexClient); ok {
		// Shadow chunks so we can pass an empty slice when only the index
		// needs to be written.
		chunks := chunks
		if !writeChunk {
			chunks = []chunk.Chunk{}
		}
		if err = oic.PutChunksAndIndex(ctx, chunks, writeReqs); err != nil {
			return err
		}
	} else {
		// The chunk was not found in the cache, so write it.
		if writeChunk {
			err := c.fetcher.Client().PutChunks(ctx, chunks)
			if err != nil {
				return err
			}
		}
		if err := c.indexWriter.BatchWrite(ctx, writeReqs); err != nil {
			return err
		}
	}

	// If the chunk was already in the cache, don't write it back to the cache.
	if writeChunk {
		if cacheErr := c.fetcher.WriteBackCache(ctx, chunks); cacheErr != nil {
			level.Warn(log).Log("msg", "could not store chunks in chunk cache", "err", cacheErr)
		}
	}

	bufs := make([][]byte, len(keysToCache))
	err = c.writeDedupeCache.Store(ctx, keysToCache, bufs)
	if err != nil {
		level.Warn(log).Log("msg", "could not store in write dedupe cache", "err", err)
	}
	return nil
}

// calculateIndexEntries creates a deduplicated, batched set of index writes
// for the single chunk it is given.
func (c *Writer) calculateIndexEntries(ctx context.Context, from, through model.Time, chunk chunk.Chunk) (index.WriteBatch, []string, error) {
	seenIndexEntries := map[string]struct{}{}
	entries := []index.Entry{}

	metricName := chunk.Metric.Get(labels.MetricName)
	if metricName == "" {
		return nil, nil, fmt.Errorf("no MetricNameLabel for chunk")
	}

	keys, labelEntries, err := c.schema.GetCacheKeysAndLabelWriteEntries(from, through, chunk.UserID, metricName, chunk.Metric, c.schemaCfg.ExternalKey(chunk.ChunkRef))
	if err != nil {
		return nil, nil, err
	}
	_, _, missing, _ := c.writeDedupeCache.Fetch(ctx, keys)
	// keys and labelEntries are matched in order, but Fetch() may
	// return missing keys in any order, so check against all of them.
	for _, missingKey := range missing {
		for i, key := range keys {
			if key == missingKey {
				entries = append(entries, labelEntries[i]...)
			}
		}
	}

	chunkEntries, err := c.schema.GetChunkWriteEntries(from, through, chunk.UserID, metricName, chunk.Metric, c.schemaCfg.ExternalKey(chunk.ChunkRef))
	if err != nil {
		return nil, nil, err
	}
	entries = append(entries, chunkEntries...)

	IndexEntriesPerChunk.Observe(float64(len(entries)))

	// Remove duplicate entries based on tableName:hashValue:rangeValue.
	result := c.indexWriter.NewWriteBatch()
	for _, entry := range entries {
		key := fmt.Sprintf("%s:%s:%x", entry.TableName, entry.HashValue, entry.RangeValue)
		if _, ok := seenIndexEntries[key]; !ok {
			seenIndexEntries[key] = struct{}{}
			result.Add(entry.TableName, entry.HashValue, entry.RangeValue, entry.Value)
		}
	}

	return result, missing, nil
}
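
// Usage sketch (not part of the upstream file): how a caller might wire up a
// Writer and store chunks. The fetcher, schema config, index writer, schema,
// and dedupe cache values below are placeholders assumed to come from the
// store's setup code elsewhere in pkg/storage.
//
//	writer := NewWriter(f, schemaCfg, idxWriter, schemaWrites, dedupeCache, false)
//	if err := writer.Put(ctx, []chunk.Chunk{chk}); err != nil {
//		level.Error(logger).Log("msg", "failed to write chunks", "err", err)
//	}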