github.com/thanos-io/thanos@v0.32.5/internal/cortex/chunk/cache/memcached.go

// Copyright (c) The Cortex Authors.
// Licensed under the Apache License 2.0.

package cache

import (
	"context"
	"encoding/hex"
	"flag"
	"hash/fnv"
	"sync"
	"time"

	"github.com/bradfitz/gomemcache/memcache"
	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	otlog "github.com/opentracing/opentracing-go/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	instr "github.com/weaveworks/common/instrument"

	"github.com/thanos-io/thanos/internal/cortex/util/math"
	"github.com/thanos-io/thanos/internal/cortex/util/spanlogger"
)

// MemcachedConfig is the config to make a Memcached.
type MemcachedConfig struct {
	Expiration time.Duration `yaml:"expiration"`

	BatchSize   int `yaml:"batch_size"`
	Parallelism int `yaml:"parallelism"`
}

// RegisterFlagsWithPrefix adds the flags required to configure this to the given FlagSet.
func (cfg *MemcachedConfig) RegisterFlagsWithPrefix(prefix, description string, f *flag.FlagSet) {
	f.DurationVar(&cfg.Expiration, prefix+"memcached.expiration", 0, description+"How long keys stay in the memcache.")
	f.IntVar(&cfg.BatchSize, prefix+"memcached.batchsize", 1024, description+"How many keys to fetch in each batch.")
	f.IntVar(&cfg.Parallelism, prefix+"memcached.parallelism", 100, description+"Maximum active requests to memcache.")
}

// Memcached caches chunks in memcached.
type Memcached struct {
	cfg      MemcachedConfig
	memcache MemcachedClient
	name     string

	requestDuration *instr.HistogramCollector

	wg      sync.WaitGroup
	inputCh chan *work
	quit    chan struct{}

	logger log.Logger
}

// NewMemcached makes a new Memcached.
func NewMemcached(cfg MemcachedConfig, client MemcachedClient, name string, reg prometheus.Registerer, logger log.Logger) *Memcached {
	c := &Memcached{
		cfg:      cfg,
		memcache: client,
		name:     name,
		logger:   logger,
		requestDuration: instr.NewHistogramCollector(
			promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
				Namespace: "cortex",
				Name:      "memcache_request_duration_seconds",
				Help:      "Total time spent in seconds doing memcache requests.",
				// Memcached requests are very quick: smallest bucket is 16us, biggest is 1s.
				Buckets:     prometheus.ExponentialBuckets(0.000016, 4, 8),
				ConstLabels: prometheus.Labels{"name": name},
			}, []string{"method", "status_code"}),
		),
	}

	if cfg.BatchSize == 0 || cfg.Parallelism == 0 {
		return c
	}

	c.inputCh = make(chan *work)
	c.quit = make(chan struct{})
	c.wg.Add(cfg.Parallelism)

	for i := 0; i < cfg.Parallelism; i++ {
		go func() {
			defer c.wg.Done()
			for {
				select {
				case <-c.quit:
					return
				case input := <-c.inputCh:
					res := &result{
						batchID: input.batchID,
					}
					res.found, res.bufs, res.missed = c.fetch(input.ctx, input.keys)
					// No-one will be reading from resultCh if we were asked to quit
					// during the fetch, so check again before writing to it.
					select {
					case <-c.quit:
						return
					case input.resultCh <- res:
					}
				}
			}
		}()
	}

	return c
}

type work struct {
	keys     []string
	ctx      context.Context
	resultCh chan<- *result
	batchID  int // For ordering results.
}

type result struct {
	found   []string
	bufs    [][]byte
	missed  []string
	batchID int // For ordering results.
}
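// The worker pool above consumes work items and reassembles the per-batch
// results by batchID. As a hedged, illustrative sketch (not part of the
// upstream Cortex code), the helper below shows the batching arithmetic that
// fetchKeysBatched relies on: keys are cut into BatchSize-sized windows, and
// the window index doubles as the batchID used to restore input order.
// The name batchBoundsExample is hypothetical.
func batchBoundsExample(numKeys, batchSize int) [][2]int {
	var bounds [][2]int // each entry is a {start, end} pair into the keys slice
	for i := 0; i < numKeys; i += batchSize {
		bounds = append(bounds, [2]int{i, math.Min(i+batchSize, numKeys)})
	}
	// e.g. numKeys=2500, batchSize=1024 -> {0,1024}, {1024,2048}, {2048,2500},
	// i.e. batchIDs 0, 1 and 2, the last batch being short.
	return bounds
}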
func memcacheStatusCode(err error) string {
	// See https://godoc.org/github.com/bradfitz/gomemcache/memcache#pkg-variables
	switch err {
	case nil:
		return "200"
	case memcache.ErrCacheMiss:
		return "404"
	case memcache.ErrMalformedKey:
		return "400"
	default:
		return "500"
	}
}

// Fetch gets keys from the cache. The keys that are found must be in the order of the keys requested.
func (c *Memcached) Fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missed []string) {
	if c.cfg.BatchSize == 0 {
		found, bufs, missed = c.fetch(ctx, keys)
		return
	}
	_ = instr.CollectedRequest(ctx, "Memcache.GetBatched", c.requestDuration, memcacheStatusCode, func(ctx context.Context) error {
		found, bufs, missed = c.fetchKeysBatched(ctx, keys)
		return nil
	})
	return
}

func (c *Memcached) fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missed []string) {
	var items map[string]*memcache.Item
	const method = "Memcache.GetMulti"
	err := instr.CollectedRequest(ctx, method, c.requestDuration, memcacheStatusCode, func(innerCtx context.Context) error {
		log, _ := spanlogger.New(innerCtx, method)
		defer log.Finish()
		log.LogFields(otlog.Int("keys requested", len(keys)))

		var err error
		items, err = c.memcache.GetMulti(keys)

		log.LogFields(otlog.Int("keys found", len(items)))

		// Memcached returns partial results even on error.
		if err != nil {
			log.Error(err)
			level.Error(log).Log("msg", "Failed to get keys from memcached", "err", err)
		}
		return err
	})

	if err != nil {
		return found, bufs, keys
	}

	for _, key := range keys {
		item, ok := items[key]
		if ok {
			found = append(found, key)
			bufs = append(bufs, item.Value)
		} else {
			missed = append(missed, key)
		}
	}
	return
}

func (c *Memcached) fetchKeysBatched(ctx context.Context, keys []string) (found []string, bufs [][]byte, missed []string) {
	resultsCh := make(chan *result)
	batchSize := c.cfg.BatchSize

	go func() {
		for i, j := 0, 0; i < len(keys); i += batchSize {
			batchKeys := keys[i:math.Min(i+batchSize, len(keys))]
			select {
			case <-c.quit:
				return
			case c.inputCh <- &work{
				keys:     batchKeys,
				ctx:      ctx,
				resultCh: resultsCh,
				batchID:  j,
			}:
			}
			j++
		}
	}()

	// Read all values from this channel to avoid blocking upstream.
	numResults := len(keys) / batchSize
	if len(keys)%batchSize != 0 {
		numResults++
	}

	// We need to order found by the input keys order.
	results := make([]*result, numResults)
loopResults:
	for i := 0; i < numResults; i++ {
		select {
		case <-c.quit:
			break loopResults
		case result := <-resultsCh:
			results[result.batchID] = result
		}
	}

	for _, result := range results {
		if result == nil {
			continue
		}
		found = append(found, result.found...)
		bufs = append(bufs, result.bufs...)
		missed = append(missed, result.missed...)
	}

	return
}
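// As a hedged, illustrative sketch (not part of the upstream Cortex code):
// a caller of Fetch gets back three slices that partition the requested keys.
// found and bufs are index-aligned (bufs[i] is the value for found[i]), while
// missed holds the keys with no cached value. The function name and example
// keys below are hypothetical.
func fetchExample(ctx context.Context, c *Memcached) {
	keys := []string{HashKey("chunk-1"), HashKey("chunk-2")}
	found, bufs, missed := c.Fetch(ctx, keys)
	for i := range found {
		level.Debug(c.logger).Log("msg", "cache hit", "key", found[i], "size", len(bufs[i]))
	}
	for _, key := range missed {
		level.Debug(c.logger).Log("msg", "cache miss", "key", key)
	}
}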
// Store stores the key in the cache.
func (c *Memcached) Store(ctx context.Context, keys []string, bufs [][]byte) {
	for i := range keys {
		err := instr.CollectedRequest(ctx, "Memcache.Put", c.requestDuration, memcacheStatusCode, func(_ context.Context) error {
			item := memcache.Item{
				Key:        keys[i],
				Value:      bufs[i],
				Expiration: int32(c.cfg.Expiration.Seconds()),
			}
			return c.memcache.Set(&item)
		})
		if err != nil {
			level.Error(c.logger).Log("msg", "failed to put to memcached", "name", c.name, "err", err)
		}
	}
}

// Stop signals the background fetch goroutines, if any, to quit and waits
// for them to exit.
func (c *Memcached) Stop() {
	if c.quit == nil {
		return
	}

	select {
	case <-c.quit:
	default:
		close(c.quit)
	}
	c.wg.Wait()
}

// HashKey hashes key into something you can store in memcached.
func HashKey(key string) string {
	hasher := fnv.New64a()
	_, _ = hasher.Write([]byte(key)) // This'll never error.

	// Hex encode, because memcached rejects the raw bytes produced by the hash.
	return hex.EncodeToString(hasher.Sum(nil))
}
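// As a hedged, illustrative sketch (not part of the upstream Cortex code):
// memcached rejects keys longer than 250 bytes or containing whitespace or
// control characters, so raw chunk keys are hashed before use. HashKey always
// yields a 16-character hex string (the FNV-64a digest), whatever the input.
// The function name and the key below are hypothetical.
func hashKeyExample() string {
	// Fixed-length hex digest, safe to use as a memcached key.
	return HashKey("fake/userID/chunkFingerprint:from:through")
}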