github.com/sequix/cortex@v1.1.6/pkg/chunk/cache/memcached.go

package cache

import (
	"context"
	"encoding/hex"
	"flag"
	"hash/fnv"
	"sync"
	"time"

	"github.com/bradfitz/gomemcache/memcache"
	"github.com/go-kit/kit/log/level"
	opentracing "github.com/opentracing/opentracing-go"
	otlog "github.com/opentracing/opentracing-go/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/sequix/cortex/pkg/util"
	instr "github.com/weaveworks/common/instrument"
)

var (
	memcacheRequestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: "cortex",
		Name:      "memcache_request_duration_seconds",
		Help:      "Total time spent in seconds doing memcache requests.",
		// Memcache requests are very quick: the smallest bucket is 16us, the largest ~262ms.
		Buckets: prometheus.ExponentialBuckets(0.000016, 4, 8),
	}, []string{"method", "status_code", "name"})
)

// observableVecCollector adapts a prometheus.ObserverVec to the
// instrument.Collector interface so request durations can be recorded
// per method and status code.
type observableVecCollector struct {
	v prometheus.ObserverVec
}

func (observableVecCollector) Register()                              {}
func (observableVecCollector) Before(method string, start time.Time) {}
func (o observableVecCollector) After(method, statusCode string, start time.Time) {
	o.v.WithLabelValues(method, statusCode).Observe(time.Since(start).Seconds())
}

// MemcachedConfig is config to make a Memcached
type MemcachedConfig struct {
	Expiration time.Duration `yaml:"expiration,omitempty"`

	BatchSize   int `yaml:"batch_size,omitempty"`
	Parallelism int `yaml:"parallelism,omitempty"`
}

// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet
func (cfg *MemcachedConfig) RegisterFlagsWithPrefix(prefix, description string, f *flag.FlagSet) {
	f.DurationVar(&cfg.Expiration, prefix+"memcached.expiration", 0, description+"How long keys stay in the memcache.")
	f.IntVar(&cfg.BatchSize, prefix+"memcached.batchsize", 0, description+"How many keys to fetch in each batch.")
	f.IntVar(&cfg.Parallelism, prefix+"memcached.parallelism", 100, description+"Maximum active requests to memcache.")
}

// Memcached type caches chunks in memcached
type Memcached struct {
	cfg      MemcachedConfig
	memcache MemcachedClient
	name     string

	requestDuration observableVecCollector

	wg      sync.WaitGroup
	inputCh chan *work
}

// NewMemcached makes a new Memcached.
// TODO(bwplotka): Fix metrics, get them out of globals, separate or allow prefixing.
// TODO(bwplotka): Remove globals & util packages from cache package entirely (e.g util.Logger).
func NewMemcached(cfg MemcachedConfig, client MemcachedClient, name string) *Memcached {
	c := &Memcached{
		cfg:      cfg,
		memcache: client,
		name:     name,
		requestDuration: observableVecCollector{
			v: memcacheRequestDuration.MustCurryWith(prometheus.Labels{
				"name": name,
			}),
		},
	}

	// Batching is only enabled when both a batch size and a worker pool are
	// configured; otherwise fetches run inline on the caller's goroutine.
	if cfg.BatchSize == 0 || cfg.Parallelism == 0 {
		return c
	}

	c.inputCh = make(chan *work)
	c.wg.Add(cfg.Parallelism)

	for i := 0; i < cfg.Parallelism; i++ {
		go func() {
			for input := range c.inputCh {
				res := &result{
					batchID: input.batchID,
				}
				res.found, res.bufs, res.missed = c.fetch(input.ctx, input.keys)
				input.resultCh <- res
			}

			c.wg.Done()
		}()
	}

	return c
}
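// A minimal sketch of wiring this cache up, assuming *memcache.Client from
// github.com/bradfitz/gomemcache/memcache satisfies this package's
// MemcachedClient interface; the server address and cache name below are
// hypothetical:
//
//	client := memcache.New("localhost:11211")
//	cache := NewMemcached(MemcachedConfig{
//		Expiration:  time.Hour,
//		BatchSize:   64,
//		Parallelism: 8,
//	}, client, "chunks")
//	defer cache.Stop()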
type work struct {
	keys     []string
	ctx      context.Context
	resultCh chan<- *result
	batchID  int // For ordering results.
}

type result struct {
	found   []string
	bufs    [][]byte
	missed  []string
	batchID int // For ordering results.
}

func memcacheStatusCode(err error) string {
	// See https://godoc.org/github.com/bradfitz/gomemcache/memcache#pkg-variables
	switch err {
	case nil:
		return "200"
	case memcache.ErrCacheMiss:
		return "404"
	case memcache.ErrMalformedKey:
		return "400"
	default:
		return "500"
	}
}

// Fetch gets keys from the cache. Found keys are returned in the same order
// as they were requested.
func (c *Memcached) Fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missed []string) {
	instr.CollectedRequest(ctx, "Memcache.Get", c.requestDuration, memcacheStatusCode, func(ctx context.Context) error {
		// inputCh is nil when batching is disabled (zero BatchSize or
		// Parallelism), in which case the fetch runs inline.
		if c.inputCh == nil {
			found, bufs, missed = c.fetch(ctx, keys)
			return nil
		}

		found, bufs, missed = c.fetchKeysBatched(ctx, keys)
		return nil
	})
	return
}

func (c *Memcached) fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missed []string) {
	var items map[string]*memcache.Item
	instr.CollectedRequest(ctx, "Memcache.GetMulti", c.requestDuration, memcacheStatusCode, func(ctx context.Context) error {
		// CollectedRequest starts a span in the context it passes to this
		// callback, so the span here is never nil.
		sp := opentracing.SpanFromContext(ctx)
		sp.LogFields(otlog.Int("keys requested", len(keys)))

		var err error
		items, err = c.memcache.GetMulti(keys)

		sp.LogFields(otlog.Int("keys found", len(items)))

		// Memcached returns partial results even on error.
		if err != nil {
			sp.LogFields(otlog.Error(err))
			level.Error(util.Logger).Log("msg", "Failed to get keys from memcached", "err", err)
		}
		return err
	})

	for _, key := range keys {
		item, ok := items[key]
		if ok {
			found = append(found, key)
			bufs = append(bufs, item.Value)
		} else {
			missed = append(missed, key)
		}
	}
	return
}

func (c *Memcached) fetchKeysBatched(ctx context.Context, keys []string) (found []string, bufs [][]byte, missed []string) {
	resultsCh := make(chan *result)
	batchSize := c.cfg.BatchSize

	go func() {
		for i, j := 0, 0; i < len(keys); i += batchSize {
			batchKeys := keys[i:util.Min(i+batchSize, len(keys))]
			c.inputCh <- &work{
				keys:     batchKeys,
				ctx:      ctx,
				resultCh: resultsCh,
				batchID:  j,
			}
			j++
		}
	}()

	// Read all values from this channel to avoid blocking upstream.
	numResults := len(keys) / batchSize
	if len(keys)%batchSize != 0 {
		numResults++
	}

	// Slot results by batchID so found/bufs/missed follow the input key order.
	results := make([]*result, numResults)
	for i := 0; i < numResults; i++ {
		result := <-resultsCh
		results[result.batchID] = result
	}
	close(resultsCh)

	for _, result := range results {
		found = append(found, result.found...)
		bufs = append(bufs, result.bufs...)
		missed = append(missed, result.missed...)
	}

	return
}
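// To illustrate the batching above with hypothetical numbers: 8 keys and
// BatchSize = 3 produce three work items, keys[0:3] (batchID 0), keys[3:6]
// (batchID 1), and keys[6:8] (batchID 2). Workers may finish in any order,
// so results are slotted by batchID before being flattened, and the caller
// sees only the ordered, merged view:
//
//	found, bufs, missed := cache.Fetch(ctx, keys)
//	for i := range found {
//		handle(found[i], bufs[i]) // handle is a hypothetical callback
//	}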
// Store stores the given keys and buffers in the cache.
func (c *Memcached) Store(ctx context.Context, keys []string, bufs [][]byte) {
	for i := range keys {
		err := instr.CollectedRequest(ctx, "Memcache.Put", c.requestDuration, memcacheStatusCode, func(_ context.Context) error {
			item := memcache.Item{
				Key:        keys[i],
				Value:      bufs[i],
				Expiration: int32(c.cfg.Expiration.Seconds()),
			}
			return c.memcache.Set(&item)
		})
		if err != nil {
			level.Error(util.Logger).Log("msg", "failed to put to memcached", "name", c.name, "err", err)
		}
	}
}

// Stop shuts down the batching workers, if any, and waits for them to drain.
func (c *Memcached) Stop() error {
	if c.inputCh == nil {
		return nil
	}

	close(c.inputCh)
	c.wg.Wait()
	return nil
}

// HashKey hashes key into something you can store in memcached.
func HashKey(key string) string {
	hasher := fnv.New64a()
	hasher.Write([]byte(key)) // This'll never error.

	// Hex-encode because memcached rejects the raw bytes produced by the
	// hash: keys must be printable, with no spaces or control characters.
	return hex.EncodeToString(hasher.Sum(nil))
}
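// A short sketch of HashKey in use (the chunk key shown is hypothetical):
// FNV-64a produces 8 bytes, so the result is always a 16-character hex
// string that is safe as a memcached key, however long the input is.
//
//	key := HashKey("userID/fingerprint/from/through")
//	cache.Store(ctx, []string{key}, [][]byte{chunkBytes})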