github.com/thanos-io/thanos@v0.32.5/pkg/store/cache/caching_bucket.go

// Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.

package storecache

import (
    "bytes"
    "context"
    "encoding/json"
    "io"
    "strconv"
    "sync"
    "time"

    "github.com/go-kit/log"
    "github.com/go-kit/log/level"
    "github.com/pkg/errors"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "golang.org/x/sync/errgroup"

    "github.com/thanos-io/objstore"

    "github.com/thanos-io/thanos/pkg/cache"
    "github.com/thanos-io/thanos/pkg/runutil"
    "github.com/thanos-io/thanos/pkg/store/cache/cachekey"
)

const (
    originCache  = "cache"
    originBucket = "bucket"
)

var (
    errObjNotFound = errors.Errorf("object not found")
)

// CachingBucket is a Bucket implementation that provides caching features based on the passed configuration.
type CachingBucket struct {
    objstore.Bucket

    cfg    *cache.CachingBucketConfig
    logger log.Logger

    requestedGetRangeBytes *prometheus.CounterVec
    fetchedGetRangeBytes   *prometheus.CounterVec
    refetchedGetRangeBytes *prometheus.CounterVec

    operationConfigs  map[string][]*cache.OperationConfig
    operationRequests *prometheus.CounterVec
    operationHits     *prometheus.CounterVec
}

// NewCachingBucket creates a new caching bucket with the provided configuration. The configuration
// should not be changed after creating the caching bucket.
func NewCachingBucket(b objstore.Bucket, cfg *cache.CachingBucketConfig, logger log.Logger, reg prometheus.Registerer) (*CachingBucket, error) {
    if b == nil {
        return nil, errors.New("bucket is nil")
    }

    cb := &CachingBucket{
        Bucket: b,
        cfg:    cfg,
        logger: logger,

        operationConfigs: map[string][]*cache.OperationConfig{},

        requestedGetRangeBytes: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
            Name: "thanos_store_bucket_cache_getrange_requested_bytes_total",
            Help: "Total number of bytes requested via GetRange.",
        }, []string{"config"}),
        fetchedGetRangeBytes: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
            Name: "thanos_store_bucket_cache_getrange_fetched_bytes_total",
            Help: "Total number of bytes fetched because of GetRange operation. Data from bucket is then stored to cache.",
        }, []string{"origin", "config"}),
        refetchedGetRangeBytes: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
            Name: "thanos_store_bucket_cache_getrange_refetched_bytes_total",
            Help: "Total number of bytes re-fetched from storage because of GetRange operation, despite being in cache already.",
        }, []string{"origin", "config"}),

        operationRequests: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
            Name: "thanos_store_bucket_cache_operation_requests_total",
            Help: "Number of requested operations matching given config.",
        }, []string{"operation", "config"}),
        operationHits: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
            Name: "thanos_store_bucket_cache_operation_hits_total",
            Help: "Number of operations served from cache for given config.",
        }, []string{"operation", "config"}),
    }

    for op, names := range cfg.AllConfigNames() {
        for _, n := range names {
            cb.operationRequests.WithLabelValues(op, n)
            cb.operationHits.WithLabelValues(op, n)

            if op == objstore.OpGetRange {
                cb.requestedGetRangeBytes.WithLabelValues(n)
                cb.fetchedGetRangeBytes.WithLabelValues(originCache, n)
                cb.fetchedGetRangeBytes.WithLabelValues(originBucket, n)
                cb.refetchedGetRangeBytes.WithLabelValues(originCache, n)
            }
        }
    }

    return cb, nil
}
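
// Illustrative wiring sketch only: buildConfig is a hypothetical helper standing in for the
// per-operation cache/TTL setup that lives in pkg/cache; only NewCachingBucket and the
// objstore/log/prometheus identifiers below are taken from this file and its imports.
//
//     var bkt objstore.Bucket            // any existing object storage bucket
//     cfg := buildConfig()               // placeholder returning a *cache.CachingBucketConfig
//     cb, err := NewCachingBucket(bkt, cfg, log.NewNopLogger(), prometheus.DefaultRegisterer)
//     if err != nil {
//         // handle error
//     }
//     // cb satisfies objstore.Bucket and can be used as a drop-in replacement for bkt.
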
func (cb *CachingBucket) Name() string {
    return "caching: " + cb.Bucket.Name()
}

func (cb *CachingBucket) WithExpectedErrs(expectedFunc objstore.IsOpFailureExpectedFunc) objstore.Bucket {
    if ib, ok := cb.Bucket.(objstore.InstrumentedBucket); ok {
        // Make a copy, but replace bucket with instrumented one.
        res := &CachingBucket{}
        *res = *cb
        res.Bucket = ib.WithExpectedErrs(expectedFunc)
        return res
    }

    return cb
}

func (cb *CachingBucket) ReaderWithExpectedErrs(expectedFunc objstore.IsOpFailureExpectedFunc) objstore.BucketReader {
    return cb.WithExpectedErrs(expectedFunc)
}

func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error {
    cfgName, cfg := cb.cfg.FindIterConfig(dir)
    if cfg == nil {
        return cb.Bucket.Iter(ctx, dir, f, options...)
    }

    cb.operationRequests.WithLabelValues(objstore.OpIter, cfgName).Inc()

    iterVerb := cachekey.BucketCacheKey{Verb: cachekey.IterVerb, Name: dir}
    opts := objstore.ApplyIterOptions(options...)
    if opts.Recursive {
        iterVerb.Verb = cachekey.IterRecursiveVerb
    }

    key := iterVerb.String()
    data := cfg.Cache.Fetch(ctx, []string{key})
    if data[key] != nil {
        list, err := cfg.Codec.Decode(data[key])
        if err == nil {
            cb.operationHits.WithLabelValues(objstore.OpIter, cfgName).Inc()
            for _, n := range list {
                if err := f(n); err != nil {
                    return err
                }
            }
            return nil
        }
        level.Warn(cb.logger).Log("msg", "failed to decode cached Iter result", "key", key, "err", err)
    }

    // Iteration can take a while (especially since it calls f for each entry), and the iter TTL is
    // generally low. Compute the remaining TTL based on the time when the iteration started.
    iterTime := time.Now()
    var list []string
    err := cb.Bucket.Iter(ctx, dir, func(s string) error {
        list = append(list, s)
        return f(s)
    }, options...)

    remainingTTL := cfg.TTL - time.Since(iterTime)
    if err == nil && remainingTTL > 0 {
        data, encErr := cfg.Codec.Encode(list)
        if encErr == nil {
            cfg.Cache.Store(map[string][]byte{key: data}, remainingTTL)
            return nil
        }
        level.Warn(cb.logger).Log("msg", "failed to encode Iter result", "key", key, "err", encErr)
    }
    return err
}
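
// Note on cached listings (descriptive only, no behaviour change): a cache hit replays the stored
// listing through f without contacting the bucket, so objects created or deleted after the entry
// was stored are not visible until the Iter TTL expires. If iterating the bucket takes longer than
// the configured TTL, the remaining TTL is zero or negative and the result is not cached at all.
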
func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) {
    cfgName, cfg := cb.cfg.FindExistConfig(name)
    if cfg == nil {
        return cb.Bucket.Exists(ctx, name)
    }

    cb.operationRequests.WithLabelValues(objstore.OpExists, cfgName).Inc()

    existsVerb := cachekey.BucketCacheKey{Verb: cachekey.ExistsVerb, Name: name}
    key := existsVerb.String()
    hits := cfg.Cache.Fetch(ctx, []string{key})

    if ex := hits[key]; ex != nil {
        exists, err := strconv.ParseBool(string(ex))
        if err == nil {
            cb.operationHits.WithLabelValues(objstore.OpExists, cfgName).Inc()
            return exists, nil
        }
        level.Warn(cb.logger).Log("msg", "unexpected cached 'exists' value", "key", key, "val", string(ex))
    }

    existsTime := time.Now()
    ok, err := cb.Bucket.Exists(ctx, name)
    if err == nil {
        storeExistsCacheEntry(key, ok, existsTime, cfg.Cache, cfg.ExistsTTL, cfg.DoesntExistTTL)
    }

    return ok, err
}

func storeExistsCacheEntry(cachingKey string, exists bool, ts time.Time, cache cache.Cache, existsTTL, doesntExistTTL time.Duration) {
    var ttl time.Duration
    if exists {
        ttl = existsTTL - time.Since(ts)
    } else {
        ttl = doesntExistTTL - time.Since(ts)
    }

    if ttl > 0 {
        cache.Store(map[string][]byte{cachingKey: []byte(strconv.FormatBool(exists))}, ttl)
    }
}

func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
    cfgName, cfg := cb.cfg.FindGetConfig(name)
    if cfg == nil {
        return cb.Bucket.Get(ctx, name)
    }

    cb.operationRequests.WithLabelValues(objstore.OpGet, cfgName).Inc()

    contentVerb := cachekey.BucketCacheKey{Verb: cachekey.ContentVerb, Name: name}
    contentKey := contentVerb.String()
    existsVerb := cachekey.BucketCacheKey{Verb: cachekey.ExistsVerb, Name: name}
    existsKey := existsVerb.String()

    hits := cfg.Cache.Fetch(ctx, []string{contentKey, existsKey})
    if hits[contentKey] != nil {
        cb.operationHits.WithLabelValues(objstore.OpGet, cfgName).Inc()
        return objstore.NopCloserWithSize(bytes.NewBuffer(hits[contentKey])), nil
    }

    // If we know that file doesn't exist, we can return that. Useful for deletion marks.
    if ex := hits[existsKey]; ex != nil {
        if exists, err := strconv.ParseBool(string(ex)); err == nil && !exists {
            cb.operationHits.WithLabelValues(objstore.OpGet, cfgName).Inc()
            return nil, errObjNotFound
        }
    }

    getTime := time.Now()
    reader, err := cb.Bucket.Get(ctx, name)
    if err != nil {
        if cb.Bucket.IsObjNotFoundErr(err) {
            // Cache that object doesn't exist.
            storeExistsCacheEntry(existsKey, false, getTime, cfg.Cache, cfg.ExistsTTL, cfg.DoesntExistTTL)
        }

        return nil, err
    }

    storeExistsCacheEntry(existsKey, true, getTime, cfg.Cache, cfg.ExistsTTL, cfg.DoesntExistTTL)
    return &getReader{
        c:         cfg.Cache,
        ctx:       ctx,
        r:         reader,
        buf:       new(bytes.Buffer),
        startTime: getTime,
        ttl:       cfg.ContentTTL,
        cacheKey:  contentKey,
        maxSize:   cfg.MaxCacheableSize,
    }, nil
}
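
// Note on the reader returned above on a cache miss (descriptive only): getReader, defined near the
// end of this file, streams the object from the bucket while buffering up to MaxCacheableSize bytes.
// The content is stored under the content key only after a clean io.EOF from reading the whole
// object, and only if the remaining ContentTTL is still positive; closing the reader early, or an
// object exceeding MaxCacheableSize, means nothing is cached for the content key.
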
func (cb *CachingBucket) IsObjNotFoundErr(err error) bool {
    return err == errObjNotFound || cb.Bucket.IsObjNotFoundErr(err)
}

func (cb *CachingBucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
    if off < 0 || length <= 0 {
        return cb.Bucket.GetRange(ctx, name, off, length)
    }

    cfgName, cfg := cb.cfg.FindGetRangeConfig(name)
    if cfg == nil {
        return cb.Bucket.GetRange(ctx, name, off, length)
    }

    return cb.cachedGetRange(ctx, name, off, length, cfgName, cfg)
}

func (cb *CachingBucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) {
    cfgName, cfg := cb.cfg.FindAttributesConfig(name)
    if cfg == nil {
        return cb.Bucket.Attributes(ctx, name)
    }

    return cb.cachedAttributes(ctx, name, cfgName, cfg.Cache, cfg.TTL)
}

func (cb *CachingBucket) cachedAttributes(ctx context.Context, name, cfgName string, cache cache.Cache, ttl time.Duration) (objstore.ObjectAttributes, error) {
    attrVerb := cachekey.BucketCacheKey{Verb: cachekey.AttributesVerb, Name: name}
    key := attrVerb.String()

    cb.operationRequests.WithLabelValues(objstore.OpAttributes, cfgName).Inc()

    hits := cache.Fetch(ctx, []string{key})
    if raw, ok := hits[key]; ok {
        var attrs objstore.ObjectAttributes
        err := json.Unmarshal(raw, &attrs)
        if err == nil {
            cb.operationHits.WithLabelValues(objstore.OpAttributes, cfgName).Inc()
            return attrs, nil
        }

        level.Warn(cb.logger).Log("msg", "failed to decode cached Attributes result", "key", key, "err", err)
    }

    attrs, err := cb.Bucket.Attributes(ctx, name)
    if err != nil {
        return objstore.ObjectAttributes{}, err
    }

    if raw, err := json.Marshal(attrs); err == nil {
        cache.Store(map[string][]byte{key: raw}, ttl)
    } else {
        level.Warn(cb.logger).Log("msg", "failed to encode cached Attributes result", "key", key, "err", err)
    }

    return attrs, nil
}

func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset, length int64, cfgName string, cfg *cache.GetRangeConfig) (io.ReadCloser, error) {
    cb.operationRequests.WithLabelValues(objstore.OpGetRange, cfgName).Inc()
    cb.requestedGetRangeBytes.WithLabelValues(cfgName).Add(float64(length))

    attrs, err := cb.cachedAttributes(ctx, name, cfgName, cfg.Cache, cfg.AttributesTTL)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to get object attributes: %s", name)
    }

    // If length goes over the object size, adjust it. We use it later to limit the number of bytes read.
    if offset+length > attrs.Size {
        length = attrs.Size - offset
    }

    // Start and end range are subrange-aligned offsets into the object that we're going to read.
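    // Worked example (illustrative numbers, not config defaults): with SubrangeSize = 16000,
    // offset = 10000 and length = 20000, offset+length = 30000, so startRange = 0 and
    // endRange = 32000, i.e. two 16000-byte subranges are considered even though only 20000
    // bytes were requested.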
    startRange := (offset / cfg.SubrangeSize) * cfg.SubrangeSize
    endRange := ((offset + length) / cfg.SubrangeSize) * cfg.SubrangeSize
    if (offset+length)%cfg.SubrangeSize > 0 {
        endRange += cfg.SubrangeSize
    }

    // The very last subrange in the object may have length that is not divisible by subrange size.
    lastSubrangeOffset := endRange - cfg.SubrangeSize
    lastSubrangeLength := int(cfg.SubrangeSize)
    if endRange > attrs.Size {
        lastSubrangeOffset = (attrs.Size / cfg.SubrangeSize) * cfg.SubrangeSize
        lastSubrangeLength = int(attrs.Size - lastSubrangeOffset)
    }

    numSubranges := (endRange - startRange) / cfg.SubrangeSize

    offsetKeys := make(map[int64]string, numSubranges)
    keys := make([]string, 0, numSubranges)

    totalRequestedBytes := int64(0)
    for off := startRange; off < endRange; off += cfg.SubrangeSize {
        end := off + cfg.SubrangeSize
        if end > attrs.Size {
            end = attrs.Size
        }
        totalRequestedBytes += (end - off)
        objectSubrange := cachekey.BucketCacheKey{Verb: cachekey.SubrangeVerb, Name: name, Start: off, End: end}
        k := objectSubrange.String()
        keys = append(keys, k)
        offsetKeys[off] = k
    }

    // Try to get all subranges from the cache.
    totalCachedBytes := int64(0)
    hits := cfg.Cache.Fetch(ctx, keys)
    for _, b := range hits {
        totalCachedBytes += int64(len(b))
    }
    cb.fetchedGetRangeBytes.WithLabelValues(originCache, cfgName).Add(float64(totalCachedBytes))
    cb.operationHits.WithLabelValues(objstore.OpGetRange, cfgName).Add(float64(len(hits)) / float64(len(keys)))

    if len(hits) < len(keys) {
        if hits == nil {
            hits = map[string][]byte{}
        }

        err := cb.fetchMissingSubranges(ctx, name, startRange, endRange, offsetKeys, hits, lastSubrangeOffset, lastSubrangeLength, cfgName, cfg)
        if err != nil {
            return nil, err
        }
    }

    return io.NopCloser(newSubrangesReader(cfg.SubrangeSize, offsetKeys, hits, offset, length)), nil
}

type rng struct {
    start, end int64
}

// fetchMissingSubranges fetches missing subranges, stores them into "hits" map
// and into cache as well (using provided cacheKeys).
func (cb *CachingBucket) fetchMissingSubranges(ctx context.Context, name string, startRange, endRange int64, cacheKeys map[int64]string, hits map[string][]byte, lastSubrangeOffset int64, lastSubrangeLength int, cfgName string, cfg *cache.GetRangeConfig) error {
    // Ordered list of missing sub-ranges.
    var missing []rng

    for off := startRange; off < endRange; off += cfg.SubrangeSize {
        if hits[cacheKeys[off]] == nil {
            missing = append(missing, rng{start: off, end: off + cfg.SubrangeSize})
        }
    }

    missing = mergeRanges(missing, 0) // Merge adjacent ranges.
    // Keep merging until we are within the configured maximum number of ranges (= sub-requests).
    for limit := cfg.SubrangeSize; cfg.MaxSubRequests > 0 && len(missing) > cfg.MaxSubRequests; limit = limit * 2 {
        missing = mergeRanges(missing, limit)
    }
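
    // Worked example (illustrative): with SubrangeSize = 16000 and MaxSubRequests = 2, missing
    // subranges [0, 16000), [32000, 48000) and [96000, 112000) merge at limit 16000 into
    // [0, 48000) and [96000, 112000), which already fits within 2 GetRange sub-requests. Larger
    // gaps are only bridged if the limit keeps doubling, at the cost of re-fetching bytes that
    // are already cached (tracked by refetchedGetRangeBytes below).
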
    var hitsMutex sync.Mutex

    // Run parallel queries for each missing range. Fetched data is stored into 'hits' map, protected by hitsMutex.
    g, gctx := errgroup.WithContext(ctx)
    for _, m := range missing {
        m := m
        g.Go(func() error {
            r, err := cb.Bucket.GetRange(gctx, name, m.start, m.end-m.start)
            if err != nil {
                return errors.Wrapf(err, "fetching range [%d, %d]", m.start, m.end)
            }
            defer runutil.CloseWithLogOnErr(cb.logger, r, "fetching range [%d, %d]", m.start, m.end)

            var bufSize int64
            if lastSubrangeOffset >= m.end {
                bufSize = m.end - m.start
            } else {
                bufSize = ((m.end - m.start) - cfg.SubrangeSize) + int64(lastSubrangeLength)
            }

            buf := make([]byte, bufSize)
            _, err = io.ReadFull(r, buf)
            if err != nil {
                return errors.Wrapf(err, "fetching range [%d, %d]", m.start, m.end)
            }

            for off := m.start; off < m.end && gctx.Err() == nil; off += cfg.SubrangeSize {
                key := cacheKeys[off]
                if key == "" {
                    return errors.Errorf("fetching range [%d, %d]: caching key for offset %d not found", m.start, m.end, off)
                }

                // We need a new buffer for each subrange, both for storing into hits, and also for caching.
                var subrangeData []byte
                if off == lastSubrangeOffset {
                    // The very last subrange in the object may have different length,
                    // if object length isn't divisible by subrange size.
                    subrangeData = buf[off-m.start : off-m.start+int64(lastSubrangeLength)]
                } else {
                    subrangeData = buf[off-m.start : off-m.start+cfg.SubrangeSize]
                }

                storeToCache := false
                hitsMutex.Lock()
                if _, ok := hits[key]; !ok {
                    storeToCache = true
                    hits[key] = subrangeData
                }
                hitsMutex.Unlock()

                if storeToCache {
                    cb.fetchedGetRangeBytes.WithLabelValues(originBucket, cfgName).Add(float64(len(subrangeData)))
                    cfg.Cache.Store(map[string][]byte{key: subrangeData}, cfg.SubrangeTTL)
                } else {
                    cb.refetchedGetRangeBytes.WithLabelValues(originCache, cfgName).Add(float64(len(subrangeData)))
                }
            }

            return gctx.Err()
        })
    }

    return g.Wait()
}

// Merges ranges that are close to each other. Modifies input.
func mergeRanges(input []rng, limit int64) []rng {
    if len(input) == 0 {
        return input
    }

    last := 0
    for ix := 1; ix < len(input); ix++ {
        if (input[ix].start - input[last].end) <= limit {
            input[last].end = input[ix].end
        } else {
            last++
            input[last] = input[ix]
        }
    }
    return input[:last+1]
}
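
// For example (illustrative), mergeRanges([{0, 16000}, {16000, 32000}, {48000, 64000}], 0) returns
// [{0, 32000}, {48000, 64000}]: adjacent ranges are coalesced, while the 16000-byte gap is kept.
// With limit = 16000 the same input collapses into the single range [{0, 64000}].
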
// Reader implementation that uses in-memory subranges.
type subrangesReader struct {
    subrangeSize int64

    // Mapping of subrangeSize-aligned offsets to keys in hits.
    offsetsKeys map[int64]string
    subranges   map[string][]byte

    // Offset for next read, used to find correct subrange to return data from.
    readOffset int64

    // Remaining data to return from this reader. Once zero, this reader reports EOF.
    remaining int64
}

func newSubrangesReader(subrangeSize int64, offsetsKeys map[int64]string, subranges map[string][]byte, readOffset, remaining int64) *subrangesReader {
    return &subrangesReader{
        subrangeSize: subrangeSize,
        offsetsKeys:  offsetsKeys,
        subranges:    subranges,

        readOffset: readOffset,
        remaining:  remaining,
    }
}

func (c *subrangesReader) Read(p []byte) (n int, err error) {
    if c.remaining <= 0 {
        return 0, io.EOF
    }

    currentSubrangeOffset := (c.readOffset / c.subrangeSize) * c.subrangeSize
    currentSubrange, err := c.subrangeAt(currentSubrangeOffset)
    if err != nil {
        return 0, errors.Wrapf(err, "read position: %d", c.readOffset)
    }

    offsetInSubrange := int(c.readOffset - currentSubrangeOffset)
    toCopy := len(currentSubrange) - offsetInSubrange
    if toCopy <= 0 {
        // This can only happen if the subrange's length is not subrangeSize, and the reader is told to read more data.
        return 0, errors.Errorf("no more data left in subrange at position %d, subrange length %d, reading position %d", currentSubrangeOffset, len(currentSubrange), c.readOffset)
    }

    if len(p) < toCopy {
        toCopy = len(p)
    }
    if c.remaining < int64(toCopy) {
        toCopy = int(c.remaining) // Conversion is safe, c.remaining is small enough.
    }

    copy(p, currentSubrange[offsetInSubrange:offsetInSubrange+toCopy])
    c.readOffset += int64(toCopy)
    c.remaining -= int64(toCopy)

    return toCopy, nil
}

func (c *subrangesReader) subrangeAt(offset int64) ([]byte, error) {
    b := c.subranges[c.offsetsKeys[offset]]
    if b == nil {
        return nil, errors.Errorf("subrange for offset %d not found", offset)
    }
    return b, nil
}

type getReader struct {
    c         cache.Cache
    ctx       context.Context
    r         io.ReadCloser
    buf       *bytes.Buffer
    startTime time.Time
    ttl       time.Duration
    cacheKey  string
    maxSize   int
}

func (g *getReader) Close() error {
    // We don't know if the entire object was read; don't store it here.
    g.buf = nil
    return g.r.Close()
}

func (g *getReader) Read(p []byte) (n int, err error) {
    n, err = g.r.Read(p)
    if n > 0 && g.buf != nil {
        if g.buf.Len()+n <= g.maxSize {
            g.buf.Write(p[:n])
        } else {
            // Object is larger than max size, stop caching.
            g.buf = nil
        }
    }

    if err == io.EOF && g.buf != nil {
        remainingTTL := g.ttl - time.Since(g.startTime)
        if remainingTTL > 0 {
            g.c.Store(map[string][]byte{g.cacheKey: g.buf.Bytes()}, remainingTTL)
        }
        // Clear reference, to avoid doing another Store on next read.
        g.buf = nil
    }

    return n, err
}

// JSONIterCodec encodes iter results into JSON. Suitable for root dir.
type JSONIterCodec struct{}

func (jic JSONIterCodec) Encode(files []string) ([]byte, error) {
    return json.Marshal(files)
}

func (jic JSONIterCodec) Decode(data []byte) ([]string, error) {
    var list []string
    err := json.Unmarshal(data, &list)
    return list, err
}
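
// Illustrative round trip for the codec above (sketch; the directory names are made up):
//
//     raw, _ := JSONIterCodec{}.Encode([]string{"01ABC/", "01DEF/"}) // `["01ABC/","01DEF/"]`
//     dirs, _ := JSONIterCodec{}.Decode(raw)                         // []string{"01ABC/", "01DEF/"}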