storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/disk-cache.go

/*
 * MinIO Cloud Storage, (C) 2019,2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"storj.io/minio/cmd/config/cache"
	xhttp "storj.io/minio/cmd/http"
	"storj.io/minio/cmd/logger"
	objectlock "storj.io/minio/pkg/bucket/object/lock"
	"storj.io/minio/pkg/color"
	"storj.io/minio/pkg/hash"
	"storj.io/minio/pkg/sync/errgroup"
	"storj.io/minio/pkg/wildcard"
)

const (
	cacheBlkSize          = 1 << 20
	cacheGCInterval       = time.Minute * 30
	writeBackStatusHeader = ReservedMetadataPrefixLower + "write-back-status"
	writeBackRetryHeader  = ReservedMetadataPrefixLower + "write-back-retry"
)

type cacheCommitStatus string

const (
	// CommitPending - cache writeback with backend is pending.
	CommitPending cacheCommitStatus = "pending"

	// CommitComplete - cache writeback completed ok.
	CommitComplete cacheCommitStatus = "complete"

	// CommitFailed - cache writeback needs a retry.
	CommitFailed cacheCommitStatus = "failed"
)

// String returns string representation of status
func (s cacheCommitStatus) String() string {
	return string(s)
}

// CacheStorageInfo - represents total, free capacity of
// underlying cache storage.
type CacheStorageInfo struct {
	Total uint64 // Total cache disk space.
	Free  uint64 // Free cache available space.
}

// CacheObjectLayer implements primitives for cache object API layer.
type CacheObjectLayer interface {
	// Object operations.
	GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
	GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error)
	DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error)
	PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
	CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
	// Storage operations.
	StorageInfo(ctx context.Context) CacheStorageInfo
	CacheStats() *CacheStats
}

// Abstracts disk caching - used by the S3 layer
type cacheObjects struct {
	// slice of cache drives
	cache []*diskCache
	// file path patterns to exclude from cache
	exclude []string
	// number of accesses after which to cache an object
	after int
	// commit objects in async manner
	commitWriteback bool
	// if true migration is in progress from v1 to v2
	migrating bool
	// mutex to protect migration bool
	migMutex sync.Mutex
	// retry queue for writeback cache mode to reattempt upload to backend
	wbRetryCh chan ObjectInfo
	// Cache stats
	cacheStats *CacheStats

	InnerGetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
	InnerGetObjectInfoFn  func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	InnerDeleteObjectFn   func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	InnerPutObjectFn      func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
	InnerCopyObjectFn     func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
}

func (c *cacheObjects) incHitsToMeta(ctx context.Context, dcache *diskCache, bucket, object string, size int64, eTag string, rs *HTTPRangeSpec) error {
	metadata := map[string]string{"etag": eTag}
	return dcache.SaveMetadata(ctx, bucket, object, metadata, size, rs, "", true)
}

// Backend metadata could have changed through server side copy - reset cache metadata if that is the case
func (c *cacheObjects) updateMetadataIfChanged(ctx context.Context, dcache *diskCache, bucket, object string, bkObjectInfo, cacheObjInfo ObjectInfo, rs *HTTPRangeSpec) error {
	bkMeta := make(map[string]string, len(bkObjectInfo.UserDefined))
	cacheMeta := make(map[string]string, len(cacheObjInfo.UserDefined))
	for k, v := range bkObjectInfo.UserDefined {
		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
			// Do not need to send any internal metadata
			continue
		}
		bkMeta[http.CanonicalHeaderKey(k)] = v
	}
	for k, v := range cacheObjInfo.UserDefined {
		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
			// Do not need to send any internal metadata
			continue
		}
		cacheMeta[http.CanonicalHeaderKey(k)] = v
	}

	if !isMetadataSame(bkMeta, cacheMeta) ||
		bkObjectInfo.ETag != cacheObjInfo.ETag ||
		bkObjectInfo.ContentType != cacheObjInfo.ContentType ||
		!bkObjectInfo.Expires.Equal(cacheObjInfo.Expires) {
		return dcache.SaveMetadata(ctx, bucket, object, getMetadata(bkObjectInfo), bkObjectInfo.Size, nil, "", false)
	}
	return c.incHitsToMeta(ctx, dcache, bucket, object, cacheObjInfo.Size, cacheObjInfo.ETag, rs)
}

// DeleteObject clears cache entry if backend delete operation succeeds
func (c *cacheObjects) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	if objInfo, err = c.InnerDeleteObjectFn(ctx, bucket, object, opts); err != nil {
		return
	}
	if c.isCacheExclude(bucket, object) || c.skipCache() {
		return
	}

	dcache, cerr := c.getCacheLoc(bucket, object)
	if cerr != nil {
		return objInfo, cerr
	}
	dcache.Delete(ctx, bucket, object)
	return
}

// DeleteObjects batch deletes objects in slice, and clears any cached entries
func (c *cacheObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
	errs := make([]error, len(objects))
	objInfos := make([]ObjectInfo, len(objects))
	for idx, object := range objects {
		opts.VersionID = object.VersionID
		objInfos[idx], errs[idx] = c.DeleteObject(ctx, bucket, object.ObjectName, opts)
	}
	deletedObjects := make([]DeletedObject, len(objInfos))
	for idx := range errs {
		if errs[idx] != nil {
			continue
		}
		if objInfos[idx].DeleteMarker {
			deletedObjects[idx] = DeletedObject{
				DeleteMarker:          objInfos[idx].DeleteMarker,
				DeleteMarkerVersionID: objInfos[idx].VersionID,
			}
			continue
		}
		deletedObjects[idx] = DeletedObject{
			ObjectName: objInfos[idx].Name,
			VersionID:  objInfos[idx].VersionID,
		}
	}
	return deletedObjects, errs
}

// construct a metadata k-v map
func getMetadata(objInfo ObjectInfo) map[string]string {
	metadata := make(map[string]string, len(objInfo.UserDefined)+4)
	metadata["etag"] = objInfo.ETag
	metadata["content-type"] = objInfo.ContentType
	if objInfo.ContentEncoding != "" {
		metadata["content-encoding"] = objInfo.ContentEncoding
	}
	if !objInfo.Expires.Equal(timeSentinel) {
		metadata["expires"] = objInfo.Expires.Format(http.TimeFormat)
	}
	metadata["last-modified"] = objInfo.ModTime.Format(http.TimeFormat)
	for k, v := range objInfo.UserDefined {
		metadata[k] = v
	}
	return metadata
}

// marks cache hit
func (c *cacheObjects) incCacheStats(size int64) {
	c.cacheStats.incHit()
	c.cacheStats.incBytesServed(size)
}

// GetObjectNInfo - serves the object from the disk cache when the cached copy is
// still valid, otherwise reads from the backend and fills the cache in the background.
func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	if c.isCacheExclude(bucket, object) || c.skipCache() {
		return c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}
	var cc *cacheControl
	var cacheObjSize int64
	// fetch diskCache if object is currently cached or nearest available cache drive
	dcache, err := c.getCacheToLoc(ctx, bucket, object)
	if err != nil {
		return c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}

	cacheReader, numCacheHits, cacheErr := dcache.Get(ctx, bucket, object, rs, h, opts)
	if cacheErr == nil {
		cacheObjSize = cacheReader.ObjInfo.Size
		if rs != nil {
			if _, len, err := rs.GetOffsetLength(cacheObjSize); err == nil {
				cacheObjSize = len
			}
		}
		cc = cacheControlOpts(cacheReader.ObjInfo)
		if cc != nil && (!cc.isStale(cacheReader.ObjInfo.ModTime) ||
			cc.onlyIfCached) {
			// This is a cache hit, mark it so
			bytesServed := cacheReader.ObjInfo.Size
			if rs != nil {
				if _, len, err := rs.GetOffsetLength(bytesServed); err == nil {
					bytesServed = len
				}
			}
			c.cacheStats.incHit()
			c.cacheStats.incBytesServed(bytesServed)
			c.incHitsToMeta(ctx, dcache, bucket, object, cacheReader.ObjInfo.Size, cacheReader.ObjInfo.ETag, rs)
			return cacheReader, nil
		}
		if cc != nil && cc.noStore {
			cacheReader.Close()
			c.cacheStats.incMiss()
			bReader, err := c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
			bReader.ObjInfo.CacheLookupStatus = CacheHit
			bReader.ObjInfo.CacheStatus = CacheMiss
			return bReader, err
		}
	}

	objInfo, err := c.InnerGetObjectInfoFn(ctx, bucket, object, opts)
	if backendDownError(err) && cacheErr == nil {
		c.incCacheStats(cacheObjSize)
		return cacheReader, nil
	} else if err != nil {
		if cacheErr == nil {
			cacheReader.Close()
		}
		if _, ok := err.(ObjectNotFound); ok {
			if cacheErr == nil {
				// Delete cached entry if backend object
				// was deleted.
				dcache.Delete(ctx, bucket, object)
			}
		}
		c.cacheStats.incMiss()
		return nil, err
	}

	if !objInfo.IsCacheable() {
		if cacheErr == nil {
			cacheReader.Close()
		}
		c.cacheStats.incMiss()
		return c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}
	// skip cache for objects with locks
	objRetention := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
	legalHold := objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
	if objRetention.Mode.Valid() || legalHold.Status.Valid() {
		if cacheErr == nil {
			cacheReader.Close()
		}
		c.cacheStats.incMiss()
		return c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}
	if cacheErr == nil {
		// if ETag matches for stale cache entry, serve from cache
		if cacheReader.ObjInfo.ETag == objInfo.ETag {
			// Update metadata in case server-side copy might have changed object metadata
			c.updateMetadataIfChanged(ctx, dcache, bucket, object, objInfo, cacheReader.ObjInfo, rs)
			c.incCacheStats(cacheObjSize)
			return cacheReader, nil
		}
		cacheReader.Close()
		// Object is stale, so delete from cache
		dcache.Delete(ctx, bucket, object)
	}

	// Reaching here implies cache miss
	c.cacheStats.incMiss()

	bkReader, bkErr := c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)

	if bkErr != nil {
		return bkReader, bkErr
	}
	// If the object has fewer hits than the configured "after" threshold,
	// just increment the hit counter but do not cache it.
	if numCacheHits < c.after {
		c.incHitsToMeta(ctx, dcache, bucket, object, objInfo.Size, objInfo.ETag, rs)
		return bkReader, bkErr
	}

	// Record if cache has a hit that was invalidated by ETag verification
	if cacheErr == nil {
		bkReader.ObjInfo.CacheLookupStatus = CacheHit
	}

	// Check if we can add it without exceeding total cache size.
	if !dcache.diskSpaceAvailable(objInfo.Size) {
		return bkReader, bkErr
	}

	if rs != nil && !dcache.enableRange {
		go func() {
			// if range caching is disabled, download entire object.
			rs = nil
			// fill cache in the background for range GET requests
			bReader, bErr := c.InnerGetObjectNInfoFn(GlobalContext, bucket, object, rs, h, lockType, opts)
			if bErr != nil {
				return
			}
			defer bReader.Close()
			oi, _, _, err := dcache.statRange(GlobalContext, bucket, object, rs)
			// avoid cache overwrite if another background routine filled cache
			if err != nil || oi.ETag != bReader.ObjInfo.ETag {
				// use a new context to avoid locker prematurely timing out operation when the GetObjectNInfo returns.
				dcache.Put(GlobalContext, bucket, object, bReader, bReader.ObjInfo.Size, rs, ObjectOptions{
					UserDefined: getMetadata(bReader.ObjInfo),
				}, false)
				return
			}
		}()
		return bkReader, bkErr
	}

	// Initialize pipe.
	pipeReader, pipeWriter := io.Pipe()
	teeReader := io.TeeReader(bkReader, pipeWriter)
	userDefined := getMetadata(bkReader.ObjInfo)
	go func() {
		_, putErr := dcache.Put(ctx, bucket, object,
			io.LimitReader(pipeReader, bkReader.ObjInfo.Size),
			bkReader.ObjInfo.Size, rs, ObjectOptions{
				UserDefined: userDefined,
			}, false)
		// close the write end of the pipe, so the error gets
		// propagated to getObjReader
		pipeWriter.CloseWithError(putErr)
	}()
	cleanupBackend := func() { bkReader.Close() }
	cleanupPipe := func() { pipeWriter.Close() }
	return NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, opts, cleanupBackend, cleanupPipe)
}

// Returns ObjectInfo from cache if available.
func (c *cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	getObjectInfoFn := c.InnerGetObjectInfoFn

	if c.isCacheExclude(bucket, object) || c.skipCache() {
		return getObjectInfoFn(ctx, bucket, object, opts)
	}

	// fetch diskCache if object is currently cached or nearest available cache drive
	dcache, err := c.getCacheToLoc(ctx, bucket, object)
	if err != nil {
		return getObjectInfoFn(ctx, bucket, object, opts)
	}
	var cc *cacheControl
	// if cache control setting is valid, avoid HEAD operation to backend
	cachedObjInfo, _, cerr := dcache.Stat(ctx, bucket, object)
	if cerr == nil {
		cc = cacheControlOpts(cachedObjInfo)
		if cc == nil || (cc != nil && !cc.isStale(cachedObjInfo.ModTime)) {
			// This is a cache hit, mark it so
			c.cacheStats.incHit()
			return cachedObjInfo, nil
		}
	}

	objInfo, err := getObjectInfoFn(ctx, bucket, object, opts)
	if err != nil {
		if _, ok := err.(ObjectNotFound); ok {
			// Delete the cached entry if backend object was deleted.
			dcache.Delete(ctx, bucket, object)
			c.cacheStats.incMiss()
			return ObjectInfo{}, err
		}
		if !backendDownError(err) {
			c.cacheStats.incMiss()
			return ObjectInfo{}, err
		}
		if cerr == nil {
			// This is a cache hit, mark it so
			c.cacheStats.incHit()
			return cachedObjInfo, nil
		}
		c.cacheStats.incMiss()
		return ObjectInfo{}, BackendDown{}
	}
	// Reaching here implies cache miss
	c.cacheStats.incMiss()
	// when backend is up, do a sanity check on cached object
	if cerr != nil {
		return objInfo, nil
	}
	if cachedObjInfo.ETag != objInfo.ETag {
		// Delete the cached entry if the backend object was replaced.
		dcache.Delete(ctx, bucket, object)
	}
	return objInfo, nil
}

// CopyObject reverts to backend after evicting any stale cache entries
func (c *cacheObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
	copyObjectFn := c.InnerCopyObjectFn
	if c.isCacheExclude(srcBucket, srcObject) || c.skipCache() {
		return copyObjectFn(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
	}
	if srcBucket != dstBucket || srcObject != dstObject {
		return copyObjectFn(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
	}
	// fetch diskCache if object is currently cached or nearest available cache drive
	dcache, err := c.getCacheToLoc(ctx, srcBucket, srcObject)
	if err != nil {
		return copyObjectFn(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
	}
	// if currently cached, evict old entry and revert to backend.
	if cachedObjInfo, _, cerr := dcache.Stat(ctx, srcBucket, srcObject); cerr == nil {
		cc := cacheControlOpts(cachedObjInfo)
		if cc == nil || !cc.isStale(cachedObjInfo.ModTime) {
			dcache.Delete(ctx, srcBucket, srcObject)
		}
	}
	return copyObjectFn(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
}

// StorageInfo - returns underlying storage statistics.
func (c *cacheObjects) StorageInfo(ctx context.Context) (cInfo CacheStorageInfo) {
	var total, free uint64
	for _, cache := range c.cache {
		if cache == nil {
			continue
		}
		info, err := getDiskInfo(cache.dir)
		logger.GetReqInfo(ctx).AppendTags("cachePath", cache.dir)
		logger.LogIf(ctx, err)
		total += info.Total
		free += info.Free
	}
	return CacheStorageInfo{
		Total: total,
		Free:  free,
	}
}

// CacheStats - returns underlying cache statistics.
func (c *cacheObjects) CacheStats() (cs *CacheStats) {
	return c.cacheStats
}

// skipCache() returns true if cache migration is in progress
func (c *cacheObjects) skipCache() bool {
	c.migMutex.Lock()
	defer c.migMutex.Unlock()
	return c.migrating
}

// Returns true if object should be excluded from cache
func (c *cacheObjects) isCacheExclude(bucket, object string) bool {
	// exclude directories from cache
	if strings.HasSuffix(object, SlashSeparator) {
		return true
	}
	for _, pattern := range c.exclude {
		matchStr := fmt.Sprintf("%s/%s", bucket, object)
		if ok := wildcard.MatchSimple(pattern, matchStr); ok {
			return true
		}
	}
	return false
}

// choose a cache deterministically based on hash of bucket,object. The hash index is treated as
// a hint. In the event that the cache drive at hash index is offline, treat the list of cache drives
// as a circular buffer and walk through them starting at hash index until an online drive is found.
func (c *cacheObjects) getCacheLoc(bucket, object string) (*diskCache, error) {
	index := c.hashIndex(bucket, object)
	numDisks := len(c.cache)
	for k := 0; k < numDisks; k++ {
		i := (index + k) % numDisks
		if c.cache[i] == nil {
			continue
		}
		if c.cache[i].IsOnline() {
			return c.cache[i], nil
		}
	}
	return nil, errDiskNotFound
}

// get cache disk where object is currently cached for a GET operation.
// If the object does not exist at that location, treat the list of cache drives as a circular buffer
// and walk through them starting at the hash index until an online drive is found. If the object is
// not found, fall back to the first online cache drive closest to the hash index, so that the object
// can be re-cached.
func (c *cacheObjects) getCacheToLoc(ctx context.Context, bucket, object string) (*diskCache, error) {
	index := c.hashIndex(bucket, object)

	numDisks := len(c.cache)
	// save first online cache disk closest to the hint index
	var firstOnlineDisk *diskCache
	for k := 0; k < numDisks; k++ {
		i := (index + k) % numDisks
		if c.cache[i] == nil {
			continue
		}
		if c.cache[i].IsOnline() {
			if firstOnlineDisk == nil {
				firstOnlineDisk = c.cache[i]
			}
			if c.cache[i].Exists(ctx, bucket, object) {
				return c.cache[i], nil
			}
		}
	}

	if firstOnlineDisk != nil {
		return firstOnlineDisk, nil
	}
	return nil, errDiskNotFound
}

// Compute a unique hash sum for bucket and object
func (c *cacheObjects) hashIndex(bucket, object string) int {
	return crcHashMod(pathJoin(bucket, object), len(c.cache))
}

// newCache initializes the cacheFSObjects for the "drives" specified in config.json
// or the global env overrides.
func newCache(config cache.Config) ([]*diskCache, bool, error) {
	var caches []*diskCache
	ctx := logger.SetReqInfo(GlobalContext, &logger.ReqInfo{})
	formats, migrating, err := loadAndValidateCacheFormat(ctx, config.Drives)
	if err != nil {
		return nil, false, err
	}
	for i, dir := range config.Drives {
		// skip diskCache creation for cache drives missing a format.json
		if formats[i] == nil {
			caches = append(caches, nil)
			continue
		}
		if err := checkAtimeSupport(dir); err != nil {
			return nil, false, errors.New("Atime support required for disk caching")
		}

		cache, err := newDiskCache(ctx, dir, config)
		if err != nil {
			return nil, false, err
		}
		caches = append(caches, cache)
	}
	return caches, migrating, nil
}

func (c *cacheObjects) migrateCacheFromV1toV2(ctx context.Context) {
	logStartupMessage(color.Blue("Cache migration initiated ...."))

	g := errgroup.WithNErrs(len(c.cache))
	for index, dc := range c.cache {
		if dc == nil {
			continue
		}
		index := index
		g.Go(func() error {
			// start migration from V1 to V2
			return migrateOldCache(ctx, c.cache[index])
		}, index)
	}

	errCnt := 0
	for _, err := range g.Wait() {
		if err != nil {
			errCnt++
			logger.LogIf(ctx, err)
			continue
		}
	}

	if errCnt > 0 {
		return
	}

	// update migration status
	c.migMutex.Lock()
	defer c.migMutex.Unlock()
	c.migrating = false
	logStartupMessage(color.Blue("Cache migration completed successfully."))
}

// PutObject - caches the uploaded object for single Put operations
func (c *cacheObjects) PutObject(ctx context.Context, bucket, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	putObjectFn := c.InnerPutObjectFn
	dcache, err := c.getCacheToLoc(ctx, bucket, object)
	if err != nil {
		// disk cache could not be located, execute backend call.
		return putObjectFn(ctx, bucket, object, r, opts)
	}
	size := r.Size()
	if c.skipCache() {
		return putObjectFn(ctx, bucket, object, r, opts)
	}

	// fetch from backend if there is no space on cache drive
	if !dcache.diskSpaceAvailable(size) {
		return putObjectFn(ctx, bucket, object, r, opts)
	}

	if opts.ServerSideEncryption != nil {
		dcache.Delete(ctx, bucket, object)
		return putObjectFn(ctx, bucket, object, r, opts)
	}

	// skip cache for objects with locks
	objRetention := objectlock.GetObjectRetentionMeta(opts.UserDefined)
	legalHold := objectlock.GetObjectLegalHoldMeta(opts.UserDefined)
	if objRetention.Mode.Valid() || legalHold.Status.Valid() {
		dcache.Delete(ctx, bucket, object)
		return putObjectFn(ctx, bucket, object, r, opts)
	}

	// fetch from backend if cache exclude pattern or cache-control
	// directive set to exclude
	if c.isCacheExclude(bucket, object) {
		dcache.Delete(ctx, bucket, object)
		return putObjectFn(ctx, bucket, object, r, opts)
	}
	if c.commitWriteback {
		oi, err := dcache.Put(ctx, bucket, object, r, r.Size(), nil, opts, false)
		if err != nil {
			return ObjectInfo{}, err
		}
		go c.uploadObject(GlobalContext, oi)
		return oi, nil
	}
	objInfo, err = putObjectFn(ctx, bucket, object, r, opts)

	if err == nil {
		go func() {
			// fill cache in the background
			bReader, bErr := c.InnerGetObjectNInfoFn(GlobalContext, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
			if bErr != nil {
				return
			}
			defer bReader.Close()
			oi, _, err := dcache.Stat(GlobalContext, bucket, object)
			// avoid cache overwrite if another background routine filled cache
			if err != nil || oi.ETag != bReader.ObjInfo.ETag {
				dcache.Put(GlobalContext, bucket, object, bReader, bReader.ObjInfo.Size, nil, ObjectOptions{UserDefined: getMetadata(bReader.ObjInfo)}, false)
			}
		}()
	}
	return objInfo, err
}

// upload cached object to backend in async commit mode.
func (c *cacheObjects) uploadObject(ctx context.Context, oi ObjectInfo) {
	dcache, err := c.getCacheToLoc(ctx, oi.Bucket, oi.Name)
	if err != nil {
		// disk cache could not be located.
		logger.LogIf(ctx, fmt.Errorf("Could not upload %s/%s to backend: %w", oi.Bucket, oi.Name, err))
		return
	}
	cReader, _, bErr := dcache.Get(ctx, oi.Bucket, oi.Name, nil, http.Header{}, ObjectOptions{})
	if bErr != nil {
		return
	}
	defer cReader.Close()

	if cReader.ObjInfo.ETag != oi.ETag {
		return
	}
	st := cacheCommitStatus(oi.UserDefined[writeBackStatusHeader])
	if st == CommitComplete || st.String() == "" {
		return
	}
	hashReader, err := hash.NewReader(cReader, oi.Size, "", "", oi.Size)
	if err != nil {
		return
	}
	var opts ObjectOptions
	opts.UserDefined = make(map[string]string)
	opts.UserDefined[xhttp.ContentMD5] = oi.UserDefined["content-md5"]
	objInfo, err := c.InnerPutObjectFn(ctx, oi.Bucket, oi.Name, NewPutObjReader(hashReader), opts)
	wbCommitStatus := CommitComplete
	if err != nil {
		wbCommitStatus = CommitFailed
	}

	meta := cloneMSS(cReader.ObjInfo.UserDefined)
	retryCnt := 0
	if wbCommitStatus == CommitFailed {
		retryCnt, _ = strconv.Atoi(meta[writeBackRetryHeader])
		retryCnt++
		meta[writeBackRetryHeader] = strconv.Itoa(retryCnt)
	} else {
		delete(meta, writeBackRetryHeader)
	}
	meta[writeBackStatusHeader] = wbCommitStatus.String()
	meta["etag"] = oi.ETag
	dcache.SaveMetadata(ctx, oi.Bucket, oi.Name, meta, objInfo.Size, nil, "", false)
	if retryCnt > 0 {
		// slow down retries
		time.Sleep(time.Second * time.Duration(retryCnt%10+1))
		c.queueWritebackRetry(oi)
	}
}

func (c *cacheObjects) queueWritebackRetry(oi ObjectInfo) {
	select {
	case c.wbRetryCh <- oi:
		c.uploadObject(GlobalContext, oi)
	default:
	}
}

// Returns cacheObjects for use by Server.
func newServerCacheObjects(ctx context.Context, config cache.Config) (CacheObjectLayer, error) {
	// list of disk caches for cache "drives" specified in config.json or MINIO_CACHE_DRIVES env var.
	cache, migrateSw, err := newCache(config)
	if err != nil {
		return nil, err
	}
	c := &cacheObjects{
		cache:           cache,
		exclude:         config.Exclude,
		after:           config.After,
		migrating:       migrateSw,
		migMutex:        sync.Mutex{},
		commitWriteback: config.CommitWriteback,
		cacheStats:      newCacheStats(),
		InnerGetObjectInfoFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
			return newObjectLayerFn().GetObjectInfo(ctx, bucket, object, opts)
		},
		InnerGetObjectNInfoFn: func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
			return newObjectLayerFn().GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
		},
		InnerDeleteObjectFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
			return newObjectLayerFn().DeleteObject(ctx, bucket, object, opts)
		},
		InnerPutObjectFn: func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
			return newObjectLayerFn().PutObject(ctx, bucket, object, data, opts)
		},
		InnerCopyObjectFn: func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
			return newObjectLayerFn().CopyObject(ctx, srcBucket, srcObject, destBucket, destObject, srcInfo, srcOpts, dstOpts)
		},
	}
	c.cacheStats.GetDiskStats = func() []CacheDiskStats {
		cacheDiskStats := make([]CacheDiskStats, len(c.cache))
		for i := range c.cache {
			dcache := c.cache[i]
			cacheDiskStats[i] = CacheDiskStats{}
			if dcache != nil {
				info, err := getDiskInfo(dcache.dir)
				logger.LogIf(ctx, err)
				cacheDiskStats[i].UsageSize = info.Used
				cacheDiskStats[i].TotalCapacity = info.Total
				cacheDiskStats[i].Dir = dcache.stats.Dir
				atomic.StoreInt32(&cacheDiskStats[i].UsageState, atomic.LoadInt32(&dcache.stats.UsageState))
				atomic.StoreUint64(&cacheDiskStats[i].UsagePercent, atomic.LoadUint64(&dcache.stats.UsagePercent))
			}
		}
		return cacheDiskStats
	}
	if migrateSw {
		go c.migrateCacheFromV1toV2(ctx)
	}
	go c.gc(ctx)
	if c.commitWriteback {
		c.wbRetryCh = make(chan ObjectInfo, 10000)
		go func() {
			<-GlobalContext.Done()
			close(c.wbRetryCh)
		}()
		go c.queuePendingWriteback(ctx)
	}

	return c, nil
}

func (c *cacheObjects) gc(ctx context.Context) {
	ticker := time.NewTicker(cacheGCInterval)

	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if c.migrating {
				continue
			}
			for _, dcache := range c.cache {
				if dcache != nil {
					// Check if disk space is available.
					// Will queue a GC scan if at high watermark.
					dcache.diskSpaceAvailable(0)
				}
			}
		}
	}
}

// queues any pending or failed async commits when server restarts
func (c *cacheObjects) queuePendingWriteback(ctx context.Context) {
	for _, dcache := range c.cache {
		if dcache != nil {
			for {
				select {
				case <-ctx.Done():
					return
				case oi, ok := <-dcache.retryWritebackCh:
					if !ok {
						goto next
					}
					c.queueWritebackRetry(oi)
				default:
					time.Sleep(time.Second * 1)
				}
			}
		next:
		}
	}
}