storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/data-scanner.go

     1  /*
     2   * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
     3   *
     4   * Licensed under the Apache License, Version 2.0 (the "License");
     5   * you may not use this file except in compliance with the License.
     6   * You may obtain a copy of the License at
     7   *
     8   *     http://www.apache.org/licenses/LICENSE-2.0
     9   *
    10   * Unless required by applicable law or agreed to in writing, software
    11   * distributed under the License is distributed on an "AS IS" BASIS,
    12   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13   * See the License for the specific language governing permissions and
    14   * limitations under the License.
    15   */
    16  
    17  package cmd
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"encoding/binary"
    23  	"errors"
    24  	"math"
    25  	"math/rand"
    26  	"net/http"
    27  	"os"
    28  	"path"
    29  	"strings"
    30  	"sync"
    31  	"time"
    32  
    33  	"github.com/willf/bloom"
    34  
    35  	"storj.io/minio/cmd/config/heal"
    36  	"storj.io/minio/cmd/logger"
    37  	"storj.io/minio/cmd/logger/message/audit"
    38  	"storj.io/minio/pkg/bucket/lifecycle"
    39  	"storj.io/minio/pkg/bucket/replication"
    40  	"storj.io/minio/pkg/color"
    41  	"storj.io/minio/pkg/console"
    42  	"storj.io/minio/pkg/event"
    43  	"storj.io/minio/pkg/hash"
    44  	"storj.io/minio/pkg/madmin"
    45  )
    46  
    47  const (
    48  	dataScannerSleepPerFolder = time.Millisecond // Time to wait between folders.
    49  	dataUsageUpdateDirCycles  = 16               // Visit all folders every n cycles.
    50  
    51  	healDeleteDangling    = true
    52  	healFolderIncludeProb = 32  // Include a clean folder one in n cycles.
    53  	healObjectSelectProb  = 512 // Overall probability of a file being selected for a heal check; one in n.
    54  )
    55  
    56  var (
    57  	globalHealConfig   heal.Config
    58  	globalHealConfigMu sync.Mutex
    59  
    60  	dataScannerLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
    61  	// Sleeper values are updated when config is loaded.
    62  	scannerSleeper = newDynamicSleeper(10, 10*time.Second)
    63  	scannerCycle   = &safeDuration{}
    64  )
    65  
    66  // initDataScanner will start the scanner in the background.
    67  func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
    68  	go runDataScanner(ctx, objAPI)
    69  }
    70  
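        // safeDuration is a time.Duration protected by a mutex so the scanner
        // cycle duration can be updated and read concurrently.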
    71  type safeDuration struct {
    72  	sync.Mutex
    73  	t time.Duration
    74  }
    75  
    76  func (s *safeDuration) Update(t time.Duration) {
    77  	s.Lock()
    78  	defer s.Unlock()
    79  	s.t = t
    80  }
    81  
    82  func (s *safeDuration) Get() time.Duration {
    83  	s.Lock()
    84  	defer s.Unlock()
    85  	return s.t
    86  }
    87  
    88  // runDataScanner will start a data scanner.
    89  // The function will block until the context is canceled.
    90  // There should only ever be one scanner running per cluster.
    91  func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
    92  	var err error
    93  	// Make sure only 1 scanner is running on the cluster.
    94  	locker := objAPI.NewNSLock(minioMetaBucket, "runDataScanner.lock")
    95  	r := rand.New(rand.NewSource(time.Now().UnixNano()))
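        	// Keep retrying until the leader lock is acquired, sleeping a random
        	// fraction of the scanner cycle between attempts to add jitter.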
    96  	for {
    97  		ctx, err = locker.GetLock(ctx, dataScannerLeaderLockTimeout)
    98  		if err != nil {
    99  			time.Sleep(time.Duration(r.Float64() * float64(scannerCycle.Get())))
   100  			continue
   101  		}
   102  		break
   103  		// No unlock for "leader" lock.
   104  	}
   105  
   106  	// Load current bloom cycle
   107  	nextBloomCycle := intDataUpdateTracker.current() + 1
   108  
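        	// If a cycle counter was persisted to the data usage bucket (stored as
        	// 8 bytes, little-endian), prefer it over the in-memory value.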
   109  	br, err := objAPI.GetObjectNInfo(ctx, dataUsageBucket, dataUsageBloomName, nil, http.Header{}, readLock, ObjectOptions{})
   110  	if err != nil {
   111  		if !isErrObjectNotFound(err) && !isErrBucketNotFound(err) {
   112  			logger.LogIf(ctx, err)
   113  		}
   114  	} else {
   115  		if br.ObjInfo.Size == 8 {
   116  			if err = binary.Read(br, binary.LittleEndian, &nextBloomCycle); err != nil {
   117  				logger.LogIf(ctx, err)
   118  			}
   119  		}
   120  		br.Close()
   121  	}
   122  
   123  	scannerTimer := time.NewTimer(scannerCycle.Get())
   124  	defer scannerTimer.Stop()
   125  
   126  	for {
   127  		select {
   128  		case <-ctx.Done():
   129  			return
   130  		case <-scannerTimer.C:
   131  			// Reset the timer for next cycle.
   132  			scannerTimer.Reset(scannerCycle.Get())
   133  
   134  			if intDataUpdateTracker.debug {
   135  				console.Debugln("starting scanner cycle")
   136  			}
   137  
   138  			// Collect scan results and store the data usage totals in the background while the scan runs.
   139  			results := make(chan madmin.DataUsageInfo, 1)
   140  			go storeDataUsageInBackend(ctx, objAPI, results)
   141  			bf, err := GlobalNotificationSys.updateBloomFilter(ctx, nextBloomCycle)
   142  			logger.LogIf(ctx, err)
   143  			err = objAPI.NSScanner(ctx, bf, results)
   144  			close(results)
   145  			logger.LogIf(ctx, err)
   146  			if err == nil {
   147  				// Store new cycle...
   148  				nextBloomCycle++
   149  				var tmp [8]byte
   150  				binary.LittleEndian.PutUint64(tmp[:], nextBloomCycle)
   151  				r, err := hash.NewReader(bytes.NewReader(tmp[:]), int64(len(tmp)), "", "", int64(len(tmp)))
   152  				if err != nil {
   153  					logger.LogIf(ctx, err)
   154  					continue
   155  				}
   156  
   157  				_, err = objAPI.PutObject(ctx, dataUsageBucket, dataUsageBloomName, NewPutObjReader(r), ObjectOptions{})
   158  				if !isErrBucketNotFound(err) {
   159  					logger.LogIf(ctx, err)
   160  				}
   161  			}
   162  		}
   163  	}
   164  }
   165  
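        // cachedFolder is a folder queued for scanning. objectHealProbDiv divides
        // the heal-check selection interval for objects inside it, increasing the
        // chance that they are checked.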
   166  type cachedFolder struct {
   167  	name              string
   168  	parent            *dataUsageHash
   169  	objectHealProbDiv uint32
   170  }
   171  
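        // folderScanner walks folders under root and builds newCache from oldCache,
        // optionally scheduling heal checks while scanning.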
   172  type folderScanner struct {
   173  	root       string
   174  	getSize    getSizeFn
   175  	oldCache   dataUsageCache
   176  	newCache   dataUsageCache
   177  	withFilter *bloomFilter
   178  
   179  	dataUsageScannerDebug bool
   180  	healFolderInclude     uint32 // Include a clean folder one in n cycles.
   181  	healObjectSelect      uint32 // Do a heal check on an object once every n cycles. Must divide into healFolderInclude
   182  
   183  	newFolders      []cachedFolder
   184  	existingFolders []cachedFolder
   185  	disks           []StorageAPI
   186  }
   187  
   188  // scanDataFolder will scan the basePath+cache.Info.Name and return an updated cache.
   189  // The returned cache will always be valid, but may not be updated from the existing.
   190  // Before each operation the scanner sleeper is invoked, which can be used to temporarily halt the scanner.
   191  // If the supplied context is canceled the function will return at the first chance.
   192  func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache, getSize getSizeFn) (dataUsageCache, error) {
   193  	t := UTCNow()
   194  
   195  	logPrefix := color.Green("data-usage: ")
   196  	logSuffix := color.Blue("- %v + %v", basePath, cache.Info.Name)
   197  	if intDataUpdateTracker.debug {
   198  		defer func() {
   199  			console.Debugf(logPrefix+" Scanner time: %v %s\n", time.Since(t), logSuffix)
   200  		}()
   201  
   202  	}
   203  
   204  	switch cache.Info.Name {
   205  	case "", dataUsageRoot:
   206  		return cache, errors.New("internal error: root scan attempted")
   207  	}
   208  
   209  	skipHeal := cache.Info.SkipHealing
   210  
   211  	s := folderScanner{
   212  		root:                  basePath,
   213  		getSize:               getSize,
   214  		oldCache:              cache,
   215  		newCache:              dataUsageCache{Info: cache.Info},
   216  		newFolders:            nil,
   217  		existingFolders:       nil,
   218  		dataUsageScannerDebug: intDataUpdateTracker.debug,
   219  		healFolderInclude:     0,
   220  		healObjectSelect:      0,
   221  	}
   222  
   223  	// Add disks for set healing.
   224  	if len(cache.Disks) > 0 {
   225  		objAPI, ok := newObjectLayerFn().(*erasureServerPools)
   226  		if ok {
   227  			s.disks = objAPI.GetDisksID(cache.Disks...)
   228  			if len(s.disks) != len(cache.Disks) {
   229  				console.Debugf(logPrefix+"Missing disks, want %d, found %d. Cannot heal. %s\n", len(cache.Disks), len(s.disks), logSuffix)
   230  				s.disks = s.disks[:0]
   231  			}
   232  		}
   233  	}
   234  
   235  	// Enable healing in XL mode.
   236  	if globalIsErasure {
   237  		// Include a clean folder one in n cycles.
   238  		s.healFolderInclude = healFolderIncludeProb
   239  		// Do a heal check on an object once every n cycles. Must divide into healFolderInclude
   240  		s.healObjectSelect = healObjectSelectProb
   241  	}
   242  	if len(cache.Info.BloomFilter) > 0 {
   243  		s.withFilter = &bloomFilter{BloomFilter: &bloom.BloomFilter{}}
   244  		_, err := s.withFilter.ReadFrom(bytes.NewReader(cache.Info.BloomFilter))
   245  		if err != nil {
   246  			logger.LogIf(ctx, err, logPrefix+"Error reading bloom filter")
   247  			s.withFilter = nil
   248  		}
   249  	}
   250  	if s.dataUsageScannerDebug {
   251  		console.Debugf(logPrefix+"Start scanning. Bloom filter: %v %s\n", s.withFilter != nil, logSuffix)
   252  	}
   253  
   254  	done := ctx.Done()
   255  	var flattenLevels = 2
   256  
   257  	if s.dataUsageScannerDebug {
   258  		console.Debugf(logPrefix+"Cycle: %v, Entries: %v %s\n", cache.Info.NextCycle, len(cache.Cache), logSuffix)
   259  	}
   260  
   261  	// Always scan flattenLevels deep. Cache root is level 0.
   262  	todo := []cachedFolder{{name: cache.Info.Name, objectHealProbDiv: 1}}
   263  	for i := 0; i < flattenLevels; i++ {
   264  		if s.dataUsageScannerDebug {
   265  			console.Debugf(logPrefix+"Level %v, scanning %v directories. %s\n", i, len(todo), logSuffix)
   266  		}
   267  		select {
   268  		case <-done:
   269  			return cache, ctx.Err()
   270  		default:
   271  		}
   272  		var err error
   273  		todo, err = s.scanQueuedLevels(ctx, todo, i == flattenLevels-1, skipHeal)
   274  		if err != nil {
   275  			// No useful information...
   276  			return cache, err
   277  		}
   278  	}
   279  
   280  	if s.dataUsageScannerDebug {
   281  		console.Debugf(logPrefix+"New folders: %v %s\n", s.newFolders, logSuffix)
   282  	}
   283  
   284  	// Add new folders first
   285  	for _, folder := range s.newFolders {
   286  		select {
   287  		case <-done:
   288  			return s.newCache, ctx.Err()
   289  		default:
   290  		}
   291  		du, err := s.deepScanFolder(ctx, folder, skipHeal)
   292  		if err != nil {
   293  			logger.LogIf(ctx, err)
   294  			continue
   295  		}
   296  		if du == nil {
   297  			console.Debugln(logPrefix + "no disk usage provided " + logSuffix)
   298  			continue
   299  		}
   300  
   301  		s.newCache.replace(folder.name, "", *du)
   302  		// Add to parent manually
   303  		if folder.parent != nil {
   304  			parent := s.newCache.Cache[folder.parent.Key()]
   305  			parent.addChildString(folder.name)
   306  		}
   307  	}
   308  
   309  	if s.dataUsageScannerDebug {
   310  		console.Debugf(logPrefix+"Existing folders: %v %s\n", len(s.existingFolders), logSuffix)
   311  	}
   312  
   313  	// Do selective scanning of existing folders.
   314  	for _, folder := range s.existingFolders {
   315  		select {
   316  		case <-done:
   317  			return s.newCache, ctx.Err()
   318  		default:
   319  		}
   320  		h := hashPath(folder.name)
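        		// Only fully rescan this folder on the cycles its hash selects
        		// (roughly one in dataUsageUpdateDirCycles); otherwise reuse the
        		// cached entry, unless it is also picked for a heal check this cycle.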
   321  		if !h.mod(s.oldCache.Info.NextCycle, dataUsageUpdateDirCycles) {
   322  			if !h.mod(s.oldCache.Info.NextCycle, s.healFolderInclude/folder.objectHealProbDiv) {
   323  				s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h.Key()])
   324  				continue
   325  			} else {
   326  				folder.objectHealProbDiv = s.healFolderInclude
   327  			}
   328  			folder.objectHealProbDiv = dataUsageUpdateDirCycles
   329  		}
   330  		if s.withFilter != nil {
   331  			_, prefix := path2BucketObjectWithBasePath(basePath, folder.name)
   332  			if s.oldCache.Info.lifeCycle == nil || !s.oldCache.Info.lifeCycle.HasActiveRules(prefix, true) {
   333  				// If folder isn't in filter, skip it completely.
   334  				if !s.withFilter.containsDir(folder.name) {
   335  					if !h.mod(s.oldCache.Info.NextCycle, s.healFolderInclude/folder.objectHealProbDiv) {
   336  						if s.dataUsageScannerDebug {
   337  							console.Debugf(logPrefix+"Skipping non-updated folder: %v %s\n", folder, logSuffix)
   338  						}
   339  						s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h.Key()])
   340  						continue
   341  					} else {
   342  						if s.dataUsageScannerDebug {
   343  							console.Debugf(logPrefix+"Adding non-updated folder to heal check: %v %s\n", folder.name, logSuffix)
   344  						}
   345  						// Update probability of including objects
   346  						folder.objectHealProbDiv = s.healFolderInclude
   347  					}
   348  				}
   349  			}
   350  		}
   351  
   352  		// Update on this cycle...
   353  		du, err := s.deepScanFolder(ctx, folder, skipHeal)
   354  		if err != nil {
   355  			logger.LogIf(ctx, err)
   356  			continue
   357  		}
   358  		if du == nil {
   359  			logger.LogIf(ctx, errors.New("data-usage: no disk usage provided"))
   360  			continue
   361  		}
   362  		s.newCache.replaceHashed(h, folder.parent, *du)
   363  	}
   364  	if s.dataUsageScannerDebug {
   365  		console.Debugf(logPrefix+"Finished scanner, %v entries %s\n", len(s.newCache.Cache), logSuffix)
   366  	}
   367  	s.newCache.Info.LastUpdate = UTCNow()
   368  	s.newCache.Info.NextCycle++
   369  	return s.newCache, nil
   370  }
   371  
   372  // scanQueuedLevels will scan the provided folders.
   373  // Files found in the folders will be added to f.newCache.
   374  // If final is true, the folders found will be put into f.newFolders or f.existingFolders.
   375  // If final is false, the folders found are returned from the function.
   376  func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFolder, final bool, skipHeal bool) ([]cachedFolder, error) {
   377  	var nextFolders []cachedFolder
   378  	done := ctx.Done()
   379  	scannerLogPrefix := color.Green("folder-scanner:")
   380  	for _, folder := range folders {
   381  		select {
   382  		case <-done:
   383  			return nil, ctx.Err()
   384  		default:
   385  		}
   386  		thisHash := hashPath(folder.name)
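        		// Take a copy of the children known from the previous run; entries
        		// still left in 'existing' after the walk are folders that have since
        		// disappeared and are checked for healing further down.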
   387  		existing := f.oldCache.findChildrenCopy(thisHash)
   388  
   389  		// If there are lifecycle rules for the prefix, remove the filter.
   390  		filter := f.withFilter
   391  		_, prefix := path2BucketObjectWithBasePath(f.root, folder.name)
   392  		var activeLifeCycle *lifecycle.Lifecycle
   393  		if f.oldCache.Info.lifeCycle != nil && f.oldCache.Info.lifeCycle.HasActiveRules(prefix, true) {
   394  			if f.dataUsageScannerDebug {
   395  				console.Debugf(scannerLogPrefix+" Prefix %q has active rules\n", prefix)
   396  			}
   397  			activeLifeCycle = f.oldCache.Info.lifeCycle
   398  			filter = nil
   399  		}
   400  		if _, ok := f.oldCache.Cache[thisHash.Key()]; filter != nil && ok {
   401  			// If folder isn't in filter and we have data, skip it completely.
   402  			if folder.name != dataUsageRoot && !filter.containsDir(folder.name) {
   403  				if !thisHash.mod(f.oldCache.Info.NextCycle, f.healFolderInclude/folder.objectHealProbDiv) {
   404  					f.newCache.copyWithChildren(&f.oldCache, thisHash, folder.parent)
   405  					if f.dataUsageScannerDebug {
   406  						console.Debugf(scannerLogPrefix+" Skipping non-updated folder: %v\n", folder.name)
   407  					}
   408  					continue
   409  				} else {
   410  					if f.dataUsageScannerDebug {
   411  						console.Debugf(scannerLogPrefix+" Adding non-updated folder to heal check: %v\n", folder.name)
   412  					}
   413  					// If probability was already healFolderInclude, keep it.
   414  					folder.objectHealProbDiv = f.healFolderInclude
   415  				}
   416  			}
   417  		}
   418  		scannerSleeper.Sleep(ctx, dataScannerSleepPerFolder)
   419  
   420  		cache := dataUsageEntry{}
   421  
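        		// Walk the immediate children of this folder: subdirectories are
        		// queued for the next level (or classified as new/existing on the
        		// final level), while files are sized via getSize and added to this
        		// folder's cache entry.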
   422  		err := readDirFn(path.Join(f.root, folder.name), func(entName string, typ os.FileMode) error {
   423  			// Parse
   424  			entName = pathClean(path.Join(folder.name, entName))
   425  			if entName == "" {
   426  				if f.dataUsageScannerDebug {
   427  					console.Debugf(scannerLogPrefix+" no bucket (%s,%s)\n", f.root, entName)
   428  				}
   429  				return errDoneForNow
   430  			}
   431  			bucket, prefix := path2BucketObjectWithBasePath(f.root, entName)
   432  			if bucket == "" {
   433  				if f.dataUsageScannerDebug {
   434  					console.Debugf(scannerLogPrefix+" no bucket (%s,%s)\n", f.root, entName)
   435  				}
   436  				return errDoneForNow
   437  			}
   438  
   439  			if isReservedOrInvalidBucket(bucket, false) {
   440  				if f.dataUsageScannerDebug {
   441  					console.Debugf(scannerLogPrefix+" invalid bucket: %v, entry: %v\n", bucket, entName)
   442  				}
   443  				return errDoneForNow
   444  			}
   445  
   446  			select {
   447  			case <-done:
   448  				return errDoneForNow
   449  			default:
   450  			}
   451  
   452  			if typ&os.ModeDir != 0 {
   453  				h := hashPath(entName)
   454  				_, exists := f.oldCache.Cache[h.Key()]
   455  				cache.addChildString(entName)
   456  
   457  				this := cachedFolder{name: entName, parent: &thisHash, objectHealProbDiv: folder.objectHealProbDiv}
   458  
   459  				delete(existing, h.Key()) // h.Key() already accounted for.
   460  
   461  				cache.addChild(h)
   462  
   463  				if final {
   464  					if exists {
   465  						f.existingFolders = append(f.existingFolders, this)
   466  					} else {
   467  						f.newFolders = append(f.newFolders, this)
   468  					}
   469  				} else {
   470  					nextFolders = append(nextFolders, this)
   471  				}
   472  				return nil
   473  			}
   474  
   475  			// Dynamic time delay.
   476  			wait := scannerSleeper.Timer(ctx)
   477  
   478  			// Get file size, ignore errors.
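        			// Heal-check selection: objects are heal checked when their parent
        			// folder's hash lands on the current cycle, roughly one folder in
        			// healObjectSelect/objectHealProbDiv, and only in erasure-coded mode.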
   479  			item := scannerItem{
   480  				Path:       path.Join(f.root, entName),
   481  				Typ:        typ,
   482  				bucket:     bucket,
   483  				prefix:     path.Dir(prefix),
   484  				objectName: path.Base(entName),
   485  				debug:      f.dataUsageScannerDebug,
   486  				lifeCycle:  activeLifeCycle,
   487  				heal:       thisHash.mod(f.oldCache.Info.NextCycle, f.healObjectSelect/folder.objectHealProbDiv) && globalIsErasure,
   488  			}
   489  
   490  			// if the drive belongs to an erasure set
   491  			// that is already being healed, skip the
   492  			// healing attempt on this drive.
   493  			item.heal = item.heal && !skipHeal
   494  
   495  			sizeSummary, err := f.getSize(item)
   496  			if err == errSkipFile {
   497  				wait() // wait to proceed to next entry.
   498  
   499  				return nil
   500  			}
   501  
   502  			// A successful read means we have a valid object.
   503  
   504  			// Remove the filename, i.e. the meta file, to construct the object name.
   505  			item.transformMetaDir()
   506  
   507  			// Object already accounted for, remove from heal map,
   508  			// simply because getSize() function already heals the
   509  			// object.
   510  			delete(existing, path.Join(item.bucket, item.objectPath()))
   511  
   512  			cache.addSizes(sizeSummary)
   513  			cache.Objects++
   514  			cache.ObjSizes.add(sizeSummary.totalSize)
   515  
   516  			wait() // wait to proceed to next entry.
   517  
   518  			return nil
   519  		})
   520  		if err != nil {
   521  			return nil, err
   522  		}
   523  
   524  		if f.healObjectSelect == 0 {
   525  			// If we are not scanning, return now.
   526  			f.newCache.replaceHashed(thisHash, folder.parent, cache)
   527  			continue
   528  		}
   529  
   530  		objAPI, ok := newObjectLayerFn().(*erasureServerPools)
   531  		if !ok || len(f.disks) == 0 {
   532  			continue
   533  		}
   534  
   535  		bgSeq, found := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
   536  		if !found {
   537  			continue
   538  		}
   539  
   540  		// Whatever remains in 'existing' are folders at this level
   541  		// that existed in the previous run but were not found now.
   542  		//
   543  		// This may happen for two reasons:
   544  		//
   545  		// 1) The folder/object was deleted.
   546  		// 2) We come from another disk and this disk missed the write.
   547  		//
   548  		// We therefore perform a heal check.
   549  		// If that doesn't bring it back we remove the folder and assume it was deleted.
   550  		// This means that the next run will not look for it.
   551  		// How to resolve results.
   552  		resolver := metadataResolutionParams{
   553  			dirQuorum: getReadQuorum(len(f.disks)),
   554  			objQuorum: getReadQuorum(len(f.disks)),
   555  			bucket:    "",
   556  		}
   557  
   558  		healObjectsPrefix := color.Green("healObjects:")
   559  		for k := range existing {
   560  			bucket, prefix := path2BucketObject(k)
   561  			if f.dataUsageScannerDebug {
   562  				console.Debugf(scannerLogPrefix+" checking disappeared folder: %v/%v\n", bucket, prefix)
   563  			}
   564  
   565  			// Dynamic time delay.
   566  			wait := scannerSleeper.Timer(ctx)
   567  			resolver.bucket = bucket
   568  
   569  			foundObjs := false
   570  			dangling := false
   571  			ctx, cancel := context.WithCancel(ctx)
   572  
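        			// List the vanished prefix across all disks. Full agreement is
        			// required; partial results mark the folder as possibly dangling and
        			// queue heal tasks for any versions that can be resolved.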
   573  			err := listPathRaw(ctx, listPathRawOptions{
   574  				disks:          f.disks,
   575  				bucket:         bucket,
   576  				path:           prefix,
   577  				recursive:      true,
   578  				reportNotFound: true,
   579  				minDisks:       len(f.disks), // We want full consistency.
   580  				// Weird, maybe transient error.
   581  				agreed: func(entry metaCacheEntry) {
   582  					if f.dataUsageScannerDebug {
   583  						console.Debugf(healObjectsPrefix+" got agreement: %v\n", entry.name)
   584  					}
   585  				},
   586  				// Some disks have data for this.
   587  				partial: func(entries metaCacheEntries, nAgreed int, errs []error) {
   588  					if f.dataUsageScannerDebug {
   589  						console.Debugf(healObjectsPrefix+" got partial, %d agreed, errs: %v\n", nAgreed, errs)
   590  					}
   591  
   592  					// agreed value less than expected quorum
   593  					dangling = nAgreed < resolver.objQuorum || nAgreed < resolver.dirQuorum
   594  
   595  					// Sleep and reset.
   596  					wait()
   597  					wait = scannerSleeper.Timer(ctx)
   598  					entry, ok := entries.resolve(&resolver)
   599  					if !ok {
   600  						for _, err := range errs {
   601  							if err != nil {
   602  								return
   603  							}
   604  						}
   605  
   606  						// If no errors, queue it for healing.
   607  						entry, _ = entries.firstFound()
   608  					}
   609  
   610  					if f.dataUsageScannerDebug {
   611  						console.Debugf(healObjectsPrefix+" resolved to: %v, dir: %v\n", entry.name, entry.isDir())
   612  					}
   613  
   614  					if entry.isDir() {
   615  						return
   616  					}
   617  					// We got an entry which we should be able to heal.
   618  					fiv, err := entry.fileInfoVersions(bucket)
   619  					if err != nil {
   620  						err := bgSeq.queueHealTask(healSource{
   621  							bucket:    bucket,
   622  							object:    entry.name,
   623  							versionID: "",
   624  						}, madmin.HealItemObject)
   625  						if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
   626  							logger.LogIf(ctx, err)
   627  						}
   628  						foundObjs = foundObjs || err == nil
   629  						return
   630  					}
   631  					for _, ver := range fiv.Versions {
   632  						// Sleep and reset.
   633  						wait()
   634  						wait = scannerSleeper.Timer(ctx)
   635  						err := bgSeq.queueHealTask(healSource{
   636  							bucket:    bucket,
   637  							object:    fiv.Name,
   638  							versionID: ver.VersionID,
   639  						}, madmin.HealItemObject)
   640  						if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
   641  							logger.LogIf(ctx, err)
   642  						}
   643  						foundObjs = foundObjs || err == nil
   644  					}
   645  				},
   646  				// Too many disks failed.
   647  				finished: func(errs []error) {
   648  					if f.dataUsageScannerDebug {
   649  						console.Debugf(healObjectsPrefix+" too many errors: %v\n", errs)
   650  					}
   651  					cancel()
   652  				},
   653  			})
   654  
   655  			if f.dataUsageScannerDebug && err != nil && err != errFileNotFound {
   656  				console.Debugf(healObjectsPrefix+" checking returned value %v (%T)\n", err, err)
   657  			}
   658  
   659  			// If the listing succeeded but the folder is dangling (below quorum), heal it and remove it if it stays dangling.
   660  			if err == nil && dangling {
   661  				if f.dataUsageScannerDebug {
   662  					console.Debugf(healObjectsPrefix+" deleting dangling directory %s\n", prefix)
   663  				}
   664  
   665  				objAPI.HealObjects(ctx, bucket, prefix, madmin.HealOpts{
   666  					Recursive: true,
   667  					Remove:    healDeleteDangling,
   668  				},
   669  					func(bucket, object, versionID string) error {
   670  						// Wait for each heal as per scanner frequency.
   671  						wait()
   672  						wait = scannerSleeper.Timer(ctx)
   673  						return bgSeq.queueHealTask(healSource{
   674  							bucket:    bucket,
   675  							object:    object,
   676  							versionID: versionID,
   677  						}, madmin.HealItemObject)
   678  					})
   679  			}
   680  
   681  			wait()
   682  
   683  			// Keep the folder for future scans if at least one heal task was queued successfully.
   684  			if foundObjs {
   685  				this := cachedFolder{name: k, parent: &thisHash, objectHealProbDiv: folder.objectHealProbDiv}
   686  				cache.addChild(hashPath(k))
   687  				if final {
   688  					f.existingFolders = append(f.existingFolders, this)
   689  				} else {
   690  					nextFolders = append(nextFolders, this)
   691  				}
   692  			}
   693  		}
   694  		f.newCache.replaceHashed(thisHash, folder.parent, cache)
   695  	}
   696  	return nextFolders, nil
   697  }
   698  
   699  // deepScanFolder will deep scan a folder and return the size if no error occurs.
   700  func (f *folderScanner) deepScanFolder(ctx context.Context, folder cachedFolder, skipHeal bool) (*dataUsageEntry, error) {
   701  	var cache dataUsageEntry
   702  
   703  	done := ctx.Done()
   704  
   705  	var addDir func(entName string, typ os.FileMode) error
   706  	var dirStack = []string{f.root, folder.name}
   707  
   708  	deepScannerLogPrefix := color.Green("deep-scanner:")
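        	// addDir recursively walks the folder: subdirectories recurse with a
        	// small per-folder sleep, while files are sized via getSize and
        	// accumulated into the cache entry.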
   709  	addDir = func(entName string, typ os.FileMode) error {
   710  		select {
   711  		case <-done:
   712  			return errDoneForNow
   713  		default:
   714  		}
   715  
   716  		if typ&os.ModeDir != 0 {
   717  			dirStack = append(dirStack, entName)
   718  			err := readDirFn(path.Join(dirStack...), addDir)
   719  			dirStack = dirStack[:len(dirStack)-1]
   720  			scannerSleeper.Sleep(ctx, dataScannerSleepPerFolder)
   721  			return err
   722  		}
   723  
   724  		// Dynamic time delay.
   725  		wait := scannerSleeper.Timer(ctx)
   726  
   727  		// Get file size, ignore errors.
   728  		dirStack = append(dirStack, entName)
   729  		fileName := path.Join(dirStack...)
   730  		dirStack = dirStack[:len(dirStack)-1]
   731  
   732  		bucket, prefix := path2BucketObjectWithBasePath(f.root, fileName)
   733  		var activeLifeCycle *lifecycle.Lifecycle
   734  		if f.oldCache.Info.lifeCycle != nil && f.oldCache.Info.lifeCycle.HasActiveRules(prefix, false) {
   735  			if f.dataUsageScannerDebug {
   736  				console.Debugf(deepScannerLogPrefix+" Prefix %q has active rules\n", prefix)
   737  			}
   738  			activeLifeCycle = f.oldCache.Info.lifeCycle
   739  		}
   740  
   741  		item := scannerItem{
   742  			Path:       fileName,
   743  			Typ:        typ,
   744  			bucket:     bucket,
   745  			prefix:     path.Dir(prefix),
   746  			objectName: path.Base(entName),
   747  			debug:      f.dataUsageScannerDebug,
   748  			lifeCycle:  activeLifeCycle,
   749  			heal:       hashPath(path.Join(prefix, entName)).mod(f.oldCache.Info.NextCycle, f.healObjectSelect/folder.objectHealProbDiv) && globalIsErasure,
   750  		}
   751  
   752  		// if the drive belongs to an erasure set
   753  		// that is already being healed, skip the
   754  		// healing attempt on this drive.
   755  		item.heal = item.heal && !skipHeal
   756  
   757  		sizeSummary, err := f.getSize(item)
   758  		if err == errSkipFile {
   759  			// Wait to throttle IO
   760  			wait()
   761  
   762  			return nil
   763  		}
   764  
   765  		logger.LogIf(ctx, err)
   766  		cache.addSizes(sizeSummary)
   767  		cache.Objects++
   768  		cache.ObjSizes.add(sizeSummary.totalSize)
   769  
   770  		// Wait to throttle IO
   771  		wait()
   772  
   773  		return nil
   774  	}
   775  	err := readDirFn(path.Join(dirStack...), addDir)
   776  	if err != nil {
   777  		return nil, err
   778  	}
   779  	return &cache, nil
   780  }
   781  
   782  // scannerItem represents each file while walking.
   783  type scannerItem struct {
   784  	Path string
   785  	Typ  os.FileMode
   786  
   787  	bucket     string // Bucket.
   788  	prefix     string // Only the prefix if any, does not have final object name.
   789  	objectName string // Only the object name without prefixes.
   790  	lifeCycle  *lifecycle.Lifecycle
   791  	heal       bool // Has the object been selected for heal check?
   792  	debug      bool
   793  }
   794  
   795  type sizeSummary struct {
   796  	totalSize      int64
   797  	replicatedSize int64
   798  	pendingSize    int64
   799  	failedSize     int64
   800  	replicaSize    int64
   801  	pendingCount   uint64
   802  	failedCount    uint64
   803  }
   804  
   805  type getSizeFn func(item scannerItem) (sizeSummary, error)
   806  
   807  // transformMetaDir will transform a directory to prefix/file.ext
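        // For example, an item with prefix "pfx/obj.ext" has its prefix reduced to
        // "pfx" and its objectName set to "obj.ext".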
   808  func (i *scannerItem) transformMetaDir() {
   809  	split := strings.Split(i.prefix, SlashSeparator)
   810  	if len(split) > 1 {
   811  		i.prefix = path.Join(split[:len(split)-1]...)
   812  	} else {
   813  		i.prefix = ""
   814  	}
   815  	// Object name is last element
   816  	i.objectName = split[len(split)-1]
   817  }
   818  
   819  // actionMeta contains information used to apply actions.
   820  type actionMeta struct {
   821  	oi         ObjectInfo
   822  	bitRotScan bool // indicates if bitrot check was requested.
   823  }
   824  
   825  var applyActionsLogPrefix = color.Green("applyActions:")
   826  
   827  func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, meta actionMeta) (size int64) {
   828  	if i.debug {
   829  		if meta.oi.VersionID != "" {
   830  			console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v v(%s)\n", i.bucket, i.objectPath(), meta.oi.VersionID)
   831  		} else {
   832  			console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v\n", i.bucket, i.objectPath())
   833  		}
   834  	}
   835  	healOpts := madmin.HealOpts{Remove: healDeleteDangling}
   836  	if meta.bitRotScan {
   837  		healOpts.ScanMode = madmin.HealDeepScan
   838  	}
   839  	res, err := o.HealObject(ctx, i.bucket, i.objectPath(), meta.oi.VersionID, healOpts)
   840  	if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
   841  		return 0
   842  	}
   843  	if err != nil && !errors.Is(err, NotImplemented{}) {
   844  		logger.LogIf(ctx, err)
   845  		return 0
   846  	}
   847  	return res.ObjectSize
   848  }
   849  
   850  func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, meta actionMeta) (applied bool, size int64) {
   851  	size, err := meta.oi.GetActualSize()
   852  	if i.debug {
   853  		logger.LogIf(ctx, err)
   854  	}
   855  	if i.lifeCycle == nil {
   856  		if i.debug {
   857  			console.Debugf(applyActionsLogPrefix+" no lifecycle rules to apply: %q\n", i.objectPath())
   858  		}
   859  		return false, size
   860  	}
   861  
   862  	versionID := meta.oi.VersionID
   863  	action := i.lifeCycle.ComputeAction(
   864  		lifecycle.ObjectOpts{
   865  			Name:             i.objectPath(),
   866  			UserTags:         meta.oi.UserTags,
   867  			ModTime:          meta.oi.ModTime,
   868  			VersionID:        meta.oi.VersionID,
   869  			DeleteMarker:     meta.oi.DeleteMarker,
   870  			IsLatest:         meta.oi.IsLatest,
   871  			NumVersions:      meta.oi.NumVersions,
   872  			SuccessorModTime: meta.oi.SuccessorModTime,
   873  			RestoreOngoing:   meta.oi.RestoreOngoing,
   874  			RestoreExpires:   meta.oi.RestoreExpires,
   875  			TransitionStatus: meta.oi.TransitionStatus,
   876  		})
   877  	if i.debug {
   878  		if versionID != "" {
   879  			console.Debugf(applyActionsLogPrefix+" lifecycle: %q (version-id=%s), Initial scan: %v\n", i.objectPath(), versionID, action)
   880  		} else {
   881  			console.Debugf(applyActionsLogPrefix+" lifecycle: %q Initial scan: %v\n", i.objectPath(), action)
   882  		}
   883  	}
   884  	switch action {
   885  	case lifecycle.DeleteAction, lifecycle.DeleteVersionAction:
   886  	case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
   887  	case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
   888  	default:
   889  		// No action.
   890  		if i.debug {
   891  			console.Debugf(applyActionsLogPrefix+" object not expirable: %q\n", i.objectPath())
   892  		}
   893  		return false, size
   894  	}
   895  
   896  	obj, err := o.GetObjectInfo(ctx, i.bucket, i.objectPath(), ObjectOptions{
   897  		VersionID: versionID,
   898  	})
   899  	if err != nil {
   900  		switch err.(type) {
   901  		case MethodNotAllowed: // This usually happens for a delete marker
   902  			if !obj.DeleteMarker { // if this is not a delete marker, log and return
   903  				// Do nothing - heal in the future.
   904  				logger.LogIf(ctx, err)
   905  				return false, size
   906  			}
   907  		case ObjectNotFound, VersionNotFound:
   908  			// object not found or version not found return 0
   909  			return false, 0
   910  		default:
   911  			// All other errors proceed.
   912  			logger.LogIf(ctx, err)
   913  			return false, size
   914  		}
   915  	}
   916  
   917  	action = evalActionFromLifecycle(ctx, *i.lifeCycle, obj, i.debug)
   918  	if action != lifecycle.NoneAction {
   919  		applied = applyLifecycleAction(ctx, action, o, obj)
   920  	}
   921  
   922  	if applied {
   923  		switch action {
   924  		case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
   925  			return true, size
   926  		}
   927  		// For all other lifecycle actions that remove data
   928  		return true, 0
   929  	}
   930  
   931  	return false, size
   932  }
   933  
   934  // applyActions will apply lifecycle checks to a scanned item.
   935  // The resulting size on disk will always be returned.
   936  // The metadata will be compared to consensus on the object layer before any changes are applied.
   937  // If no metadata is supplied, -1 is returned if no action is taken.
   938  func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta actionMeta) int64 {
   939  	applied, size := i.applyLifecycle(ctx, o, meta)
   940  	// An applied lifecycle action means we removed or transitioned the object
   941  	// from the current deployment, so we don't have to call the healing
   942  	// routine even if we were asked to via the heal flag.
   943  	if !applied && i.heal {
   944  		size = i.applyHealing(ctx, o, meta)
   945  	}
   946  	return size
   947  }
   948  
   949  func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, obj ObjectInfo, debug bool) (action lifecycle.Action) {
   950  	lcOpts := lifecycle.ObjectOpts{
   951  		Name:             obj.Name,
   952  		UserTags:         obj.UserTags,
   953  		ModTime:          obj.ModTime,
   954  		VersionID:        obj.VersionID,
   955  		DeleteMarker:     obj.DeleteMarker,
   956  		IsLatest:         obj.IsLatest,
   957  		NumVersions:      obj.NumVersions,
   958  		SuccessorModTime: obj.SuccessorModTime,
   959  		RestoreOngoing:   obj.RestoreOngoing,
   960  		RestoreExpires:   obj.RestoreExpires,
   961  		TransitionStatus: obj.TransitionStatus,
   962  	}
   963  
   964  	action = lc.ComputeAction(lcOpts)
   965  	if debug {
   966  		console.Debugf(applyActionsLogPrefix+" lifecycle: Secondary scan: %v\n", action)
   967  	}
   968  
   969  	if action == lifecycle.NoneAction {
   970  		return action
   971  	}
   972  
   973  	switch action {
   974  	case lifecycle.DeleteVersionAction, lifecycle.DeleteRestoredVersionAction:
   975  		// Defensive code, should never happen
   976  		if obj.VersionID == "" {
   977  			return lifecycle.NoneAction
   978  		}
   979  		if rcfg, _ := globalBucketObjectLockSys.Get(obj.Bucket); rcfg.LockEnabled {
   980  			locked := enforceRetentionForDeletion(ctx, obj)
   981  			if locked {
   982  				if debug {
   983  					if obj.VersionID != "" {
   984  						console.Debugf(applyActionsLogPrefix+" lifecycle: %s v(%s) is locked, not deleting\n", obj.Name, obj.VersionID)
   985  					} else {
   986  						console.Debugf(applyActionsLogPrefix+" lifecycle: %s is locked, not deleting\n", obj.Name)
   987  					}
   988  				}
   989  				return lifecycle.NoneAction
   990  			}
   991  		}
   992  	}
   993  
   994  	return action
   995  }
   996  
   997  func applyTransitionAction(ctx context.Context, action lifecycle.Action, objLayer ObjectLayer, obj ObjectInfo) bool {
   998  	opts := ObjectOptions{}
   999  	if obj.TransitionStatus == "" {
  1000  		opts.Versioned = globalBucketVersioningSys.Enabled(obj.Bucket)
  1001  		opts.VersionID = obj.VersionID
  1002  		opts.TransitionStatus = lifecycle.TransitionPending
  1003  		if _, err := objLayer.DeleteObject(ctx, obj.Bucket, obj.Name, opts); err != nil {
  1004  			if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
  1005  				return false
  1006  			}
  1007  			// Assume it is still there.
  1008  			logger.LogIf(ctx, err)
  1009  			return false
  1010  		}
  1011  	}
  1012  	globalTransitionState.queueTransitionTask(obj)
  1013  	return true
  1014  
  1015  }
  1016  
  1017  func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, restoredObject bool) bool {
  1018  	lcOpts := lifecycle.ObjectOpts{
  1019  		Name:             obj.Name,
  1020  		UserTags:         obj.UserTags,
  1021  		ModTime:          obj.ModTime,
  1022  		VersionID:        obj.VersionID,
  1023  		DeleteMarker:     obj.DeleteMarker,
  1024  		IsLatest:         obj.IsLatest,
  1025  		NumVersions:      obj.NumVersions,
  1026  		SuccessorModTime: obj.SuccessorModTime,
  1027  		RestoreOngoing:   obj.RestoreOngoing,
  1028  		RestoreExpires:   obj.RestoreExpires,
  1029  		TransitionStatus: obj.TransitionStatus,
  1030  	}
  1031  
  1032  	if err := deleteTransitionedObject(ctx, objLayer, obj.Bucket, obj.Name, lcOpts, restoredObject, false); err != nil {
  1033  		if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
  1034  			return false
  1035  		}
  1036  		logger.LogIf(ctx, err)
  1037  		return false
  1038  	}
  1039  	// Notification already sent at *deleteTransitionedObject*, just return 'true' here.
  1040  	return true
  1041  }
  1042  
  1043  func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, applyOnVersion bool) bool {
  1044  	opts := ObjectOptions{}
  1045  
  1046  	if applyOnVersion {
  1047  		opts.VersionID = obj.VersionID
  1048  	}
  1049  	if opts.VersionID == "" {
  1050  		opts.Versioned = globalBucketVersioningSys.Enabled(obj.Bucket)
  1051  	}
  1052  
  1053  	obj, err := objLayer.DeleteObject(ctx, obj.Bucket, obj.Name, opts)
  1054  	if err != nil {
  1055  		if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
  1056  			return false
  1057  		}
  1058  		// Assume it is still there.
  1059  		logger.LogIf(ctx, err)
  1060  		return false
  1061  	}
  1062  
  1063  	// Send audit for the lifecycle delete operation
  1064  	auditLogLifecycle(ctx, obj.Bucket, obj.Name)
  1065  
  1066  	eventName := event.ObjectRemovedDelete
  1067  	if obj.DeleteMarker {
  1068  		eventName = event.ObjectRemovedDeleteMarkerCreated
  1069  	}
  1070  
  1071  	// Notify object deleted event.
  1072  	sendEvent(eventArgs{
  1073  		EventName:  eventName,
  1074  		BucketName: obj.Bucket,
  1075  		Object:     obj,
  1076  		Host:       "Internal: [ILM-EXPIRY]",
  1077  	})
  1078  
  1079  	return true
  1080  }
  1081  
  1082  // applyExpiryRule applies the expiry action to the given object, object version, restored object, or restored object version.
  1083  func applyExpiryRule(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, restoredObject, applyOnVersion bool) bool {
  1084  	if obj.TransitionStatus != "" {
  1085  		return applyExpiryOnTransitionedObject(ctx, objLayer, obj, restoredObject)
  1086  	}
  1087  	return applyExpiryOnNonTransitionedObjects(ctx, objLayer, obj, applyOnVersion)
  1088  }
  1089  
  1090  // applyLifecycleAction performs the action (removal or transitioning of objects) and returns true if it was successfully performed.
  1091  func applyLifecycleAction(ctx context.Context, action lifecycle.Action, objLayer ObjectLayer, obj ObjectInfo) (success bool) {
  1092  	switch action {
  1093  	case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
  1094  		success = applyExpiryRule(ctx, objLayer, obj, false, action == lifecycle.DeleteVersionAction)
  1095  	case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
  1096  		success = applyExpiryRule(ctx, objLayer, obj, true, action == lifecycle.DeleteRestoredVersionAction)
  1097  	case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
  1098  		success = applyTransitionAction(ctx, action, objLayer, obj)
  1099  	}
  1100  	return
  1101  }
  1102  
  1103  // objectPath returns the prefix joined with the object name.
  1104  func (i *scannerItem) objectPath() string {
  1105  	return path.Join(i.prefix, i.objectName)
  1106  }
  1107  
  1108  // healReplication will heal a scanned item that has failed replication.
  1109  func (i *scannerItem) healReplication(ctx context.Context, o ObjectLayer, oi ObjectInfo, sizeS *sizeSummary) {
  1110  	if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() {
  1111  		// heal delete marker replication failure or versioned delete replication failure
  1112  		if oi.ReplicationStatus == replication.Pending ||
  1113  			oi.ReplicationStatus == replication.Failed ||
  1114  			oi.VersionPurgeStatus == Failed || oi.VersionPurgeStatus == Pending {
  1115  			i.healReplicationDeletes(ctx, o, oi)
  1116  			return
  1117  		}
  1118  	}
  1119  	switch oi.ReplicationStatus {
  1120  	case replication.Pending:
  1121  		sizeS.pendingCount++
  1122  		sizeS.pendingSize += oi.Size
  1123  		globalReplicationPool.queueReplicaTask(ctx, ReplicateObjectInfo{ObjectInfo: oi, OpType: replication.HealReplicationType})
  1124  	case replication.Failed:
  1125  		sizeS.failedSize += oi.Size
  1126  		sizeS.failedCount++
  1127  		globalReplicationPool.queueReplicaTask(ctx, ReplicateObjectInfo{ObjectInfo: oi, OpType: replication.HealReplicationType})
  1128  	case replication.Completed, "COMPLETE":
  1129  		sizeS.replicatedSize += oi.Size
  1130  	case replication.Replica:
  1131  		sizeS.replicaSize += oi.Size
  1132  	}
  1133  }
  1134  
  1135  // healReplicationDeletes will heal a scanned deleted item that failed to replicate deletes.
  1136  func (i *scannerItem) healReplicationDeletes(ctx context.Context, o ObjectLayer, oi ObjectInfo) {
  1137  	// handle soft delete and permanent delete failures here.
  1138  	if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() {
  1139  		versionID := ""
  1140  		dmVersionID := ""
  1141  		if oi.VersionPurgeStatus.Empty() {
  1142  			dmVersionID = oi.VersionID
  1143  		} else {
  1144  			versionID = oi.VersionID
  1145  		}
  1146  		globalReplicationPool.queueReplicaDeleteTask(ctx, DeletedObjectVersionInfo{
  1147  			DeletedObject: DeletedObject{
  1148  				ObjectName:                    oi.Name,
  1149  				DeleteMarkerVersionID:         dmVersionID,
  1150  				VersionID:                     versionID,
  1151  				DeleteMarkerReplicationStatus: string(oi.ReplicationStatus),
  1152  				DeleteMarkerMTime:             DeleteMarkerMTime{oi.ModTime},
  1153  				DeleteMarker:                  oi.DeleteMarker,
  1154  				VersionPurgeStatus:            oi.VersionPurgeStatus,
  1155  			},
  1156  			Bucket: oi.Bucket,
  1157  		})
  1158  	}
  1159  }
  1160  
  1161  type dynamicSleeper struct {
  1162  	mu sync.RWMutex
  1163  
  1164  	// Sleep factor
  1165  	factor float64
  1166  
  1167  	// maximum sleep cap,
  1168  	// set to <= 0 to disable.
  1169  	maxSleep time.Duration
  1170  
  1171  	// Don't sleep at all, if time taken is below this value.
  1172  	// This is to avoid too small costly sleeps.
  1173  	minSleep time.Duration
  1174  
  1175  	// cycle is closed and replaced whenever the settings are updated, waking any waiters.
  1176  	cycle chan struct{}
  1177  }
  1178  
  1179  // newDynamicSleeper creates a dynamicSleeper with the given sleep factor and maximum sleep duration.
  1180  func newDynamicSleeper(factor float64, maxWait time.Duration) *dynamicSleeper {
  1181  	return &dynamicSleeper{
  1182  		factor:   factor,
  1183  		cycle:    make(chan struct{}),
  1184  		maxSleep: maxWait,
  1185  		minSleep: 100 * time.Microsecond,
  1186  	}
  1187  }
  1188  
  1189  // Timer starts a timer and returns a function that, when called,
  1190  // sleeps proportionally to the time elapsed since Timer was called.
  1191  func (d *dynamicSleeper) Timer(ctx context.Context) func() {
  1192  	t := time.Now()
  1193  	return func() {
  1194  		doneAt := time.Now()
  1195  		for {
  1196  			// Grab current values
  1197  			d.mu.RLock()
  1198  			minWait, maxWait := d.minSleep, d.maxSleep
  1199  			factor := d.factor
  1200  			cycle := d.cycle
  1201  			d.mu.RUnlock()
  1202  			elapsed := doneAt.Sub(t)
  1203  			// Don't sleep for a really small amount of time
  1204  			wantSleep := time.Duration(float64(elapsed) * factor)
  1205  			if wantSleep <= minWait {
  1206  				return
  1207  			}
  1208  			if maxWait > 0 && wantSleep > maxWait {
  1209  				wantSleep = maxWait
  1210  			}
  1211  			timer := time.NewTimer(wantSleep)
  1212  			select {
  1213  			case <-ctx.Done():
  1214  				if !timer.Stop() {
  1215  					<-timer.C
  1216  				}
  1217  				return
  1218  			case <-timer.C:
  1219  				return
  1220  			case <-cycle:
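        				// Settings were updated and the cycle channel was closed;
        				// loop around and redo the wait with the new factor and limits
        				// (unless the timer already fired).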
  1221  				if !timer.Stop() {
  1222  					// We expired.
  1223  					<-timer.C
  1224  					return
  1225  				}
  1226  			}
  1227  		}
  1228  	}
  1229  }
  1230  
  1231  // Sleep sleeps the specified time multiplied by the sleep factor.
  1232  // If the factor is updated the sleep will be done again with the new factor.
  1233  func (d *dynamicSleeper) Sleep(ctx context.Context, base time.Duration) {
  1234  	for {
  1235  		// Grab current values
  1236  		d.mu.RLock()
  1237  		minWait, maxWait := d.minSleep, d.maxSleep
  1238  		factor := d.factor
  1239  		cycle := d.cycle
  1240  		d.mu.RUnlock()
  1241  		// Don't sleep for a really small amount of time
  1242  		wantSleep := time.Duration(float64(base) * factor)
  1243  		if wantSleep <= minWait {
  1244  			return
  1245  		}
  1246  		if maxWait > 0 && wantSleep > maxWait {
  1247  			wantSleep = maxWait
  1248  		}
  1249  		timer := time.NewTimer(wantSleep)
  1250  		select {
  1251  		case <-ctx.Done():
  1252  			if !timer.Stop() {
  1253  				<-timer.C
  1254  			}
  1255  			return
  1256  		case <-timer.C:
  1257  			return
  1258  		case <-cycle:
  1259  			if !timer.Stop() {
  1260  				// We expired.
  1261  				<-timer.C
  1262  				return
  1263  			}
  1264  		}
  1265  	}
  1266  }
  1267  
  1268  // Update the current settings and wake all waiters.
  1269  // Parameters are the same as in the constructor.
  1270  func (d *dynamicSleeper) Update(factor float64, maxWait time.Duration) error {
  1271  	d.mu.Lock()
  1272  	defer d.mu.Unlock()
  1273  	if math.Abs(d.factor-factor) < 1e-10 && d.maxSleep == maxWait {
  1274  		return nil
  1275  	}
  1276  	// Update values and cycle waiting.
  1277  	close(d.cycle)
  1278  	d.factor = factor
  1279  	d.maxSleep = maxWait
  1280  	d.cycle = make(chan struct{})
  1281  	return nil
  1282  }
  1283  
  1284  func auditLogLifecycle(ctx context.Context, bucket, object string) {
  1285  	entry := audit.NewEntry(globalDeploymentID)
  1286  	entry.Trigger = "internal-scanner"
  1287  	entry.API.Name = "DeleteObject"
  1288  	entry.API.Bucket = bucket
  1289  	entry.API.Object = object
  1290  	ctx = logger.SetAuditEntry(ctx, &entry)
  1291  	logger.AuditLog(ctx, nil, nil, nil)
  1292  }