github.com/rclone/rclone@v1.66.1-0.20240517100346-7b89735ae726/backend/oracleobjectstorage/oracleobjectstorage.go (about)

     1  //go:build !plan9 && !solaris && !js
     2  
     3  // Package oracleobjectstorage provides an interface to the OCI object storage system.
     4  package oracleobjectstorage
     5  
     6  import (
     7  	"context"
     8  	"fmt"
     9  	"io"
    10  	"net/http"
    11  	"path"
    12  	"strings"
    13  	"time"
    14  
    15  	"github.com/oracle/oci-go-sdk/v65/common"
    16  	"github.com/oracle/oci-go-sdk/v65/objectstorage"
    17  	"github.com/rclone/rclone/fs"
    18  	"github.com/rclone/rclone/fs/config/configmap"
    19  	"github.com/rclone/rclone/fs/config/configstruct"
    20  	"github.com/rclone/rclone/fs/hash"
    21  	"github.com/rclone/rclone/fs/operations"
    22  	"github.com/rclone/rclone/fs/walk"
    23  	"github.com/rclone/rclone/lib/bucket"
    24  	"github.com/rclone/rclone/lib/pacer"
    25  )
    26  
    27  // Register with Fs
    28  func init() {
    29  	fs.Register(&fs.RegInfo{
    30  		Name:        "oracleobjectstorage",
    31  		Description: "Oracle Cloud Infrastructure Object Storage",
    32  		Prefix:      "oos",
    33  		NewFs:       NewFs,
    34  		CommandHelp: commandHelp,
    35  		Options:     newOptions(),
    36  	})
    37  }
    38  
// Fs represents a remote OCI object storage server
//
// Paths are of the form bucket/path - rootBucket and rootDirectory
// hold the two halves of root as split by setRoot.
type Fs struct {
	name          string                             // name of this remote
	root          string                             // the path we are working on if any
	opt           Options                            // parsed config options
	ci            *fs.ConfigInfo                     // global config
	features      *fs.Features                       // optional features
	srv           *objectstorage.ObjectStorageClient // the connection to the object storage
	rootBucket    string                             // bucket part of root (if any)
	rootDirectory string                             // directory part of root (if any)
	cache         *bucket.Cache                      // cache for bucket creation status
	pacer         *fs.Pacer                          // To pace the API calls
}
    52  
// NewFs initializes the oracleobjectstorage backend from the config in
// m and returns an fs.Fs rooted at root.
//
// If root actually points at an existing file rather than a directory,
// the returned Fs is rooted at the file's parent directory and the
// error is fs.ErrorIsFile, as the fs.Fs contract requires.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = validateSSECustomerKeyOptions(opt)
	if err != nil {
		return nil, err
	}
	ci := fs.GetConfig(ctx)
	objectStorageClient, err := newObjectStorageClient(ctx, opt)
	if err != nil {
		return nil, err
	}
	pc := fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep)))
	// Set pacer retries to 2 (1 try and 1 retry) because we are
	// relying on SDK retry mechanism, but we allow 2 attempts to
	// retry directory listings after XMLSyntaxError
	pc.SetRetries(2)
	f := &Fs{
		name:  name,
		opt:   *opt,
		ci:    ci,
		srv:   objectStorageClient,
		cache: bucket.NewCache(),
		pacer: pc,
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
		SetTier:           true,
		GetTier:           true,
		SlowModTime:       true,
	}).Fill(ctx, f)
	if f.rootBucket != "" && f.rootDirectory != "" && !strings.HasSuffix(root, "/") {
		// Check to see if the (bucket,directory) is actually an existing file
		oldRoot := f.root
		newRoot, leaf := path.Split(oldRoot)
		f.setRoot(newRoot)
		_, err := f.NewObject(ctx, leaf)
		if err != nil {
			// File doesn't exist or is a directory so return old f
			f.setRoot(oldRoot)
			return f, nil
		}
		// return an error with fs which points to the parent
		return f, fs.ErrorIsFile
	}
	// err is guaranteed nil here (nil-checked above, then shadowed in
	// the block above) - return the Fs rooted at the directory
	return f, err
}
   109  
   110  func checkUploadChunkSize(cs fs.SizeSuffix) error {
   111  	if cs < minChunkSize {
   112  		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
   113  	}
   114  	return nil
   115  }
   116  
   117  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   118  	err = checkUploadChunkSize(cs)
   119  	if err == nil {
   120  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
   121  	}
   122  	return
   123  }
   124  
   125  func checkUploadCutoff(cs fs.SizeSuffix) error {
   126  	if cs > maxUploadCutoff {
   127  		return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
   128  	}
   129  	return nil
   130  }
   131  
   132  func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   133  	err = checkUploadCutoff(cs)
   134  	if err == nil {
   135  		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
   136  	}
   137  	return
   138  }
   139  
   140  func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   141  	err = checkUploadChunkSize(cs)
   142  	if err == nil {
   143  		old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
   144  	}
   145  	return
   146  }
   147  
   148  // ------------------------------------------------------------
   149  // Implement backed that represents a remote object storage server
   150  // Fs is the interface a cloud storage system must provide
   151  // ------------------------------------------------------------
   152  
   153  // Name of the remote (as passed into NewFs)
   154  func (f *Fs) Name() string {
   155  	return f.name
   156  }
   157  
   158  // Root of the remote (as passed into NewFs)
   159  func (f *Fs) Root() string {
   160  	return f.root
   161  }
   162  
   163  // String converts this Fs to a string
   164  func (f *Fs) String() string {
   165  	if f.rootBucket == "" {
   166  		return "oos:root"
   167  	}
   168  	if f.rootDirectory == "" {
   169  		return fmt.Sprintf("oos:bucket %s", f.rootBucket)
   170  	}
   171  	return fmt.Sprintf("oos:bucket %s, path %s", f.rootBucket, f.rootDirectory)
   172  }
   173  
   174  // Features returns the optional features of this Fs
   175  func (f *Fs) Features() *fs.Features {
   176  	return f.features
   177  }
   178  
   179  // Precision of the remote
   180  func (f *Fs) Precision() time.Duration {
   181  	return time.Millisecond
   182  }
   183  
   184  // Hashes returns the supported hash sets.
   185  func (f *Fs) Hashes() hash.Set {
   186  	return hash.Set(hash.MD5)
   187  }
   188  
   189  // setRoot changes the root of the Fs
   190  func (f *Fs) setRoot(root string) {
   191  	f.root = parsePath(root)
   192  	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
   193  }
   194  
   195  // parsePath parses a remote 'url'
   196  func parsePath(path string) (root string) {
   197  	root = strings.Trim(path, "/")
   198  	return
   199  }
   200  
   201  // split returns bucket and bucketPath from the rootRelativePath
   202  // relative to f.root
   203  func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
   204  	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
   205  	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
   206  }
   207  
   208  // List the objects and directories in dir into entries.  The
   209  // entries can be returned in any order but should be for a
   210  // complete directory.
   211  //
   212  // dir should be "" to list the root, and should not have
   213  // trailing slashes.
   214  //
   215  // This should return ErrDirNotFound if the directory isn't
   216  // found.
   217  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
   218  	bucketName, directory := f.split(dir)
   219  	fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
   220  	if bucketName == "" {
   221  		if directory != "" {
   222  			return nil, fs.ErrorListBucketRequired
   223  		}
   224  		return f.listBuckets(ctx)
   225  	}
   226  	return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
   227  }
   228  
// listFn is called from list to handle each entry found - object is
// the listing entry it was built from and isDirectory is set for
// directory entries
type listFn func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error
   231  
// list the objects into the function supplied from
// the bucket and root supplied
//
// (bucket, directory) is the starting directory
// If prefix is set then it is removed from all file names
// If addBucket is set then it adds the bucket to the start of the remotes generated
// If recurse is set the function will recursively list
// If limit is > 0 then it limits to that many files (must be less than 1000)
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int,
	fn listFn) (err error) {
	if prefix != "" {
		prefix += "/"
	}
	if directory != "" {
		directory += "/"
	}

	// Without recursion, ask the server to roll subdirectories up into
	// common prefixes using "/" as the delimiter
	delimiter := ""
	if !recurse {
		delimiter = "/"
	}
	chunkSize := 1000
	if limit > 0 {
		chunkSize = limit
	}
	var request = objectstorage.ListObjectsRequest{
		NamespaceName: common.String(f.opt.Namespace),
		BucketName:    common.String(bucket),
		Prefix:        common.String(directory),
		Limit:         common.Int(chunkSize),
		Fields:        common.String("name,size,etag,timeCreated,md5,timeModified,storageTier,archivalState"),
	}
	if delimiter != "" {
		request.Delimiter = common.String(delimiter)
	}

	// Page through the listing until NextStartWith is exhausted
	for {
		var resp objectstorage.ListObjectsResponse
		err = f.pacer.Call(func() (bool, error) {
			var err error
			resp, err = f.srv.ListObjects(ctx, request)
			return shouldRetry(ctx, resp.HTTPResponse(), err)
		})
		if err != nil {
			if ociError, ok := err.(common.ServiceError); ok {
				// A 404 means the bucket (or directory) doesn't exist
				if ociError.GetHTTPStatusCode() == http.StatusNotFound {
					err = fs.ErrorDirNotFound
				}
			}
			if f.rootBucket == "" {
				// if listing from the root ignore wrong region requests returning
				// empty directory
				if reqErr, ok := err.(common.ServiceError); ok {
					// 301 if wrong region for bucket
					if reqErr.GetHTTPStatusCode() == http.StatusMovedPermanently {
						fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
						return nil
					}
				}
			}
			return err
		}
		if !recurse {
			// Synthesize directory entries from the common prefixes
			for _, commonPrefix := range resp.ListObjects.Prefixes {
				if commonPrefix == "" {
					fs.Logf(f, "Nil common prefix received")
					continue
				}
				remote := commonPrefix
				remote = f.opt.Enc.ToStandardPath(remote)
				if !strings.HasPrefix(remote, prefix) {
					fs.Logf(f, "Odd name received %q", remote)
					continue
				}
				remote = remote[len(prefix):]
				if addBucket {
					remote = path.Join(bucket, remote)
				}
				// strip the trailing "/" common prefixes carry
				remote = strings.TrimSuffix(remote, "/")
				err = fn(remote, &objectstorage.ObjectSummary{Name: &remote}, true)
				if err != nil {
					return err
				}
			}
		}
		for i := range resp.Objects {
			object := &resp.Objects[i]
			// Finish if file name no longer has prefix
			//if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
			//	return nil
			//}
			remote := *object.Name
			remote = f.opt.Enc.ToStandardPath(remote)
			if !strings.HasPrefix(remote, prefix) {
				continue
			}
			remote = remote[len(prefix):]
			// Check for directory
			isDirectory := remote == "" || strings.HasSuffix(remote, "/")
			if addBucket {
				remote = path.Join(bucket, remote)
			}
			// is this a directory marker?
			if isDirectory && object.Size != nil && *object.Size == 0 {
				continue // skip directory marker
			}
			if isDirectory && len(remote) > 1 {
				// strip the trailing "/" from directory names
				remote = remote[:len(remote)-1]
			}
			err = fn(remote, object, isDirectory)
			if err != nil {
				return err
			}
		}
		// end if no NextFileName
		if resp.NextStartWith == nil {
			break
		}
		request.Start = resp.NextStartWith
	}
	return nil
}
   356  
   357  // Convert a list item into a DirEntry
   358  func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectstorage.ObjectSummary, isDirectory bool) (fs.DirEntry, error) {
   359  	if isDirectory {
   360  		size := int64(0)
   361  		if object.Size != nil {
   362  			size = *object.Size
   363  		}
   364  		d := fs.NewDir(remote, time.Time{}).SetSize(size)
   365  		return d, nil
   366  	}
   367  	o, err := f.newObjectWithInfo(ctx, remote, object)
   368  	if err != nil {
   369  		return nil, err
   370  	}
   371  	return o, nil
   372  }
   373  
   374  // listDir lists a single directory
   375  func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
   376  	fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
   377  		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
   378  		if err != nil {
   379  			return err
   380  		}
   381  		if entry != nil {
   382  			entries = append(entries, entry)
   383  		}
   384  		return nil
   385  	}
   386  	err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
   387  	if err != nil {
   388  		return nil, err
   389  	}
   390  	// bucket must be present if listing succeeded
   391  	f.cache.MarkOK(bucket)
   392  	return entries, nil
   393  }
   394  
// listBuckets returns all the buckets in the configured compartment as
// directory entries, paging through the results and marking each
// bucket found as present in the bucket cache.
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
	// The no_auth provider has no credentials so can't enumerate buckets
	if f.opt.Provider == noAuth {
		return nil, fmt.Errorf("can't list buckets with %v provider, use a valid auth provider in config file", noAuth)
	}
	var request = objectstorage.ListBucketsRequest{
		NamespaceName: common.String(f.opt.Namespace),
		CompartmentId: common.String(f.opt.Compartment),
	}
	var resp objectstorage.ListBucketsResponse
	for {
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.ListBuckets(ctx, request)
			return shouldRetry(ctx, resp.HTTPResponse(), err)
		})
		if err != nil {
			return nil, err
		}
		for _, item := range resp.Items {
			bucketName := f.opt.Enc.ToStandardName(*item.Name)
			// bucket must exist if it was just listed
			f.cache.MarkOK(bucketName)
			d := fs.NewDir(bucketName, item.TimeCreated.Time)
			entries = append(entries, d)
		}
		// OpcNextPage is the continuation token - nil means last page
		if resp.OpcNextPage == nil {
			break
		}
		request.Page = resp.OpcNextPage
	}
	return entries, nil
}
   426  
   427  // Return an Object from a path
   428  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   429  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *objectstorage.ObjectSummary) (fs.Object, error) {
   430  	o := &Object{
   431  		fs:     f,
   432  		remote: remote,
   433  	}
   434  	if info != nil {
   435  		// Set info but not meta
   436  		if info.TimeModified == nil {
   437  			fs.Logf(o, "Failed to read last modified")
   438  			o.lastModified = time.Now()
   439  		} else {
   440  			o.lastModified = info.TimeModified.Time
   441  		}
   442  		if info.Md5 != nil {
   443  			md5, err := o.base64ToMd5(*info.Md5)
   444  			if err != nil {
   445  				o.md5 = md5
   446  			}
   447  		}
   448  		o.bytes = *info.Size
   449  		o.storageTier = storageTierMap[strings.ToLower(string(info.StorageTier))]
   450  	} else {
   451  		err := o.readMetaData(ctx) // reads info and headers, returning an error
   452  		if err != nil {
   453  			return nil, err
   454  		}
   455  	}
   456  	return o, nil
   457  }
   458  
   459  // NewObject finds the Object at remote.  If it can't be found
   460  // it returns the error fs.ErrorObjectNotFound.
   461  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   462  	return f.newObjectWithInfo(ctx, remote, nil)
   463  }
   464  
   465  // Put the object into the bucket
   466  // Copy the reader in to the new object which is returned
   467  // The new object may have been created if an error is returned
   468  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   469  	// Temporary Object under construction
   470  	o := &Object{
   471  		fs:     f,
   472  		remote: src.Remote(),
   473  	}
   474  	return o, o.Update(ctx, in, src, options...)
   475  }
   476  
   477  // PutStream uploads to the remote path with the modTime given of indeterminate size
   478  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   479  	return f.Put(ctx, in, src, options...)
   480  }
   481  
   482  // Mkdir creates the bucket if it doesn't exist
   483  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   484  	bucketName, _ := f.split(dir)
   485  	return f.makeBucket(ctx, bucketName)
   486  }
   487  
// makeBucket creates the bucket if it doesn't exist
//
// The result is memoized in f.cache so repeated calls for the same
// bucket don't hit the API; with no_check_bucket set it does nothing.
// The second closure passed to cache.Create is an existence probe the
// cache can use to avoid creating needlessly.
func (f *Fs) makeBucket(ctx context.Context, bucketName string) error {
	if f.opt.NoCheckBucket {
		return nil
	}
	return f.cache.Create(bucketName, func() error {
		details := objectstorage.CreateBucketDetails{
			Name:             common.String(bucketName),
			CompartmentId:    common.String(f.opt.Compartment),
			PublicAccessType: objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess,
		}
		req := objectstorage.CreateBucketRequest{
			NamespaceName:       common.String(f.opt.Namespace),
			CreateBucketDetails: details,
		}
		err := f.pacer.Call(func() (bool, error) {
			resp, err := f.srv.CreateBucket(ctx, req)
			return shouldRetry(ctx, resp.HTTPResponse(), err)
		})
		if err == nil {
			fs.Infof(f, "Bucket %q created with accessType %q", bucketName,
				objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess)
		}
		// A bucket which already exists counts as success
		if svcErr, ok := err.(common.ServiceError); ok {
			if code := svcErr.GetCode(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" {
				err = nil
			}
		}
		return err
	}, func() (bool, error) {
		return f.bucketExists(ctx, bucketName)
	})
}
   521  
   522  // Check if the bucket exists
   523  //
   524  // NB this can return incorrect results if called immediately after bucket deletion
   525  func (f *Fs) bucketExists(ctx context.Context, bucketName string) (bool, error) {
   526  	req := objectstorage.HeadBucketRequest{
   527  		NamespaceName: common.String(f.opt.Namespace),
   528  		BucketName:    common.String(bucketName),
   529  	}
   530  	err := f.pacer.Call(func() (bool, error) {
   531  		resp, err := f.srv.HeadBucket(ctx, req)
   532  		return shouldRetry(ctx, resp.HTTPResponse(), err)
   533  	})
   534  	if err == nil {
   535  		return true, nil
   536  	}
   537  	if err, ok := err.(common.ServiceError); ok {
   538  		if err.GetHTTPStatusCode() == http.StatusNotFound {
   539  			return false, nil
   540  		}
   541  	}
   542  	return false, err
   543  }
   544  
   545  // Rmdir delete an empty bucket. if bucket is not empty this is will fail with appropriate error
   546  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
   547  	bucketName, directory := f.split(dir)
   548  	if bucketName == "" || directory != "" {
   549  		return nil
   550  	}
   551  	return f.cache.Remove(bucketName, func() error {
   552  		req := objectstorage.DeleteBucketRequest{
   553  			NamespaceName: common.String(f.opt.Namespace),
   554  			BucketName:    common.String(bucketName),
   555  		}
   556  		err := f.pacer.Call(func() (bool, error) {
   557  			resp, err := f.srv.DeleteBucket(ctx, req)
   558  			return shouldRetry(ctx, resp.HTTPResponse(), err)
   559  		})
   560  		if err == nil {
   561  			fs.Infof(f, "Bucket %q deleted", bucketName)
   562  		}
   563  		return err
   564  	})
   565  }
   566  
   567  func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID *string) (err error) {
   568  	if uploadID == nil || *uploadID == "" {
   569  		return nil
   570  	}
   571  	request := objectstorage.AbortMultipartUploadRequest{
   572  		NamespaceName: common.String(f.opt.Namespace),
   573  		BucketName:    bucketName,
   574  		ObjectName:    bucketPath,
   575  		UploadId:      uploadID,
   576  	}
   577  	err = f.pacer.Call(func() (bool, error) {
   578  		resp, err := f.srv.AbortMultipartUpload(ctx, request)
   579  		return shouldRetry(ctx, resp.HTTPResponse(), err)
   580  	})
   581  	return err
   582  }
   583  
// cleanUpBucket removes all pending multipart uploads for a given bucket over the age of maxAge
//
// Aborts are best-effort: errors from abortMultiPartUpload are
// discarded, so the returned err is always nil as written.
// NOTE(review): confirm whether abort failures should be propagated.
func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Duration,
	uploads []*objectstorage.MultipartUpload) (err error) {
	fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
	for _, upload := range uploads {
		if upload.TimeCreated != nil && upload.Object != nil && upload.UploadId != nil {
			age := time.Since(upload.TimeCreated.Time)
			what := fmt.Sprintf("pending multipart upload for bucket %q key %q dated %v (%v ago)", bucket, *upload.Object,
				upload.TimeCreated, age)
			if age > maxAge {
				fs.Infof(f, "removing %s", what)
				// honour --dry-run / --interactive
				if operations.SkipDestructive(ctx, what, "remove pending upload") {
					continue
				}
				_ = f.abortMultiPartUpload(ctx, upload.Bucket, upload.Object, upload.UploadId)
			}
		} else {
			fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
		}
	}
	return err
}
   606  
   607  // CleanUp removes all pending multipart uploads
   608  func (f *Fs) cleanUp(ctx context.Context, maxAge time.Duration) (err error) {
   609  	uploadsMap, err := f.listMultipartUploadsAll(ctx)
   610  	if err != nil {
   611  		return err
   612  	}
   613  	for bucketName, uploads := range uploadsMap {
   614  		cleanErr := f.cleanUpBucket(ctx, bucketName, maxAge, uploads)
   615  		if err != nil {
   616  			fs.Errorf(f, "Failed to cleanup bucket %q: %v", bucketName, cleanErr)
   617  			err = cleanErr
   618  		}
   619  	}
   620  	return err
   621  }
   622  
   623  // CleanUp removes all pending multipart uploads older than 24 hours
   624  func (f *Fs) CleanUp(ctx context.Context) (err error) {
   625  	return f.cleanUp(ctx, 24*time.Hour)
   626  }
   627  
// ------------------------------------------------------------
// Implement ListRer is an optional interfaces for Fs
//------------------------------------------------------------

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order.  If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucketName, directory := f.split(dir)
	// list batches entries and delivers them to callback in tranches
	list := walk.NewListRHelper(callback)
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
			if err != nil {
				return err
			}
			return list.Add(entry)
		})
	}
	if bucketName == "" {
		// Listing from the root - emit each bucket then recurse into it
		entries, err := f.listBuckets(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			bucketName := entry.Remote()
			err = listR(bucketName, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// bucket must be present if listing succeeded
			f.cache.MarkOK(bucketName)
		}
	} else {
		err = listR(bucketName, directory, f.rootDirectory, f.rootBucket == "")
		if err != nil {
			return err
		}
		// bucket must be present if listing succeeded
		f.cache.MarkOK(bucketName)
	}
	// Flush delivers any remaining batched entries to the callback
	return list.Flush()
}
   690  
// Check the interfaces are satisfied at compile time
var (
	_ fs.Fs              = &Fs{}
	_ fs.Copier          = &Fs{}
	_ fs.PutStreamer     = &Fs{}
	_ fs.ListRer         = &Fs{}
	_ fs.Commander      = &Fs{}
	_ fs.CleanUpper      = &Fs{}
	_ fs.OpenChunkWriter = &Fs{}

	_ fs.Object    = &Object{}
	_ fs.MimeTyper = &Object{}
	_ fs.GetTierer = &Object{}
	_ fs.SetTierer = &Object{}
)