github.com/artpar/rclone@v1.67.3/backend/storj/fs.go

     1  //go:build !plan9
     2  
     3  // Package storj provides an interface to Storj decentralized object storage.
     4  package storj
     5  
     6  import (
     7  	"context"
     8  	"errors"
     9  	"fmt"
    10  	"io"
    11  	"path"
    12  	"strings"
    13  	"time"
    14  
    15  	"github.com/artpar/rclone/fs"
    16  	"github.com/artpar/rclone/fs/config"
    17  	"github.com/artpar/rclone/fs/config/configmap"
    18  	"github.com/artpar/rclone/fs/config/configstruct"
    19  	"github.com/artpar/rclone/fs/fserrors"
    20  	"github.com/artpar/rclone/fs/hash"
    21  	"github.com/artpar/rclone/lib/bucket"
    22  	"golang.org/x/text/unicode/norm"
    23  
    24  	"storj.io/uplink"
    25  	"storj.io/uplink/edge"
    26  )
    27  
    28  const (
    29  	existingProvider = "existing"
    30  	newProvider      = "new"
    31  )
    32  
    33  var satMap = map[string]string{
    34  	"us1.storj.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us1.storj.io:7777",
    35  	"eu1.storj.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@eu1.storj.io:7777",
    36  	"ap1.storj.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@ap1.storj.io:7777",
    37  }
    38  
    39  // Register with Fs
    40  func init() {
    41  	fs.Register(&fs.RegInfo{
    42  		Name:        "storj",
    43  		Description: "Storj Decentralized Cloud Storage",
    44  		Aliases:     []string{"tardigrade"},
    45  		NewFs:       NewFs,
    46  		Config: func(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) {
    47  			provider, _ := m.Get(fs.ConfigProvider)
    48  
    49  			config.FileDeleteKey(name, fs.ConfigProvider)
    50  
    51  			if provider == newProvider {
    52  				satelliteString, _ := m.Get("satellite_address")
    53  				apiKey, _ := m.Get("api_key")
    54  				passphrase, _ := m.Get("passphrase")
    55  
     56  				// satelliteString always contains a default value and passphrase may be empty
    57  				if apiKey == "" {
    58  					return nil, nil
    59  				}
    60  
    61  				satellite, found := satMap[satelliteString]
    62  				if !found {
    63  					satellite = satelliteString
    64  				}
    65  
    66  				access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
    67  				if err != nil {
    68  					return nil, fmt.Errorf("couldn't create access grant: %w", err)
    69  				}
    70  
    71  				serializedAccess, err := access.Serialize()
    72  				if err != nil {
    73  					return nil, fmt.Errorf("couldn't serialize access grant: %w", err)
    74  				}
    75  				m.Set("satellite_address", satellite)
    76  				m.Set("access_grant", serializedAccess)
    77  			} else if provider == existingProvider {
    78  				config.FileDeleteKey(name, "satellite_address")
    79  				config.FileDeleteKey(name, "api_key")
    80  				config.FileDeleteKey(name, "passphrase")
    81  			} else {
    82  				return nil, fmt.Errorf("invalid provider type: %s", provider)
    83  			}
    84  			return nil, nil
    85  		},
    86  		Options: []fs.Option{
    87  			{
    88  				Name:    fs.ConfigProvider,
    89  				Help:    "Choose an authentication method.",
    90  				Default: existingProvider,
    91  				Examples: []fs.OptionExample{{
    92  					Value: "existing",
    93  					Help:  "Use an existing access grant.",
    94  				}, {
    95  					Value: newProvider,
    96  					Help:  "Create a new access grant from satellite address, API key, and passphrase.",
    97  				},
    98  				}},
    99  			{
   100  				Name:      "access_grant",
   101  				Help:      "Access grant.",
   102  				Provider:  "existing",
   103  				Sensitive: true,
   104  			},
   105  			{
   106  				Name:     "satellite_address",
   107  				Help:     "Satellite address.\n\nCustom satellite address should match the format: `<nodeid>@<address>:<port>`.",
   108  				Provider: newProvider,
   109  				Default:  "us1.storj.io",
   110  				Examples: []fs.OptionExample{{
   111  					Value: "us1.storj.io",
   112  					Help:  "US1",
   113  				}, {
   114  					Value: "eu1.storj.io",
   115  					Help:  "EU1",
   116  				}, {
   117  					Value: "ap1.storj.io",
   118  					Help:  "AP1",
   119  				},
   120  				},
   121  			},
   122  			{
   123  				Name:      "api_key",
   124  				Help:      "API key.",
   125  				Provider:  newProvider,
   126  				Sensitive: true,
   127  			},
   128  			{
   129  				Name:      "passphrase",
    130  				Help:      "Encryption passphrase.\n\nTo access existing objects, enter the passphrase used for uploading.",
   131  				Provider:  newProvider,
   132  				Sensitive: true,
   133  			},
   134  		},
   135  	})
   136  }
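
// newAccessGrantSketch is a minimal, illustrative sketch of the flow the
// "new" provider performs above: request an access grant from a satellite
// using an API key and passphrase, then serialize it for storage in the
// config file. The function and its arguments are hypothetical examples,
// not part of the backend's public surface.
func newAccessGrantSketch(ctx context.Context, satellite, apiKey, passphrase string) (string, error) {
	// Resolve a short satellite name (e.g. "us1.storj.io") to its full
	// "<nodeid>@<address>:<port>" form if we know it.
	if full, ok := satMap[satellite]; ok {
		satellite = full
	}
	access, err := uplink.RequestAccessWithPassphrase(ctx, satellite, apiKey, passphrase)
	if err != nil {
		return "", fmt.Errorf("couldn't create access grant: %w", err)
	}
	return access.Serialize()
}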
   137  
   138  // Options defines the configuration for this backend
   139  type Options struct {
   140  	Access string `config:"access_grant"`
   141  
   142  	SatelliteAddress string `config:"satellite_address"`
   143  	APIKey           string `config:"api_key"`
   144  	Passphrase       string `config:"passphrase"`
   145  }
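
// An illustrative rclone.conf stanza that populates these options (values
// are placeholders):
//
//	[storj-remote]
//	type = storj
//	access_grant = 1Abc...serialized-grant...
//
// or, when the grant should be derived from credentials instead:
//
//	[storj-remote]
//	type = storj
//	satellite_address = us1.storj.io
//	api_key = your-api-key
//	passphrase = your-passphrase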
   146  
   147  // Fs represents a remote to Storj
   148  type Fs struct {
   149  	name string // the name of the remote
   150  	root string // root of the filesystem
   151  
   152  	opts     Options      // parsed options
   153  	features *fs.Features // optional features
   154  
   155  	access *uplink.Access // parsed scope
   156  
   157  	project *uplink.Project // project client
   158  }
   159  
   160  // Check the interfaces are satisfied.
   161  var (
   162  	_ fs.Fs           = &Fs{}
   163  	_ fs.ListRer      = &Fs{}
   164  	_ fs.PutStreamer  = &Fs{}
   165  	_ fs.Mover        = &Fs{}
   166  	_ fs.Copier       = &Fs{}
   167  	_ fs.Purger       = &Fs{}
   168  	_ fs.PublicLinker = &Fs{}
   169  )
   170  
   171  // NewFs creates a filesystem backed by Storj.
   172  func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
   173  	// Setup filesystem and connection to Storj
   174  	root = norm.NFC.String(root)
   175  	root = strings.Trim(root, "/")
   176  
   177  	f := &Fs{
   178  		name: name,
   179  		root: root,
   180  	}
   181  
   182  	// Parse config into Options struct
   183  	err = configstruct.Set(m, &f.opts)
   184  	if err != nil {
   185  		return nil, err
   186  	}
   187  
   188  	// Parse access
   189  	var access *uplink.Access
   190  
   191  	if f.opts.Access != "" {
   192  		access, err = uplink.ParseAccess(f.opts.Access)
   193  		if err != nil {
   194  			return nil, fmt.Errorf("storj: access: %w", err)
   195  		}
   196  	}
   197  
   198  	if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
   199  		access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
   200  		if err != nil {
   201  			return nil, fmt.Errorf("storj: access: %w", err)
   202  		}
   203  
   204  		serializedAccess, err := access.Serialize()
   205  		if err != nil {
   206  			return nil, fmt.Errorf("storj: access: %w", err)
   207  		}
   208  
   209  		err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
   210  		if err != nil {
   211  			return nil, fmt.Errorf("storj: access: %w", err)
   212  		}
   213  	}
   214  
   215  	if access == nil {
   216  		return nil, errors.New("access not found")
   217  	}
   218  
   219  	f.access = access
   220  
   221  	f.features = (&fs.Features{
   222  		BucketBased:       true,
   223  		BucketBasedRootOK: true,
   224  	}).Fill(ctx, f)
   225  
   226  	project, err := f.connect(ctx)
   227  	if err != nil {
   228  		return nil, err
   229  	}
   230  	f.project = project
   231  
   232  	// Root validation needs to check the following: If a bucket path is
   233  	// specified and exists, then the object must be a directory.
   234  	//
    235  	// NOTE: From this point on we must return the filesystem object we've
    236  	// created so far, even if there is an error.
   237  	if root != "" {
   238  		bucketName, bucketPath := bucket.Split(root)
   239  
   240  		if bucketName != "" && bucketPath != "" {
   241  			_, err = project.StatBucket(ctx, bucketName)
   242  			if err != nil {
   243  				return f, fmt.Errorf("storj: bucket: %w", err)
   244  			}
   245  
   246  			object, err := project.StatObject(ctx, bucketName, bucketPath)
   247  			if err == nil {
   248  				if !object.IsPrefix {
   249  					// If the root is actually a file we
   250  					// need to return the *parent*
   251  					// directory of the root instead and an
   252  					// error that the original root
   253  					// requested is a file.
   254  					newRoot := path.Dir(f.root)
   255  					if newRoot == "." {
   256  						newRoot = ""
   257  					}
   258  					f.root = newRoot
   259  
   260  					return f, fs.ErrorIsFile
   261  				}
   262  			}
   263  		}
   264  	}
   265  
   266  	return f, nil
   267  }
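
// A minimal sketch of constructing the filesystem directly, assuming a
// placeholder access grant (rclone normally does this through the registry
// entry above):
//
//	m := configmap.Simple{"access_grant": "1Abc...serialized-grant..."}
//	f, err := NewFs(context.Background(), "storj", "my-bucket/photos", m)
//	if errors.Is(err, fs.ErrorIsFile) {
//		// The root pointed at an object; f is rooted at its parent directory.
//	}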
   268  
   269  // connect opens a connection to Storj.
   270  func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
   271  	fs.Debugf(f, "connecting...")
    272  	defer func() { fs.Debugf(f, "connected: %+v", err) }()
   273  
   274  	cfg := uplink.Config{
   275  		UserAgent: "rclone",
   276  	}
   277  
   278  	project, err = cfg.OpenProject(ctx, f.access)
   279  	if err != nil {
   280  		return nil, fmt.Errorf("storj: project: %w", err)
   281  	}
   282  
   283  	return
   284  }
   285  
   286  // absolute computes the absolute bucket name and path from the filesystem root
   287  // and the relative path provided.
   288  func (f *Fs) absolute(relative string) (bucketName, bucketPath string) {
   289  	bn, bp := bucket.Split(path.Join(f.root, relative))
   290  
   291  	// NOTE: Technically libuplink does not care about the encoding. It is
   292  	// happy to work with them as opaque byte sequences. However, rclone
   293  	// has a test that requires two paths with the same normalized form
   294  	// (but different un-normalized forms) to point to the same file. This
   295  	// means we have to normalize before we interact with libuplink.
   296  	return norm.NFC.String(bn), norm.NFC.String(bp)
   297  }
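
// For example (hypothetical values), with f.root = "photos/2023":
//
//	bucketName, bucketPath := f.absolute("trip/img.jpg")
//	// bucketName == "photos", bucketPath == "2023/trip/img.jpg"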
   298  
   299  // Name of the remote (as passed into NewFs)
   300  func (f *Fs) Name() string {
   301  	return f.name
   302  }
   303  
   304  // Root of the remote (as passed into NewFs)
   305  func (f *Fs) Root() string {
   306  	return f.root
   307  }
   308  
   309  // String returns a description of the FS
   310  func (f *Fs) String() string {
   311  	return fmt.Sprintf("FS sj://%s", f.root)
   312  }
   313  
   314  // Precision of the ModTimes in this Fs
   315  func (f *Fs) Precision() time.Duration {
   316  	return time.Nanosecond
   317  }
   318  
   319  // Hashes returns the supported hash types of the filesystem.
   320  func (f *Fs) Hashes() hash.Set {
   321  	return hash.NewHashSet()
   322  }
   323  
   324  // Features returns the optional features of this Fs
   325  func (f *Fs) Features() *fs.Features {
   326  	return f.features
   327  }
   328  
   329  // List the objects and directories in relative into entries. The entries can
   330  // be returned in any order but should be for a complete directory.
   331  //
   332  // relative should be "" to list the root, and should not have trailing
   333  // slashes.
   334  //
    335  // This should return fs.ErrorDirNotFound if the directory isn't found.
   336  func (f *Fs) List(ctx context.Context, relative string) (entries fs.DirEntries, err error) {
   337  	fs.Debugf(f, "ls ./%s", relative)
   338  
   339  	bucketName, bucketPath := f.absolute(relative)
   340  
   341  	defer func() {
   342  		if errors.Is(err, uplink.ErrBucketNotFound) {
   343  			err = fs.ErrorDirNotFound
   344  		}
   345  	}()
   346  
   347  	if bucketName == "" {
   348  		if bucketPath != "" {
   349  			return nil, fs.ErrorListBucketRequired
   350  		}
   351  
   352  		return f.listBuckets(ctx)
   353  	}
   354  
   355  	return f.listObjects(ctx, relative, bucketName, bucketPath)
   356  }
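
// From the CLI this corresponds to listings such as (remote name is a
// placeholder):
//
//	rclone lsf storj-remote:            # lists buckets
//	rclone lsf storj-remote:bucket/dir  # lists a single directory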
   357  
   358  func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
   359  	fs.Debugf(f, "BKT ls")
   360  
   361  	buckets := f.project.ListBuckets(ctx, nil)
   362  
   363  	for buckets.Next() {
   364  		bucket := buckets.Item()
   365  
   366  		entries = append(entries, fs.NewDir(bucket.Name, bucket.Created))
   367  	}
   368  
   369  	return entries, buckets.Err()
   370  }
   371  
   372  // newDirEntry creates a directory entry from an uplink object.
   373  //
   374  // NOTE: Getting the exact behavior required by rclone is somewhat tricky. The
   375  // path manipulation here is necessary to cover all the different ways the
   376  // filesystem and object could be initialized and combined.
   377  func (f *Fs) newDirEntry(relative, prefix string, object *uplink.Object) fs.DirEntry {
   378  	if object.IsPrefix {
   379  		//                         . The entry must include the relative path as its prefix. Depending on
   380  		//                         | what is being listed and how the filesystem root was initialized the
   381  		//                         | relative path may be empty (and so we use path joining here to ensure
   382  		//                         | we don't end up with an empty path segment).
   383  		//                         |
   384  		//                         |                    . Remove the prefix used during listing.
   385  		//                         |                    |
   386  		//                         |                    |           . Remove the trailing slash.
   387  		//                         |                    |           |
   388  		//                         v                    v           v
   389  		return fs.NewDir(path.Join(relative, object.Key[len(prefix):len(object.Key)-1]), object.System.Created)
   390  	}
   391  
   392  	return newObjectFromUplink(f, relative, object)
   393  }
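
// Worked example (hypothetical listing, filesystem rooted at the bucket):
// with relative = "dir", prefix = "dir/" and object.Key = "dir/sub/", the
// expression above yields path.Join("dir", "sub") == "dir/sub" as the
// directory entry name.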
   394  
   395  func (f *Fs) listObjects(ctx context.Context, relative, bucketName, bucketPath string) (entries fs.DirEntries, err error) {
   396  	fs.Debugf(f, "OBJ ls ./%s (%q, %q)", relative, bucketName, bucketPath)
   397  
   398  	opts := &uplink.ListObjectsOptions{
   399  		Prefix: newPrefix(bucketPath),
   400  
   401  		System: true,
   402  		Custom: true,
   403  	}
   404  	fs.Debugf(f, "opts %+v", opts)
   405  
   406  	objects := f.project.ListObjects(ctx, bucketName, opts)
   407  
   408  	for objects.Next() {
   409  		entries = append(entries, f.newDirEntry(relative, opts.Prefix, objects.Item()))
   410  	}
   411  
   412  	err = objects.Err()
   413  	if err != nil {
   414  		return nil, err
   415  	}
   416  
   417  	return entries, nil
   418  }
   419  
    420  // ListR lists the objects and directories of the Fs starting from relative
    421  // recursively, passing each batch of entries to callback.
   422  //
   423  // relative should be "" to start from the root, and should not have trailing
   424  // slashes.
   425  //
   426  // This should return ErrDirNotFound if the directory isn't found.
   427  //
   428  // It should call callback for each tranche of entries read. These need not be
   429  // returned in any particular order. If callback returns an error then the
   430  // listing will stop immediately.
   431  //
   432  // Don't implement this unless you have a more efficient way of listing
    433  // recursively than doing a directory traversal.
   434  func (f *Fs) ListR(ctx context.Context, relative string, callback fs.ListRCallback) (err error) {
   435  	fs.Debugf(f, "ls -R ./%s", relative)
   436  
   437  	bucketName, bucketPath := f.absolute(relative)
   438  
   439  	defer func() {
   440  		if errors.Is(err, uplink.ErrBucketNotFound) {
   441  			err = fs.ErrorDirNotFound
   442  		}
   443  	}()
   444  
   445  	if bucketName == "" {
   446  		if bucketPath != "" {
   447  			return fs.ErrorListBucketRequired
   448  		}
   449  
   450  		return f.listBucketsR(ctx, callback)
   451  	}
   452  
   453  	return f.listObjectsR(ctx, relative, bucketName, bucketPath, callback)
   454  }
   455  
   456  func (f *Fs) listBucketsR(ctx context.Context, callback fs.ListRCallback) (err error) {
   457  	fs.Debugf(f, "BKT ls -R")
   458  
   459  	buckets := f.project.ListBuckets(ctx, nil)
   460  
   461  	for buckets.Next() {
   462  		bucket := buckets.Item()
   463  
   464  		err = f.listObjectsR(ctx, bucket.Name, bucket.Name, "", callback)
   465  		if err != nil {
   466  			return err
   467  		}
   468  	}
   469  
   470  	return buckets.Err()
   471  }
   472  
   473  func (f *Fs) listObjectsR(ctx context.Context, relative, bucketName, bucketPath string, callback fs.ListRCallback) (err error) {
   474  	fs.Debugf(f, "OBJ ls -R ./%s (%q, %q)", relative, bucketName, bucketPath)
   475  
   476  	opts := &uplink.ListObjectsOptions{
   477  		Prefix:    newPrefix(bucketPath),
   478  		Recursive: true,
   479  
   480  		System: true,
   481  		Custom: true,
   482  	}
   483  
   484  	objects := f.project.ListObjects(ctx, bucketName, opts)
   485  
   486  	for objects.Next() {
   487  		object := objects.Item()
   488  
   489  		err = callback(fs.DirEntries{f.newDirEntry(relative, opts.Prefix, object)})
   490  		if err != nil {
   491  			return err
   492  		}
   493  	}
   494  
   495  	err = objects.Err()
   496  	if err != nil {
   497  		return err
   498  	}
   499  
   500  	return nil
   501  }
   502  
   503  // NewObject finds the Object at relative. If it can't be found it returns the
   504  // error ErrorObjectNotFound.
   505  func (f *Fs) NewObject(ctx context.Context, relative string) (_ fs.Object, err error) {
   506  	fs.Debugf(f, "stat ./%s", relative)
   507  
   508  	bucketName, bucketPath := f.absolute(relative)
   509  
   510  	object, err := f.project.StatObject(ctx, bucketName, bucketPath)
   511  	if err != nil {
   512  		fs.Debugf(f, "err: %+v", err)
   513  
   514  		if errors.Is(err, uplink.ErrObjectNotFound) {
   515  			return nil, fs.ErrorObjectNotFound
   516  		}
   517  		return nil, err
   518  	}
   519  
   520  	return newObjectFromUplink(f, relative, object), nil
   521  }
   522  
    523  // Put uploads to the remote path with the given modTime and size.
   524  //
   525  // When called from outside an Fs by rclone, src.Size() will always be >= 0.
   526  // But for unknown-sized objects (indicated by src.Size() == -1), Put should
   527  // either return an error or upload it properly (rather than e.g. calling
   528  // panic).
   529  //
   530  // May create the object even if it returns an error - if so will return the
   531  // object and the error, otherwise will return nil and the error
   532  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
   533  	return f.put(ctx, in, src, src.Remote(), options...)
   534  }
   535  
   536  func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options ...fs.OpenOption) (_ fs.Object, err error) {
   537  	fs.Debugf(f, "cp input ./%s # %+v %d", remote, options, src.Size())
   538  
   539  	// Reject options we don't support.
   540  	for _, option := range options {
   541  		if option.Mandatory() {
   542  			fs.Errorf(f, "Unsupported mandatory option: %v", option)
   543  
   544  			return nil, errors.New("unsupported mandatory option")
   545  		}
   546  	}
   547  
   548  	bucketName, bucketPath := f.absolute(remote)
   549  
   550  	upload, err := f.project.UploadObject(ctx, bucketName, bucketPath, nil)
   551  	if err != nil {
   552  		return nil, err
   553  	}
   554  	defer func() {
   555  		if err != nil {
   556  			aerr := upload.Abort()
   557  			if aerr != nil && !errors.Is(aerr, uplink.ErrUploadDone) {
   558  				fs.Errorf(f, "cp input ./%s %+v: %+v", remote, options, aerr)
   559  			}
   560  		}
   561  	}()
   562  
   563  	err = upload.SetCustomMetadata(ctx, uplink.CustomMetadata{
   564  		"rclone:mtime": src.ModTime(ctx).Format(time.RFC3339Nano),
   565  	})
   566  	if err != nil {
   567  		return nil, err
   568  	}
   569  
   570  	_, err = io.Copy(upload, in)
   571  	if err != nil {
   572  		if errors.Is(err, uplink.ErrBucketNotFound) {
    573  			// Rclone assumes the backend will create the bucket if it does not exist yet.
   574  			// Here we create the bucket and return a retry error for rclone to retry the upload.
   575  			_, err = f.project.EnsureBucket(ctx, bucketName)
   576  			if err != nil {
   577  				return nil, err
   578  			}
   579  			return nil, fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
   580  		}
   581  
   582  		err = fserrors.RetryError(err)
   583  		fs.Errorf(f, "cp input ./%s %+v: %+v\n", remote, options, err)
   584  
   585  		return nil, err
   586  	}
   587  
   588  	err = upload.Commit()
   589  	if err != nil {
   590  		if errors.Is(err, uplink.ErrBucketNotFound) {
    591  			// Rclone assumes the backend will create the bucket if it does not exist yet.
   592  			// Here we create the bucket and return a retry error for rclone to retry the upload.
   593  			_, err = f.project.EnsureBucket(ctx, bucketName)
   594  			if err != nil {
   595  				return nil, err
   596  			}
   597  			err = fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
   598  		} else if errors.Is(err, uplink.ErrTooManyRequests) {
    599  			// Storj enforces a rate limit of 1 upload per second to the same object.
   600  			// This produces ErrTooManyRequests here, so we wait 1 second and retry.
   601  			//
   602  			// See: https://github.com/storj/uplink/issues/149
   603  			fs.Debugf(f, "uploading too fast - sleeping for 1 second: %v", err)
   604  			time.Sleep(time.Second)
   605  			err = fserrors.RetryError(err)
   606  		}
   607  		return nil, err
   608  	}
   609  
   610  	return newObjectFromUplink(f, remote, upload.Info()), nil
   611  }
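
// uploadWithMtimeSketch is an illustrative sketch of the essential upload
// steps performed by put above: start an upload, attach the "rclone:mtime"
// custom metadata, copy the data and commit. The bucket creation and retry
// handling that put performs are intentionally omitted, and the function
// itself is a hypothetical example rather than part of the backend.
func uploadWithMtimeSketch(ctx context.Context, project *uplink.Project, bucketName, key string, in io.Reader, mtime time.Time) error {
	upload, err := project.UploadObject(ctx, bucketName, key, nil)
	if err != nil {
		return err
	}
	if err := upload.SetCustomMetadata(ctx, uplink.CustomMetadata{
		"rclone:mtime": mtime.Format(time.RFC3339Nano),
	}); err != nil {
		_ = upload.Abort()
		return err
	}
	if _, err := io.Copy(upload, in); err != nil {
		_ = upload.Abort()
		return err
	}
	return upload.Commit()
}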
   612  
    613  // PutStream uploads to the remote path with the given modTime and an
    614  // indeterminate size.
   615  //
   616  // May create the object even if it returns an error - if so will return the
   617  // object and the error, otherwise will return nil and the error.
   618  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
   619  	return f.Put(ctx, in, src, options...)
   620  }
   621  
   622  // Mkdir makes the directory (container, bucket)
   623  //
   624  // Shouldn't return an error if it already exists
   625  func (f *Fs) Mkdir(ctx context.Context, relative string) (err error) {
   626  	fs.Debugf(f, "mkdir -p ./%s", relative)
   627  
   628  	bucketName, _ := f.absolute(relative)
   629  
   630  	_, err = f.project.EnsureBucket(ctx, bucketName)
   631  
   632  	return err
   633  }
   634  
   635  // Rmdir removes the directory (container, bucket)
   636  //
   637  // NOTE: Despite code documentation to the contrary, this method should not
   638  // return an error if the directory does not exist.
   639  func (f *Fs) Rmdir(ctx context.Context, relative string) (err error) {
   640  	fs.Debugf(f, "rmdir ./%s", relative)
   641  
   642  	bucketName, bucketPath := f.absolute(relative)
   643  
   644  	if bucketPath != "" {
   645  		// If we can successfully stat it, then it is an object (and not a prefix).
   646  		_, err := f.project.StatObject(ctx, bucketName, bucketPath)
   647  		if err != nil {
   648  			if errors.Is(err, uplink.ErrObjectNotFound) {
   649  				// At this point we know it is not an object,
   650  				// but we don't know if it is a prefix for one.
   651  				//
   652  				// We check this by doing a listing and if we
   653  				// get any results back, then we know this is a
   654  				// valid prefix (which implies the directory is
   655  				// not empty).
   656  				opts := &uplink.ListObjectsOptions{
   657  					Prefix: newPrefix(bucketPath),
   658  
   659  					System: true,
   660  					Custom: true,
   661  				}
   662  
   663  				objects := f.project.ListObjects(ctx, bucketName, opts)
   664  
   665  				if objects.Next() {
   666  					return fs.ErrorDirectoryNotEmpty
   667  				}
   668  
   669  				return objects.Err()
   670  			}
   671  
   672  			return err
   673  		}
   674  
   675  		return fs.ErrorIsFile
   676  	}
   677  
   678  	_, err = f.project.DeleteBucket(ctx, bucketName)
   679  	if err != nil {
   680  		if errors.Is(err, uplink.ErrBucketNotFound) {
   681  			return fs.ErrorDirNotFound
   682  		}
   683  
   684  		if errors.Is(err, uplink.ErrBucketNotEmpty) {
   685  			return fs.ErrorDirectoryNotEmpty
   686  		}
   687  
   688  		return err
   689  	}
   690  
   691  	return nil
   692  }
   693  
   694  // newPrefix returns a new prefix for listing conforming to the libuplink
   695  // requirements. In particular, libuplink requires a trailing slash for
   696  // listings, but rclone does not always provide one. Further, depending on how
    697  // the path was initially specified, path normalization may have removed it (e.g. a
   698  // trailing slash from the CLI is removed before it ever gets to the backend
   699  // code).
   700  func newPrefix(prefix string) string {
   701  	if prefix == "" {
   702  		return prefix
   703  	}
   704  
   705  	if prefix[len(prefix)-1] == '/' {
   706  		return prefix
   707  	}
   708  
   709  	return prefix + "/"
   710  }
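
// For example: newPrefix("") == "", newPrefix("dir") == "dir/" and
// newPrefix("dir/") == "dir/".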
   711  
   712  // Move src to this remote using server-side move operations.
   713  //
   714  // This is stored with the remote path given.
   715  //
   716  // It returns the destination Object and a possible error.
   717  //
   718  // Will only be called if src.Fs().Name() == f.Name()
   719  //
   720  // If it isn't possible then return fs.ErrorCantMove
   721  func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
   722  	srcObj, ok := src.(*Object)
   723  	if !ok {
   724  		fs.Debugf(src, "Can't move - not same remote type")
   725  		return nil, fs.ErrorCantMove
   726  	}
   727  
   728  	// Move parameters
   729  	srcBucket, srcKey := bucket.Split(srcObj.absolute)
   730  	dstBucket, dstKey := f.absolute(remote)
   731  	options := uplink.MoveObjectOptions{}
   732  
   733  	// Do the move
   734  	err := f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
   735  	if err != nil {
   736  		// Make sure destination bucket exists
   737  		_, err := f.project.EnsureBucket(ctx, dstBucket)
   738  		if err != nil {
   739  			return nil, fmt.Errorf("rename object failed to create destination bucket: %w", err)
   740  		}
   741  		// And try again
   742  		err = f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
   743  		if err != nil {
   744  			return nil, fmt.Errorf("rename object failed: %w", err)
   745  		}
   746  	}
   747  
   748  	// Read the new object
   749  	return f.NewObject(ctx, remote)
   750  }
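
// From the CLI a server-side rename such as (remote and paths are
// placeholders):
//
//	rclone moveto storj-remote:bucket/old.txt storj-remote:bucket/new.txt
//
// ends up here.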
   751  
   752  // Copy src to this remote using server-side copy operations.
   753  //
   754  // This is stored with the remote path given.
   755  //
   756  // It returns the destination Object and a possible error.
   757  //
   758  // Will only be called if src.Fs().Name() == f.Name()
   759  //
   760  // If it isn't possible then return fs.ErrorCantCopy
   761  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
   762  	srcObj, ok := src.(*Object)
   763  	if !ok {
   764  		fs.Debugf(src, "Can't copy - not same remote type")
   765  		return nil, fs.ErrorCantCopy
   766  	}
   767  
   768  	// Copy parameters
   769  	srcBucket, srcKey := bucket.Split(srcObj.absolute)
   770  	dstBucket, dstKey := f.absolute(remote)
   771  	options := uplink.CopyObjectOptions{}
   772  
   773  	// Do the copy
   774  	newObject, err := f.project.CopyObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
   775  	if err != nil {
   776  		// Make sure destination bucket exists
   777  		_, err := f.project.EnsureBucket(ctx, dstBucket)
   778  		if err != nil {
   779  			return nil, fmt.Errorf("copy object failed to create destination bucket: %w", err)
   780  		}
   781  		// And try again
   782  		newObject, err = f.project.CopyObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
   783  		if err != nil {
   784  			return nil, fmt.Errorf("copy object failed: %w", err)
   785  		}
   786  	}
   787  
   788  	// Return the new object
   789  	return newObjectFromUplink(f, remote, newObject), nil
   790  }
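
// Similarly, a server-side copy from the CLI (remote and paths are
// placeholders):
//
//	rclone copyto storj-remote:bucket/a.txt storj-remote:bucket/b.txt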
   791  
   792  // Purge all files in the directory specified
   793  //
   794  // Implement this if you have a way of deleting all the files
   795  // quicker than just running Remove() on the result of List()
   796  //
   797  // Return an error if it doesn't exist
   798  func (f *Fs) Purge(ctx context.Context, dir string) error {
   799  	bucket, directory := f.absolute(dir)
   800  	if bucket == "" {
   801  		return errors.New("can't purge from root")
   802  	}
   803  
   804  	if directory == "" {
   805  		_, err := f.project.DeleteBucketWithObjects(ctx, bucket)
   806  		if errors.Is(err, uplink.ErrBucketNotFound) {
   807  			return fs.ErrorDirNotFound
   808  		}
   809  		return err
   810  	}
   811  
    812  	fs.Infof(directory, "Quick delete is available only for an entire bucket. Falling back to list and delete.")
   813  	objects := f.project.ListObjects(ctx, bucket,
   814  		&uplink.ListObjectsOptions{
   815  			Prefix:    directory + "/",
   816  			Recursive: true,
   817  		},
   818  	)
   819  	if err := objects.Err(); err != nil {
   820  		return err
   821  	}
   822  
   823  	empty := true
   824  	for objects.Next() {
   825  		empty = false
   826  		_, err := f.project.DeleteObject(ctx, bucket, objects.Item().Key)
   827  		if err != nil {
   828  			return err
   829  		}
   830  		fs.Infof(objects.Item().Key, "Deleted")
   831  	}
   832  
   833  	if empty {
   834  		return fs.ErrorDirNotFound
   835  	}
   836  
   837  	return nil
   838  }
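
// From the CLI this is exercised with, e.g. (remote and path are
// placeholders):
//
//	rclone purge storj-remote:bucket      # fast path: DeleteBucketWithObjects
//	rclone purge storj-remote:bucket/dir  # falls back to list-and-delete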
   839  
   840  // PublicLink generates a public link to the remote path (usually readable by anyone)
   841  func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
   842  	bucket, key := f.absolute(remote)
   843  	if bucket == "" {
   844  		return "", errors.New("path must be specified")
   845  	}
   846  
   847  	// Rclone requires that a link is only generated if the remote path exists
   848  	if key == "" {
   849  		_, err := f.project.StatBucket(ctx, bucket)
   850  		if err != nil {
   851  			return "", err
   852  		}
   853  	} else {
   854  		_, err := f.project.StatObject(ctx, bucket, key)
   855  		if err != nil {
   856  			if !errors.Is(err, uplink.ErrObjectNotFound) {
   857  				return "", err
   858  			}
   859  			// No object found, check if there is such a prefix
   860  			iter := f.project.ListObjects(ctx, bucket, &uplink.ListObjectsOptions{Prefix: key + "/"})
   861  			if iter.Err() != nil {
   862  				return "", iter.Err()
   863  			}
   864  			if !iter.Next() {
   865  				return "", err
   866  			}
   867  		}
   868  	}
   869  
   870  	sharedPrefix := uplink.SharePrefix{Bucket: bucket, Prefix: key}
   871  
   872  	permission := uplink.ReadOnlyPermission()
   873  	if expire.IsSet() {
   874  		permission.NotAfter = time.Now().Add(time.Duration(expire))
   875  	}
   876  
   877  	sharedAccess, err := f.access.Share(permission, sharedPrefix)
   878  	if err != nil {
   879  		return "", fmt.Errorf("sharing access to object failed: %w", err)
   880  	}
   881  
   882  	creds, err := (&edge.Config{
   883  		AuthServiceAddress: "auth.storjshare.io:7777",
   884  	}).RegisterAccess(ctx, sharedAccess, &edge.RegisterAccessOptions{Public: true})
   885  	if err != nil {
   886  		return "", fmt.Errorf("creating public link failed: %w", err)
   887  	}
   888  
   889  	return edge.JoinShareURL("https://link.storjshare.io", creds.AccessKeyID, bucket, key, nil)
   890  }