github.com/xhghs/rclone@v1.51.1-0.20200430155106-e186a28cced8/backend/dropbox/dropbox.go (about)

     1  // Package dropbox provides an interface to Dropbox object storage
     2  package dropbox
     3  
     4  // FIXME dropbox for business would be quite easy to add
     5  
     6  /*
     7  The Case folding of PathDisplay problem
     8  
     9  From the docs:
    10  
    11  path_display String. The cased path to be used for display purposes
    12  only. In rare instances the casing will not correctly match the user's
    13  filesystem, but this behavior will match the path provided in the Core
    14  API v1, and at least the last path component will have the correct
    15  casing. Changes to only the casing of paths won't be returned by
    16  list_folder/continue. This field will be null if the file or folder is
    17  not mounted. This field is optional.
    18  
    19  We solve this by not implementing the ListR interface.  The dropbox
    20  remote will recurse directory by directory only using the last element
    21  of path_display and all will be well.
    22  */
    23  
    24  import (
    25  	"context"
    26  	"fmt"
    27  	"io"
    28  	"log"
    29  	"path"
    30  	"regexp"
    31  	"strings"
    32  	"time"
    33  
    34  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
    35  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
    36  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
    37  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
    38  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
    39  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
    40  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
    41  	"github.com/pkg/errors"
    42  	"github.com/rclone/rclone/backend/dropbox/dbhash"
    43  	"github.com/rclone/rclone/fs"
    44  	"github.com/rclone/rclone/fs/config"
    45  	"github.com/rclone/rclone/fs/config/configmap"
    46  	"github.com/rclone/rclone/fs/config/configstruct"
    47  	"github.com/rclone/rclone/fs/config/obscure"
    48  	"github.com/rclone/rclone/fs/fserrors"
    49  	"github.com/rclone/rclone/fs/hash"
    50  	"github.com/rclone/rclone/lib/encoder"
    51  	"github.com/rclone/rclone/lib/oauthutil"
    52  	"github.com/rclone/rclone/lib/pacer"
    53  	"github.com/rclone/rclone/lib/readers"
    54  	"golang.org/x/oauth2"
    55  )
    56  
// Constants
const (
	rcloneClientID              = "5jcck7diasz0rqy"
	// Obscured client secret - revealed via obscure.MustReveal when building dropboxConfig.
	rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g"
	// Pacer tuning: sleep bounds between retried API calls.
	minSleep                    = 10 * time.Millisecond
	maxSleep                    = 2 * time.Second
	decayConstant               = 2 // bigger for slower decay, exponential
	// Upload chunk size - setting too small makes uploads slow.
	// Chunks are buffered into memory for retries.
	//
	// Speed vs chunk size uploading a 1 GB file on 2017-11-22
	//
	// Chunk Size MB, Speed Mbyte/s, % of max
	// 1	1.364	11%
	// 2	2.443	19%
	// 4	4.288	33%
	// 8	6.79	52%
	// 16	8.916	69%
	// 24	10.195	79%
	// 32	10.427	81%
	// 40	10.96	85%
	// 48	11.828	91%
	// 56	11.763	91%
	// 64	12.047	93%
	// 96	12.302	95%
	// 128	12.945	100%
	//
	// Choose 48MB which is 91% of Maximum speed.  rclone by
	// default does 4 transfers so this should use 4*48MB = 192MB
	// by default.
	defaultChunkSize = 48 * fs.MebiByte
	// Dropbox's documented maximum for a single upload_session append.
	maxChunkSize     = 150 * fs.MebiByte
)
    90  
var (
	// Description of how to auth for this app
	dropboxConfig = &oauth2.Config{
		Scopes: []string{},
		// Endpoint: oauth2.Endpoint{
		// 	AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
		// 	TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
		// },
		// Use the SDK's endpoint helper ("" selects the default host).
		Endpoint:     dropbox.OAuthEndpoint(""),
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectLocalhostURL,
	}
	// A regexp matching path names for files Dropbox ignores
	// See https://www.dropbox.com/en/help/145 - Ignored files
	ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)

	// DbHashType is the hash.Type for Dropbox
	// (registered with the hash registry in init below).
	DbHashType hash.Type
)
   111  
// Register with Fs
func init() {
	// Register the Dropbox content hash so it can be looked up by name.
	DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
	fs.Register(&fs.RegInfo{
		Name:        "dropbox",
		Description: "Dropbox",
		NewFs:       NewFs,
		// Config runs the interactive OAuth flow for "rclone config".
		Config: func(name string, m configmap.Mapper) {
			err := oauthutil.ConfigNoOffline("dropbox", name, m, dropboxConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Dropbox App Client Id\nLeave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Dropbox App Client Secret\nLeave blank normally.",
		}, {
			Name: "chunk_size",
			Help: fmt.Sprintf(`Upload chunk size. (< %v).

Any files larger than this will be uploaded in chunks of this size.

Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries.  Setting this larger will increase the speed
slightly (at most 10%% for 128MB in tests) at the cost of using more
memory.  It can be set smaller if you are tight on memory.`, maxChunkSize),
			Default:  defaultChunkSize,
			Advanced: true,
		}, {
			Name:     "impersonate",
			Help:     "Impersonate this user when using a business account.",
			Default:  "",
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// https://www.dropbox.com/help/syncing-uploads/files-not-syncing lists / and \
			// as invalid characters.
			// Testing revealed names with trailing spaces and the DEL character don't work.
			// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
			Default: (encoder.Base |
				encoder.EncodeBackSlash |
				encoder.EncodeDel |
				encoder.EncodeRightSpace |
				encoder.EncodeInvalidUtf8),
		}},
	})
}
   164  
// Options defines the configuration for this backend
type Options struct {
	ChunkSize   fs.SizeSuffix        `config:"chunk_size"`  // chunked upload size (see defaultChunkSize/maxChunkSize)
	Impersonate string               `config:"impersonate"` // email of business-account member to act as
	Enc         encoder.MultiEncoder `config:"encoding"`    // filename encoding applied to paths sent to the API
}
   171  
// Fs represents a remote dropbox server
//
// The SDK clients below are all built from the same dropbox.Config
// in NewFs.
type Fs struct {
	name           string         // name of this remote
	root           string         // the path we are working on
	opt            Options        // parsed options
	features       *fs.Features   // optional features
	srv            files.Client   // the connection to the dropbox server
	sharing        sharing.Client // as above, but for generating sharing links
	users          users.Client   // as above, but for accessing user information
	team           team.Client    // for the Teams API
	slashRoot      string         // root with "/" prefix, lowercase
	slashRootSlash string         // root with "/" prefix and postfix, lowercase
	pacer          *fs.Pacer      // To pace the API calls
	ns             string         // The namespace we are using or "" for none
}
   187  
// Object describes a dropbox object
//
// Dropbox Objects always have full metadata
type Object struct {
	fs      *Fs       // what this object is part of
	remote  string    // The remote path
	bytes   int64     // size of the object
	modTime time.Time // time it was last modified
	hash    string    // content_hash of the object (Dropbox's own hash format)
}
   198  
   199  // ------------------------------------------------------------
   200  
   201  // Name of the remote (as passed into NewFs)
   202  func (f *Fs) Name() string {
   203  	return f.name
   204  }
   205  
   206  // Root of the remote (as passed into NewFs)
   207  func (f *Fs) Root() string {
   208  	return f.root
   209  }
   210  
   211  // String converts this Fs to a string
   212  func (f *Fs) String() string {
   213  	return fmt.Sprintf("Dropbox root '%s'", f.root)
   214  }
   215  
   216  // Features returns the optional features of this Fs
   217  func (f *Fs) Features() *fs.Features {
   218  	return f.features
   219  }
   220  
   221  // shouldRetry returns a boolean as to whether this err deserves to be
   222  // retried.  It returns the err as a convenience
   223  func shouldRetry(err error) (bool, error) {
   224  	if err == nil {
   225  		return false, err
   226  	}
   227  	baseErrString := errors.Cause(err).Error()
   228  	// handle any official Retry-After header from Dropbox's SDK first
   229  	switch e := err.(type) {
   230  	case auth.RateLimitAPIError:
   231  		if e.RateLimitError.RetryAfter > 0 {
   232  			fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
   233  			err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
   234  		}
   235  		return true, err
   236  	}
   237  	// Keep old behavior for backward compatibility
   238  	if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
   239  		return true, err
   240  	}
   241  	return fserrors.ShouldRetry(err), err
   242  }
   243  
   244  func checkUploadChunkSize(cs fs.SizeSuffix) error {
   245  	const minChunkSize = fs.Byte
   246  	if cs < minChunkSize {
   247  		return errors.Errorf("%s is less than %s", cs, minChunkSize)
   248  	}
   249  	if cs > maxChunkSize {
   250  		return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
   251  	}
   252  	return nil
   253  }
   254  
   255  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   256  	err = checkUploadChunkSize(cs)
   257  	if err == nil {
   258  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
   259  	}
   260  	return
   261  }
   262  
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, errors.Wrap(err, "dropbox: chunk size")
	}

	// Convert the old token if it exists.  The old token was just
	// just a string, the new one is a JSON blob
	oldToken, ok := m.Get(config.ConfigToken)
	oldToken = strings.TrimSpace(oldToken)
	if ok && oldToken != "" && oldToken[0] != '{' {
		fs.Infof(name, "Converting token to new format")
		newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
		err := config.SetValueAndSave(name, config.ConfigToken, newToken)
		if err != nil {
			return nil, errors.Wrap(err, "NewFS convert token")
		}
	}

	oAuthClient, _, err := oauthutil.NewClient(name, m, dropboxConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure dropbox")
	}

	f := &Fs{
		name:  name,
		opt:   *opt,
		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	// NOTE(review): this local shadows the imported "config" package for the
	// rest of the function - the package is not used again below, but rename
	// the local if that ever changes.
	config := dropbox.Config{
		LogLevel:        dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
		Client:          oAuthClient,    // maybe???
		HeaderGenerator: f.headerGenerator,
	}

	// NOTE: needs to be created pre-impersonation so we can look up the impersonated user
	f.team = team.New(config)

	if opt.Impersonate != "" {

		// Look the member up by email so we can get their team member ID.
		user := team.UserSelectorArg{
			Email: opt.Impersonate,
		}
		user.Tag = "email"

		members := []*team.UserSelectorArg{&user}
		args := team.NewMembersGetInfoArgs(members)

		memberIds, err := f.team.MembersGetInfo(args)

		if err != nil {
			return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
		}

		// All subsequent API calls are made on behalf of this team member.
		config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
	}

	// Build the per-namespace SDK clients from the (possibly impersonating) config.
	f.srv = files.New(config)
	f.sharing = sharing.New(config)
	f.users = users.New(config)
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		ReadMimeType:            true,
		CanHaveEmptyDirectories: true,
	}).Fill(f)
	f.setRoot(root)

	// If root starts with / then use the actual root
	if strings.HasPrefix(root, "/") {
		var acc *users.FullAccount
		err = f.pacer.Call(func() (bool, error) {
			acc, err = f.users.GetCurrentAccount()
			return shouldRetry(err)
		})
		if err != nil {
			return nil, errors.Wrap(err, "get current account failed")
		}
		// The root namespace ID is sent in every request via headerGenerator.
		switch x := acc.RootInfo.(type) {
		case *common.TeamRootInfo:
			f.ns = x.RootNamespaceId
		case *common.UserRootInfo:
			f.ns = x.RootNamespaceId
		default:
			return nil, errors.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
		}
		fs.Debugf(f, "Using root namespace %q", f.ns)
	}

	// See if the root is actually an object
	_, err = f.getFileMetadata(f.slashRoot)
	if err == nil {
		newRoot := path.Dir(f.root)
		if newRoot == "." {
			newRoot = ""
		}
		f.setRoot(newRoot)
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
   371  
   372  // headerGenerator for dropbox sdk
   373  func (f *Fs) headerGenerator(hostType string, style string, namespace string, route string) map[string]string {
   374  	if f.ns == "" {
   375  		return map[string]string{}
   376  	}
   377  	return map[string]string{
   378  		"Dropbox-API-Path-Root": `{".tag": "namespace_id", "namespace_id": "` + f.ns + `"}`,
   379  	}
   380  }
   381  
   382  // Sets root in f
   383  func (f *Fs) setRoot(root string) {
   384  	f.root = strings.Trim(root, "/")
   385  	f.slashRoot = "/" + f.root
   386  	f.slashRootSlash = f.slashRoot
   387  	if f.root != "" {
   388  		f.slashRootSlash += "/"
   389  	}
   390  }
   391  
   392  // getMetadata gets the metadata for a file or directory
   393  func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
   394  	err = f.pacer.Call(func() (bool, error) {
   395  		entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
   396  			Path: f.opt.Enc.FromStandardPath(objPath),
   397  		})
   398  		return shouldRetry(err)
   399  	})
   400  	if err != nil {
   401  		switch e := err.(type) {
   402  		case files.GetMetadataAPIError:
   403  			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
   404  				notFound = true
   405  				err = nil
   406  			}
   407  		}
   408  	}
   409  	return
   410  }
   411  
   412  // getFileMetadata gets the metadata for a file
   413  func (f *Fs) getFileMetadata(filePath string) (fileInfo *files.FileMetadata, err error) {
   414  	entry, notFound, err := f.getMetadata(filePath)
   415  	if err != nil {
   416  		return nil, err
   417  	}
   418  	if notFound {
   419  		return nil, fs.ErrorObjectNotFound
   420  	}
   421  	fileInfo, ok := entry.(*files.FileMetadata)
   422  	if !ok {
   423  		return nil, fs.ErrorNotAFile
   424  	}
   425  	return fileInfo, nil
   426  }
   427  
   428  // getDirMetadata gets the metadata for a directory
   429  func (f *Fs) getDirMetadata(dirPath string) (dirInfo *files.FolderMetadata, err error) {
   430  	entry, notFound, err := f.getMetadata(dirPath)
   431  	if err != nil {
   432  		return nil, err
   433  	}
   434  	if notFound {
   435  		return nil, fs.ErrorDirNotFound
   436  	}
   437  	dirInfo, ok := entry.(*files.FolderMetadata)
   438  	if !ok {
   439  		return nil, fs.ErrorIsFile
   440  	}
   441  	return dirInfo, nil
   442  }
   443  
   444  // Return an Object from a path
   445  //
   446  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   447  func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Object, error) {
   448  	o := &Object{
   449  		fs:     f,
   450  		remote: remote,
   451  	}
   452  	var err error
   453  	if info != nil {
   454  		err = o.setMetadataFromEntry(info)
   455  	} else {
   456  		err = o.readEntryAndSetMetadata()
   457  	}
   458  	if err != nil {
   459  		return nil, err
   460  	}
   461  	return o, nil
   462  }
   463  
   464  // NewObject finds the Object at remote.  If it can't be found
   465  // it returns the error fs.ErrorObjectNotFound.
   466  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   467  	return f.newObjectWithInfo(remote, nil)
   468  }
   469  
// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	root := f.slashRoot
	if dir != "" {
		root += "/" + dir
	}

	started := false
	var res *files.ListFolderResult
	// Page through the listing: first call ListFolder, then
	// ListFolderContinue with the returned cursor until HasMore is false.
	for {
		if !started {
			arg := files.ListFolderArg{
				Path:      f.opt.Enc.FromStandardPath(root),
				Recursive: false,
			}
			if root == "/" {
				arg.Path = "" // Specify root folder as empty string
			}
			err = f.pacer.Call(func() (bool, error) {
				res, err = f.srv.ListFolder(&arg)
				return shouldRetry(err)
			})
			if err != nil {
				// Map the SDK's "path not found" onto rclone's sentinel error.
				switch e := err.(type) {
				case files.ListFolderAPIError:
					if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
						err = fs.ErrorDirNotFound
					}
				}
				return nil, err
			}
			started = true
		} else {
			arg := files.ListFolderContinueArg{
				Cursor: res.Cursor,
			}
			err = f.pacer.Call(func() (bool, error) {
				res, err = f.srv.ListFolderContinue(&arg)
				return shouldRetry(err)
			})
			if err != nil {
				return nil, errors.Wrap(err, "list continue")
			}
		}
		for _, entry := range res.Entries {
			var fileInfo *files.FileMetadata
			var folderInfo *files.FolderMetadata
			var metadata *files.Metadata
			switch info := entry.(type) {
			case *files.FolderMetadata:
				folderInfo = info
				metadata = &info.Metadata
			case *files.FileMetadata:
				fileInfo = info
				metadata = &info.Metadata
			default:
				// Unknown metadata variant - log and skip rather than fail the listing.
				fs.Errorf(f, "Unknown type %T", entry)
				continue
			}

			// Only the last element is reliably cased in PathDisplay
			entryPath := metadata.PathDisplay
			leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
			remote := path.Join(dir, leaf)
			if folderInfo != nil {
				// Folders have no modtime in the API - use "now" as a placeholder.
				d := fs.NewDir(remote, time.Now())
				entries = append(entries, d)
			} else if fileInfo != nil {
				o, err := f.newObjectWithInfo(remote, fileInfo)
				if err != nil {
					return nil, err
				}
				entries = append(entries, o)
			}
		}
		if !res.HasMore {
			break
		}
	}
	return entries, nil
}
   559  
   560  // Put the object
   561  //
   562  // Copy the reader in to the new object which is returned
   563  //
   564  // The new object may have been created if an error is returned
   565  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   566  	// Temporary Object under construction
   567  	o := &Object{
   568  		fs:     f,
   569  		remote: src.Remote(),
   570  	}
   571  	return o, o.Update(ctx, in, src, options...)
   572  }
   573  
   574  // PutStream uploads to the remote path with the modTime given of indeterminate size
   575  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   576  	return f.Put(ctx, in, src, options...)
   577  }
   578  
   579  // Mkdir creates the container if it doesn't exist
   580  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   581  	root := path.Join(f.slashRoot, dir)
   582  
   583  	// can't create or run metadata on root
   584  	if root == "/" {
   585  		return nil
   586  	}
   587  
   588  	// check directory doesn't exist
   589  	_, err := f.getDirMetadata(root)
   590  	if err == nil {
   591  		return nil // directory exists already
   592  	} else if err != fs.ErrorDirNotFound {
   593  		return err // some other error
   594  	}
   595  
   596  	// create it
   597  	arg2 := files.CreateFolderArg{
   598  		Path: f.opt.Enc.FromStandardPath(root),
   599  	}
   600  	err = f.pacer.Call(func() (bool, error) {
   601  		_, err = f.srv.CreateFolderV2(&arg2)
   602  		return shouldRetry(err)
   603  	})
   604  	return err
   605  }
   606  
   607  // Rmdir deletes the container
   608  //
   609  // Returns an error if it isn't empty
   610  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
   611  	root := path.Join(f.slashRoot, dir)
   612  
   613  	// can't remove root
   614  	if root == "/" {
   615  		return errors.New("can't remove root directory")
   616  	}
   617  
   618  	// check directory exists
   619  	_, err := f.getDirMetadata(root)
   620  	if err != nil {
   621  		return errors.Wrap(err, "Rmdir")
   622  	}
   623  
   624  	root = f.opt.Enc.FromStandardPath(root)
   625  	// check directory empty
   626  	arg := files.ListFolderArg{
   627  		Path:      root,
   628  		Recursive: false,
   629  	}
   630  	if root == "/" {
   631  		arg.Path = "" // Specify root folder as empty string
   632  	}
   633  	var res *files.ListFolderResult
   634  	err = f.pacer.Call(func() (bool, error) {
   635  		res, err = f.srv.ListFolder(&arg)
   636  		return shouldRetry(err)
   637  	})
   638  	if err != nil {
   639  		return errors.Wrap(err, "Rmdir")
   640  	}
   641  	if len(res.Entries) != 0 {
   642  		return errors.New("directory not empty")
   643  	}
   644  
   645  	// remove it
   646  	err = f.pacer.Call(func() (bool, error) {
   647  		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
   648  		return shouldRetry(err)
   649  	})
   650  	return err
   651  }
   652  
   653  // Precision returns the precision
   654  func (f *Fs) Precision() time.Duration {
   655  	return time.Second
   656  }
   657  
   658  // Copy src to this remote using server side copy operations.
   659  //
   660  // This is stored with the remote path given
   661  //
   662  // It returns the destination Object and a possible error
   663  //
   664  // Will only be called if src.Fs().Name() == f.Name()
   665  //
   666  // If it isn't possible then return fs.ErrorCantCopy
   667  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
   668  	srcObj, ok := src.(*Object)
   669  	if !ok {
   670  		fs.Debugf(src, "Can't copy - not same remote type")
   671  		return nil, fs.ErrorCantCopy
   672  	}
   673  
   674  	// Temporary Object under construction
   675  	dstObj := &Object{
   676  		fs:     f,
   677  		remote: remote,
   678  	}
   679  
   680  	// Copy
   681  	arg := files.RelocationArg{
   682  		RelocationPath: files.RelocationPath{
   683  			FromPath: f.opt.Enc.FromStandardPath(srcObj.remotePath()),
   684  			ToPath:   f.opt.Enc.FromStandardPath(dstObj.remotePath()),
   685  		},
   686  	}
   687  	var err error
   688  	var result *files.RelocationResult
   689  	err = f.pacer.Call(func() (bool, error) {
   690  		result, err = f.srv.CopyV2(&arg)
   691  		return shouldRetry(err)
   692  	})
   693  	if err != nil {
   694  		return nil, errors.Wrap(err, "copy failed")
   695  	}
   696  
   697  	// Set the metadata
   698  	fileInfo, ok := result.Metadata.(*files.FileMetadata)
   699  	if !ok {
   700  		return nil, fs.ErrorNotAFile
   701  	}
   702  	err = dstObj.setMetadataFromEntry(fileInfo)
   703  	if err != nil {
   704  		return nil, errors.Wrap(err, "copy failed")
   705  	}
   706  
   707  	return dstObj, nil
   708  }
   709  
   710  // Purge deletes all the files and the container
   711  //
   712  // Optional interface: Only implement this if you have a way of
   713  // deleting all the files quicker than just running Remove() on the
   714  // result of List()
   715  func (f *Fs) Purge(ctx context.Context) (err error) {
   716  	// Let dropbox delete the filesystem tree
   717  	err = f.pacer.Call(func() (bool, error) {
   718  		_, err = f.srv.DeleteV2(&files.DeleteArg{
   719  			Path: f.opt.Enc.FromStandardPath(f.slashRoot),
   720  		})
   721  		return shouldRetry(err)
   722  	})
   723  	return err
   724  }
   725  
   726  // Move src to this remote using server side move operations.
   727  //
   728  // This is stored with the remote path given
   729  //
   730  // It returns the destination Object and a possible error
   731  //
   732  // Will only be called if src.Fs().Name() == f.Name()
   733  //
   734  // If it isn't possible then return fs.ErrorCantMove
   735  func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
   736  	srcObj, ok := src.(*Object)
   737  	if !ok {
   738  		fs.Debugf(src, "Can't move - not same remote type")
   739  		return nil, fs.ErrorCantMove
   740  	}
   741  
   742  	// Temporary Object under construction
   743  	dstObj := &Object{
   744  		fs:     f,
   745  		remote: remote,
   746  	}
   747  
   748  	// Do the move
   749  	arg := files.RelocationArg{
   750  		RelocationPath: files.RelocationPath{
   751  			FromPath: f.opt.Enc.FromStandardPath(srcObj.remotePath()),
   752  			ToPath:   f.opt.Enc.FromStandardPath(dstObj.remotePath()),
   753  		},
   754  	}
   755  	var err error
   756  	var result *files.RelocationResult
   757  	err = f.pacer.Call(func() (bool, error) {
   758  		result, err = f.srv.MoveV2(&arg)
   759  		return shouldRetry(err)
   760  	})
   761  	if err != nil {
   762  		return nil, errors.Wrap(err, "move failed")
   763  	}
   764  
   765  	// Set the metadata
   766  	fileInfo, ok := result.Metadata.(*files.FileMetadata)
   767  	if !ok {
   768  		return nil, fs.ErrorNotAFile
   769  	}
   770  	err = dstObj.setMetadataFromEntry(fileInfo)
   771  	if err != nil {
   772  		return nil, errors.Wrap(err, "move failed")
   773  	}
   774  	return dstObj, nil
   775  }
   776  
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
	absPath := f.opt.Enc.FromStandardPath(path.Join(f.slashRoot, remote))
	fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
	createArg := sharing.CreateSharedLinkWithSettingsArg{
		Path: absPath,
	}
	var linkRes sharing.IsSharedLinkMetadata
	err = f.pacer.Call(func() (bool, error) {
		linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
		return shouldRetry(err)
	})

	// Creating the link fails if one already exists - in that case list the
	// existing links for the path and return the first one instead.
	if err != nil && strings.Contains(err.Error(),
		sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
		fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
		listArg := sharing.ListSharedLinksArg{
			Path:       absPath,
			DirectOnly: true,
		}
		var listRes *sharing.ListSharedLinksResult
		err = f.pacer.Call(func() (bool, error) {
			listRes, err = f.sharing.ListSharedLinks(&listArg)
			return shouldRetry(err)
		})
		if err != nil {
			return
		}
		if len(listRes.Links) == 0 {
			err = errors.New("Dropbox says the sharing link already exists, but list came back empty")
			return
		}
		linkRes = listRes.Links[0]
	}
	if err == nil {
		// Extract the URL from whichever metadata variant the API returned.
		switch res := linkRes.(type) {
		case *sharing.FileLinkMetadata:
			link = res.Url
		case *sharing.FolderLinkMetadata:
			link = res.Url
		default:
			err = fmt.Errorf("Don't know how to extract link, response has unknown format: %T", res)
		}
	}
	return
}
   823  
   824  // DirMove moves src, srcRemote to this remote at dstRemote
   825  // using server side move operations.
   826  //
   827  // Will only be called if src.Fs().Name() == f.Name()
   828  //
   829  // If it isn't possible then return fs.ErrorCantDirMove
   830  //
   831  // If destination exists then return fs.ErrorDirExists
   832  func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
   833  	srcFs, ok := src.(*Fs)
   834  	if !ok {
   835  		fs.Debugf(srcFs, "Can't move directory - not same remote type")
   836  		return fs.ErrorCantDirMove
   837  	}
   838  	srcPath := path.Join(srcFs.slashRoot, srcRemote)
   839  	dstPath := path.Join(f.slashRoot, dstRemote)
   840  
   841  	// Check if destination exists
   842  	_, err := f.getDirMetadata(dstPath)
   843  	if err == nil {
   844  		return fs.ErrorDirExists
   845  	} else if err != fs.ErrorDirNotFound {
   846  		return err
   847  	}
   848  
   849  	// Make sure the parent directory exists
   850  	// ...apparently not necessary
   851  
   852  	// Do the move
   853  	arg := files.RelocationArg{
   854  		RelocationPath: files.RelocationPath{
   855  			FromPath: f.opt.Enc.FromStandardPath(srcPath),
   856  			ToPath:   f.opt.Enc.FromStandardPath(dstPath),
   857  		},
   858  	}
   859  	err = f.pacer.Call(func() (bool, error) {
   860  		_, err = f.srv.MoveV2(&arg)
   861  		return shouldRetry(err)
   862  	})
   863  	if err != nil {
   864  		return errors.Wrap(err, "MoveDir failed")
   865  	}
   866  
   867  	return nil
   868  }
   869  
   870  // About gets quota information
   871  func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
   872  	var q *users.SpaceUsage
   873  	err = f.pacer.Call(func() (bool, error) {
   874  		q, err = f.users.GetSpaceUsage()
   875  		return shouldRetry(err)
   876  	})
   877  	if err != nil {
   878  		return nil, errors.Wrap(err, "about failed")
   879  	}
   880  	var total uint64
   881  	if q.Allocation != nil {
   882  		if q.Allocation.Individual != nil {
   883  			total += q.Allocation.Individual.Allocated
   884  		}
   885  		if q.Allocation.Team != nil {
   886  			total += q.Allocation.Team.Allocated
   887  		}
   888  	}
   889  	usage = &fs.Usage{
   890  		Total: fs.NewUsageValue(int64(total)),          // quota of bytes that can be used
   891  		Used:  fs.NewUsageValue(int64(q.Used)),         // bytes in use
   892  		Free:  fs.NewUsageValue(int64(total - q.Used)), // bytes which can be uploaded before reaching the quota
   893  	}
   894  	return usage, nil
   895  }
   896  
   897  // Hashes returns the supported hash sets.
   898  func (f *Fs) Hashes() hash.Set {
   899  	return hash.Set(DbHashType)
   900  }
   901  
   902  // ------------------------------------------------------------
   903  
   904  // Fs returns the parent Fs
   905  func (o *Object) Fs() fs.Info {
   906  	return o.fs
   907  }
   908  
   909  // Return a string version
   910  func (o *Object) String() string {
   911  	if o == nil {
   912  		return "<nil>"
   913  	}
   914  	return o.remote
   915  }
   916  
   917  // Remote returns the remote path
   918  func (o *Object) Remote() string {
   919  	return o.remote
   920  }
   921  
   922  // Hash returns the dropbox special hash
   923  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
   924  	if t != DbHashType {
   925  		return "", hash.ErrUnsupported
   926  	}
   927  	err := o.readMetaData()
   928  	if err != nil {
   929  		return "", errors.Wrap(err, "failed to read hash from metadata")
   930  	}
   931  	return o.hash, nil
   932  }
   933  
   934  // Size returns the size of an object in bytes
   935  func (o *Object) Size() int64 {
   936  	return o.bytes
   937  }
   938  
   939  // setMetadataFromEntry sets the fs data from a files.FileMetadata
   940  //
   941  // This isn't a complete set of metadata and has an inacurate date
   942  func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
   943  	o.bytes = int64(info.Size)
   944  	o.modTime = info.ClientModified
   945  	o.hash = info.ContentHash
   946  	return nil
   947  }
   948  
   949  // Reads the entry for a file from dropbox
   950  func (o *Object) readEntry() (*files.FileMetadata, error) {
   951  	return o.fs.getFileMetadata(o.remotePath())
   952  }
   953  
   954  // Read entry if not set and set metadata from it
   955  func (o *Object) readEntryAndSetMetadata() error {
   956  	// Last resort set time from client
   957  	if !o.modTime.IsZero() {
   958  		return nil
   959  	}
   960  	entry, err := o.readEntry()
   961  	if err != nil {
   962  		return err
   963  	}
   964  	return o.setMetadataFromEntry(entry)
   965  }
   966  
   967  // Returns the remote path for the object
   968  func (o *Object) remotePath() string {
   969  	return o.fs.slashRootSlash + o.remote
   970  }
   971  
   972  // readMetaData gets the info if it hasn't already been fetched
   973  func (o *Object) readMetaData() (err error) {
   974  	if !o.modTime.IsZero() {
   975  		return nil
   976  	}
   977  	// Last resort
   978  	return o.readEntryAndSetMetadata()
   979  }
   980  
   981  // ModTime returns the modification time of the object
   982  //
   983  // It attempts to read the objects mtime and if that isn't present the
   984  // LastModified returned in the http headers
   985  func (o *Object) ModTime(ctx context.Context) time.Time {
   986  	err := o.readMetaData()
   987  	if err != nil {
   988  		fs.Debugf(o, "Failed to read metadata: %v", err)
   989  		return time.Now()
   990  	}
   991  	return o.modTime
   992  }
   993  
   994  // SetModTime sets the modification time of the local fs object
   995  //
   996  // Commits the datastore
   997  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
   998  	// Dropbox doesn't have a way of doing this so returning this
   999  	// error will cause the file to be deleted first then
  1000  	// re-uploaded to set the time.
  1001  	return fs.ErrorCantSetModTimeWithoutDelete
  1002  }
  1003  
  1004  // Storable returns whether this object is storable
  1005  func (o *Object) Storable() bool {
  1006  	return true
  1007  }
  1008  
  1009  // Open an object for read
  1010  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  1011  	fs.FixRangeOption(options, o.bytes)
  1012  	headers := fs.OpenOptionHeaders(options)
  1013  	arg := files.DownloadArg{
  1014  		Path:         o.fs.opt.Enc.FromStandardPath(o.remotePath()),
  1015  		ExtraHeaders: headers,
  1016  	}
  1017  	err = o.fs.pacer.Call(func() (bool, error) {
  1018  		_, in, err = o.fs.srv.Download(&arg)
  1019  		return shouldRetry(err)
  1020  	})
  1021  
  1022  	switch e := err.(type) {
  1023  	case files.DownloadAPIError:
  1024  		// Don't attempt to retry copyright violation errors
  1025  		if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
  1026  			return nil, fserrors.NoRetryError(err)
  1027  		}
  1028  	}
  1029  
  1030  	return
  1031  }
  1032  
// uploadChunked uploads the object in parts
//
// Will work optimally if size is >= uploadChunkSize. If the size is either
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
	chunkSize := int64(o.fs.opt.ChunkSize)
	chunks := 0
	if size != -1 {
		// Number of chunks including the final (possibly short) one.
		// chunks stays 0 when the size is unknown.
		chunks = int(size/chunkSize) + 1
	}
	// Count bytes read so the session cursor offset can be kept in
	// sync with what has actually been consumed from the reader.
	in := readers.NewCountingReader(in0)
	buf := make([]byte, int(chunkSize))

	// fmtChunk logs upload progress; chunks == 0 means streaming with
	// unknown total size.
	fmtChunk := func(cur int, last bool) {
		if chunks == 0 && last {
			fs.Debugf(o, "Streaming chunk %d/%d", cur, cur)
		} else if chunks == 0 {
			fs.Debugf(o, "Streaming chunk %d/unknown", cur)
		} else {
			fs.Debugf(o, "Uploading chunk %d/%d", cur, chunks)
		}
	}

	// write the first chunk
	fmtChunk(1, false)
	var res *files.UploadSessionStartResult
	// Buffer the chunk in buf so it can be re-read on a retry
	chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
	err = o.fs.pacer.Call(func() (bool, error) {
		// seek to the start in case this is a retry
		if _, err = chunk.Seek(0, io.SeekStart); err != nil {
			return false, nil
		}
		res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}

	// The cursor tracks the upload session and how many bytes the
	// server has accepted so far.
	cursor := files.UploadSessionCursor{
		SessionId: res.SessionId,
		Offset:    0,
	}
	appendArg := files.UploadSessionAppendArg{
		Cursor: &cursor,
		Close:  false,
	}

	// write more whole chunks (if any)
	currentChunk := 2
	for {
		if chunks > 0 && currentChunk >= chunks {
			// if the size is known, only upload full chunks. Remaining bytes are uploaded with
			// the UploadSessionFinish request.
			break
		} else if chunks == 0 && in.BytesRead()-cursor.Offset < uint64(chunkSize) {
			// if the size is unknown, upload as long as we can read full chunks from the reader.
			// The UploadSessionFinish request will not contain any payload.
			break
		}
		// Advance the cursor to the total consumed so far before
		// buffering the next chunk.
		cursor.Offset = in.BytesRead()
		fmtChunk(currentChunk, false)
		chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
		err = o.fs.pacer.Call(func() (bool, error) {
			// seek to the start in case this is a retry
			if _, err = chunk.Seek(0, io.SeekStart); err != nil {
				return false, nil
			}
			err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
			// after the first chunk is uploaded, we retry everything
			return err != nil, err
		})
		if err != nil {
			return nil, err
		}
		currentChunk++
	}

	// write the remains
	cursor.Offset = in.BytesRead()
	args := &files.UploadSessionFinishArg{
		Cursor: &cursor,
		Commit: commitInfo,
	}
	fmtChunk(currentChunk, true)
	// The final chunk is unbounded - it carries whatever remains in the reader
	chunk = readers.NewRepeatableReaderBuffer(in, buf)
	err = o.fs.pacer.Call(func() (bool, error) {
		// seek to the start in case this is a retry
		if _, err = chunk.Seek(0, io.SeekStart); err != nil {
			return false, nil
		}
		entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
		// If error is insufficient space then don't retry
		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
				err = fserrors.NoRetryError(err)
				return false, err
			}
		}
		// after the first chunk is uploaded, we retry everything
		return err != nil, err
	})
	if err != nil {
		return nil, err
	}
	return entry, nil
}
  1141  
  1142  // Update the already existing object
  1143  //
  1144  // Copy the reader into the object updating modTime and size
  1145  //
  1146  // The new object may have been created if an error is returned
  1147  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  1148  	remote := o.remotePath()
  1149  	if ignoredFiles.MatchString(remote) {
  1150  		return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
  1151  	}
  1152  	commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
  1153  	commitInfo.Mode.Tag = "overwrite"
  1154  	// The Dropbox API only accepts timestamps in UTC with second precision.
  1155  	commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
  1156  
  1157  	size := src.Size()
  1158  	var err error
  1159  	var entry *files.FileMetadata
  1160  	if size > int64(o.fs.opt.ChunkSize) || size == -1 {
  1161  		entry, err = o.uploadChunked(in, commitInfo, size)
  1162  	} else {
  1163  		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  1164  			entry, err = o.fs.srv.Upload(commitInfo, in)
  1165  			return shouldRetry(err)
  1166  		})
  1167  	}
  1168  	if err != nil {
  1169  		return errors.Wrap(err, "upload failed")
  1170  	}
  1171  	return o.setMetadataFromEntry(entry)
  1172  }
  1173  
  1174  // Remove an object
  1175  func (o *Object) Remove(ctx context.Context) (err error) {
  1176  	err = o.fs.pacer.Call(func() (bool, error) {
  1177  		_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
  1178  			Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
  1179  		})
  1180  		return shouldRetry(err)
  1181  	})
  1182  	return err
  1183  }
  1184  
// Check the interfaces are satisfied
//
// These compile-time assertions fail the build if Fs or Object stop
// implementing any of the optional rclone interfaces listed here.
var (
	_ fs.Fs           = (*Fs)(nil)
	_ fs.Copier       = (*Fs)(nil)
	_ fs.Purger       = (*Fs)(nil)
	_ fs.PutStreamer  = (*Fs)(nil)
	_ fs.Mover        = (*Fs)(nil)
	_ fs.PublicLinker = (*Fs)(nil)
	_ fs.DirMover     = (*Fs)(nil)
	_ fs.Abouter      = (*Fs)(nil)
	_ fs.Object       = (*Object)(nil)
)