github.com/10XDev/rclone@v1.52.3-0.20200626220027-16af9ab76b2a/backend/dropbox/dropbox.go (about)

     1  // Package dropbox provides an interface to Dropbox object storage
     2  package dropbox
     3  
     4  // FIXME dropbox for business would be quite easy to add
     5  
     6  /*
     7  The Case folding of PathDisplay problem
     8  
     9  From the docs:
    10  
    11  path_display String. The cased path to be used for display purposes
    12  only. In rare instances the casing will not correctly match the user's
    13  filesystem, but this behavior will match the path provided in the Core
    14  API v1, and at least the last path component will have the correct
    15  casing. Changes to only the casing of paths won't be returned by
    16  list_folder/continue. This field will be null if the file or folder is
    17  not mounted. This field is optional.
    18  
    19  We solve this by not implementing the ListR interface.  The dropbox
    20  remote will recurse directory by directory only using the last element
    21  of path_display and all will be well.
    22  */
    23  
    24  import (
    25  	"context"
    26  	"fmt"
    27  	"io"
    28  	"log"
    29  	"path"
    30  	"regexp"
    31  	"strings"
    32  	"time"
    33  
    34  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
    35  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
    36  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
    37  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
    38  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
    39  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
    40  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
    41  	"github.com/pkg/errors"
    42  	"github.com/rclone/rclone/backend/dropbox/dbhash"
    43  	"github.com/rclone/rclone/fs"
    44  	"github.com/rclone/rclone/fs/config"
    45  	"github.com/rclone/rclone/fs/config/configmap"
    46  	"github.com/rclone/rclone/fs/config/configstruct"
    47  	"github.com/rclone/rclone/fs/config/obscure"
    48  	"github.com/rclone/rclone/fs/fserrors"
    49  	"github.com/rclone/rclone/fs/hash"
    50  	"github.com/rclone/rclone/lib/encoder"
    51  	"github.com/rclone/rclone/lib/oauthutil"
    52  	"github.com/rclone/rclone/lib/pacer"
    53  	"github.com/rclone/rclone/lib/readers"
    54  	"golang.org/x/oauth2"
    55  )
    56  
// Constants
const (
	rcloneClientID              = "5jcck7diasz0rqy"
	rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g"
	// Pacer settings: wait at least minSleep between API calls, backing
	// off exponentially (controlled by decayConstant) up to maxSleep
	// when errors occur.
	minSleep                    = 10 * time.Millisecond
	maxSleep                    = 2 * time.Second
	decayConstant               = 2 // bigger for slower decay, exponential
	// Upload chunk size - setting too small makes uploads slow.
	// Chunks are buffered into memory for retries.
	//
	// Speed vs chunk size uploading a 1 GB file on 2017-11-22
	//
	// Chunk Size MB, Speed Mbyte/s, % of max
	// 1	1.364	11%
	// 2	2.443	19%
	// 4	4.288	33%
	// 8	6.79	52%
	// 16	8.916	69%
	// 24	10.195	79%
	// 32	10.427	81%
	// 40	10.96	85%
	// 48	11.828	91%
	// 56	11.763	91%
	// 64	12.047	93%
	// 96	12.302	95%
	// 128	12.945	100%
	//
	// Choose 48MB which is 91% of Maximum speed.  rclone by
	// default does 4 transfers so this should use 4*48MB = 192MB
	// by default.
	defaultChunkSize = 48 * fs.MebiByte
	maxChunkSize     = 150 * fs.MebiByte
)
    90  
    91  var (
    92  	// Description of how to auth for this app
    93  	dropboxConfig = &oauth2.Config{
    94  		Scopes: []string{},
    95  		// Endpoint: oauth2.Endpoint{
    96  		// 	AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
    97  		// 	TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
    98  		// },
    99  		Endpoint:     dropbox.OAuthEndpoint(""),
   100  		ClientID:     rcloneClientID,
   101  		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
   102  		RedirectURL:  oauthutil.RedirectLocalhostURL,
   103  	}
   104  	// A regexp matching path names for files Dropbox ignores
   105  	// See https://www.dropbox.com/en/help/145 - Ignored files
   106  	ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
   107  
   108  	// DbHashType is the hash.Type for Dropbox
   109  	DbHashType hash.Type
   110  )
   111  
// Register with Fs
func init() {
	// Register Dropbox's content hash algorithm so it is available to
	// the rest of the file as DbHashType.
	DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
	fs.Register(&fs.RegInfo{
		Name:        "dropbox",
		Description: "Dropbox",
		NewFs:       NewFs,
		// Config runs the interactive OAuth token flow when the remote
		// is first configured.
		Config: func(name string, m configmap.Mapper) {
			opt := oauthutil.Options{
				// NOTE(review): offline access (refresh tokens) is not
				// requested here - presumably Dropbox access tokens
				// don't expire; confirm against the Dropbox OAuth docs.
				NoOffline: true,
			}
			err := oauthutil.Config("dropbox", name, m, dropboxConfig, &opt)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Dropbox App Client Id\nLeave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Dropbox App Client Secret\nLeave blank normally.",
		}, {
			Name: "chunk_size",
			Help: fmt.Sprintf(`Upload chunk size. (< %v).

Any files larger than this will be uploaded in chunks of this size.

Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries.  Setting this larger will increase the speed
slightly (at most 10%% for 128MB in tests) at the cost of using more
memory.  It can be set smaller if you are tight on memory.`, maxChunkSize),
			Default:  defaultChunkSize,
			Advanced: true,
		}, {
			Name:     "impersonate",
			Help:     "Impersonate this user when using a business account.",
			Default:  "",
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// https://www.dropbox.com/help/syncing-uploads/files-not-syncing lists / and \
			// as invalid characters.
			// Testing revealed names with trailing spaces and the DEL character don't work.
			// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
			Default: (encoder.Base |
				encoder.EncodeBackSlash |
				encoder.EncodeDel |
				encoder.EncodeRightSpace |
				encoder.EncodeInvalidUtf8),
		}},
	})
}
   167  
// Options defines the configuration for this backend
type Options struct {
	ChunkSize   fs.SizeSuffix        `config:"chunk_size"`  // upload chunk size (see the chunk_size option help)
	Impersonate string               `config:"impersonate"` // user to impersonate when using a business account
	Enc         encoder.MultiEncoder `config:"encoding"`    // filename encoding in use
}
   174  
// Fs represents a remote dropbox server
//
// It holds the SDK clients for the various Dropbox APIs (files,
// sharing, users, team) along with the parsed options and the pacer
// used to rate limit API calls.
type Fs struct {
	name           string         // name of this remote
	root           string         // the path we are working on
	opt            Options        // parsed options
	features       *fs.Features   // optional features
	srv            files.Client   // the connection to the dropbox server
	sharing        sharing.Client // as above, but for generating sharing links
	users          users.Client   // as above, but for accessing user information
	team           team.Client    // for the Teams API
	slashRoot      string         // root with "/" prefix, lowercase
	slashRootSlash string         // root with "/" prefix and postfix, lowercase
	pacer          *fs.Pacer      // To pace the API calls
	ns             string         // The namespace we are using or "" for none
}
   190  
// Object describes a dropbox object
//
// Dropbox Objects always have full metadata
type Object struct {
	fs      *Fs       // what this object is part of
	remote  string    // The remote path
	bytes   int64     // size of the object in bytes
	modTime time.Time // time it was last modified
	hash    string    // content_hash of the object
}
   201  
   202  // ------------------------------------------------------------
   203  
   204  // Name of the remote (as passed into NewFs)
   205  func (f *Fs) Name() string {
   206  	return f.name
   207  }
   208  
   209  // Root of the remote (as passed into NewFs)
   210  func (f *Fs) Root() string {
   211  	return f.root
   212  }
   213  
   214  // String converts this Fs to a string
   215  func (f *Fs) String() string {
   216  	return fmt.Sprintf("Dropbox root '%s'", f.root)
   217  }
   218  
   219  // Features returns the optional features of this Fs
   220  func (f *Fs) Features() *fs.Features {
   221  	return f.features
   222  }
   223  
// shouldRetry returns a boolean as to whether this err deserves to be
// retried.  It returns the err as a convenience
//
// The order of the checks matters: fatal conditions are detected
// first, then the SDK's typed rate-limit error, then legacy string
// matching, and finally rclone's generic retry classification.
func shouldRetry(err error) (bool, error) {
	if err == nil {
		return false, err
	}
	baseErrString := errors.Cause(err).Error()
	// First check for Insufficient Space - a full account can never
	// succeed on retry, so mark the error fatal.
	if strings.Contains(baseErrString, "insufficient_space") {
		return false, fserrors.FatalError(err)
	}
	// Then handle any official Retry-After header from Dropbox's SDK
	switch e := err.(type) {
	case auth.RateLimitAPIError:
		if e.RateLimitError.RetryAfter > 0 {
			fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
			// Wrap the error so the pacer waits the server-specified
			// interval before retrying.
			err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
		}
		return true, err
	}
	// Keep old behavior for backward compatibility: string matching on
	// rate-limit errors, and retrying errors whose cause string is empty.
	if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
		return true, err
	}
	// Fall back to rclone's generic retry classification.
	return fserrors.ShouldRetry(err), err
}
   250  
   251  func checkUploadChunkSize(cs fs.SizeSuffix) error {
   252  	const minChunkSize = fs.Byte
   253  	if cs < minChunkSize {
   254  		return errors.Errorf("%s is less than %s", cs, minChunkSize)
   255  	}
   256  	if cs > maxChunkSize {
   257  		return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
   258  	}
   259  	return nil
   260  }
   261  
   262  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   263  	err = checkUploadChunkSize(cs)
   264  	if err == nil {
   265  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
   266  	}
   267  	return
   268  }
   269  
// NewFs constructs an Fs from the path, container:path
//
// It parses the options, converts legacy tokens, builds the SDK
// clients (optionally impersonating a team member), resolves the root
// namespace for paths starting with "/", and finally checks whether
// the root points at a file (returning fs.ErrorIsFile in that case).
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, errors.Wrap(err, "dropbox: chunk size")
	}

	// Convert the old token if it exists.  The old token was just
	// just a string, the new one is a JSON blob
	oldToken, ok := m.Get(config.ConfigToken)
	oldToken = strings.TrimSpace(oldToken)
	if ok && oldToken != "" && oldToken[0] != '{' {
		fs.Infof(name, "Converting token to new format")
		newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
		err := config.SetValueAndSave(name, config.ConfigToken, newToken)
		if err != nil {
			return nil, errors.Wrap(err, "NewFS convert token")
		}
	}

	oAuthClient, _, err := oauthutil.NewClient(name, m, dropboxConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure dropbox")
	}

	f := &Fs{
		name:  name,
		opt:   *opt,
		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	// NB this local variable shadows the imported config package for
	// the rest of this function.
	config := dropbox.Config{
		LogLevel:        dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
		Client:          oAuthClient,    // the OAuth authenticated http client from oauthutil
		HeaderGenerator: f.headerGenerator,
	}

	// NOTE: needs to be created pre-impersonation so we can look up the impersonated user
	f.team = team.New(config)

	if opt.Impersonate != "" {

		// Look up the team member by email and set AsMemberID so all
		// subsequent API calls act as that member.
		user := team.UserSelectorArg{
			Email: opt.Impersonate,
		}
		user.Tag = "email"

		members := []*team.UserSelectorArg{&user}
		args := team.NewMembersGetInfoArgs(members)

		memberIds, err := f.team.MembersGetInfo(args)

		if err != nil {
			return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
		}

		config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
	}

	f.srv = files.New(config)
	f.sharing = sharing.New(config)
	f.users = users.New(config)
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		ReadMimeType:            true,
		CanHaveEmptyDirectories: true,
	}).Fill(f)
	f.setRoot(root)

	// If root starts with / then use the actual root
	if strings.HasPrefix(root, "/") {
		var acc *users.FullAccount
		err = f.pacer.Call(func() (bool, error) {
			acc, err = f.users.GetCurrentAccount()
			return shouldRetry(err)
		})
		if err != nil {
			return nil, errors.Wrap(err, "get current account failed")
		}
		switch x := acc.RootInfo.(type) {
		case *common.TeamRootInfo:
			f.ns = x.RootNamespaceId
		case *common.UserRootInfo:
			f.ns = x.RootNamespaceId
		default:
			return nil, errors.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
		}
		fs.Debugf(f, "Using root namespace %q", f.ns)
	}

	// See if the root is actually an object
	_, err = f.getFileMetadata(f.slashRoot)
	if err == nil {
		newRoot := path.Dir(f.root)
		if newRoot == "." {
			newRoot = ""
		}
		f.setRoot(newRoot)
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
   378  
   379  // headerGenerator for dropbox sdk
   380  func (f *Fs) headerGenerator(hostType string, style string, namespace string, route string) map[string]string {
   381  	if f.ns == "" {
   382  		return map[string]string{}
   383  	}
   384  	return map[string]string{
   385  		"Dropbox-API-Path-Root": `{".tag": "namespace_id", "namespace_id": "` + f.ns + `"}`,
   386  	}
   387  }
   388  
   389  // Sets root in f
   390  func (f *Fs) setRoot(root string) {
   391  	f.root = strings.Trim(root, "/")
   392  	f.slashRoot = "/" + f.root
   393  	f.slashRootSlash = f.slashRoot
   394  	if f.root != "" {
   395  		f.slashRootSlash += "/"
   396  	}
   397  }
   398  
// getMetadata gets the metadata for a file or directory
//
// If objPath doesn't exist then notFound is returned true with a nil
// error; any other API failure is returned in err.
func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
	err = f.pacer.Call(func() (bool, error) {
		entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
			Path: f.opt.Enc.FromStandardPath(objPath),
		})
		return shouldRetry(err)
	})
	if err != nil {
		switch e := err.(type) {
		case files.GetMetadataAPIError:
			// Translate the SDK's "not found" error into the notFound
			// flag so callers can distinguish it from real failures.
			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
				notFound = true
				err = nil
			}
		}
	}
	return
}
   418  
   419  // getFileMetadata gets the metadata for a file
   420  func (f *Fs) getFileMetadata(filePath string) (fileInfo *files.FileMetadata, err error) {
   421  	entry, notFound, err := f.getMetadata(filePath)
   422  	if err != nil {
   423  		return nil, err
   424  	}
   425  	if notFound {
   426  		return nil, fs.ErrorObjectNotFound
   427  	}
   428  	fileInfo, ok := entry.(*files.FileMetadata)
   429  	if !ok {
   430  		return nil, fs.ErrorNotAFile
   431  	}
   432  	return fileInfo, nil
   433  }
   434  
   435  // getDirMetadata gets the metadata for a directory
   436  func (f *Fs) getDirMetadata(dirPath string) (dirInfo *files.FolderMetadata, err error) {
   437  	entry, notFound, err := f.getMetadata(dirPath)
   438  	if err != nil {
   439  		return nil, err
   440  	}
   441  	if notFound {
   442  		return nil, fs.ErrorDirNotFound
   443  	}
   444  	dirInfo, ok := entry.(*files.FolderMetadata)
   445  	if !ok {
   446  		return nil, fs.ErrorIsFile
   447  	}
   448  	return dirInfo, nil
   449  }
   450  
   451  // Return an Object from a path
   452  //
   453  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   454  func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Object, error) {
   455  	o := &Object{
   456  		fs:     f,
   457  		remote: remote,
   458  	}
   459  	var err error
   460  	if info != nil {
   461  		err = o.setMetadataFromEntry(info)
   462  	} else {
   463  		err = o.readEntryAndSetMetadata()
   464  	}
   465  	if err != nil {
   466  		return nil, err
   467  	}
   468  	return o, nil
   469  }
   470  
   471  // NewObject finds the Object at remote.  If it can't be found
   472  // it returns the error fs.ErrorObjectNotFound.
   473  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   474  	return f.newObjectWithInfo(remote, nil)
   475  }
   476  
// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	root := f.slashRoot
	if dir != "" {
		root += "/" + dir
	}

	started := false
	var res *files.ListFolderResult
	// Page through the listing: the first iteration calls ListFolder,
	// later ones call ListFolderContinue with the cursor, until the
	// server reports HasMore == false.
	for {
		if !started {
			arg := files.ListFolderArg{
				Path:      f.opt.Enc.FromStandardPath(root),
				Recursive: false,
			}
			if root == "/" {
				arg.Path = "" // Specify root folder as empty string
			}
			err = f.pacer.Call(func() (bool, error) {
				res, err = f.srv.ListFolder(&arg)
				return shouldRetry(err)
			})
			if err != nil {
				switch e := err.(type) {
				case files.ListFolderAPIError:
					// Map the SDK's "not found" to rclone's sentinel error.
					if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
						err = fs.ErrorDirNotFound
					}
				}
				return nil, err
			}
			started = true
		} else {
			arg := files.ListFolderContinueArg{
				Cursor: res.Cursor,
			}
			err = f.pacer.Call(func() (bool, error) {
				res, err = f.srv.ListFolderContinue(&arg)
				return shouldRetry(err)
			})
			if err != nil {
				return nil, errors.Wrap(err, "list continue")
			}
		}
		for _, entry := range res.Entries {
			var fileInfo *files.FileMetadata
			var folderInfo *files.FolderMetadata
			var metadata *files.Metadata
			switch info := entry.(type) {
			case *files.FolderMetadata:
				folderInfo = info
				metadata = &info.Metadata
			case *files.FileMetadata:
				fileInfo = info
				metadata = &info.Metadata
			default:
				fs.Errorf(f, "Unknown type %T", entry)
				continue
			}

			// Only the last element is reliably cased in PathDisplay
			entryPath := metadata.PathDisplay
			leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
			remote := path.Join(dir, leaf)
			if folderInfo != nil {
				// Use the current time as the directory modification time.
				d := fs.NewDir(remote, time.Now())
				entries = append(entries, d)
			} else if fileInfo != nil {
				o, err := f.newObjectWithInfo(remote, fileInfo)
				if err != nil {
					return nil, err
				}
				entries = append(entries, o)
			}
		}
		if !res.HasMore {
			break
		}
	}
	return entries, nil
}
   566  
   567  // Put the object
   568  //
   569  // Copy the reader in to the new object which is returned
   570  //
   571  // The new object may have been created if an error is returned
   572  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   573  	// Temporary Object under construction
   574  	o := &Object{
   575  		fs:     f,
   576  		remote: src.Remote(),
   577  	}
   578  	return o, o.Update(ctx, in, src, options...)
   579  }
   580  
   581  // PutStream uploads to the remote path with the modTime given of indeterminate size
   582  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   583  	return f.Put(ctx, in, src, options...)
   584  }
   585  
   586  // Mkdir creates the container if it doesn't exist
   587  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   588  	root := path.Join(f.slashRoot, dir)
   589  
   590  	// can't create or run metadata on root
   591  	if root == "/" {
   592  		return nil
   593  	}
   594  
   595  	// check directory doesn't exist
   596  	_, err := f.getDirMetadata(root)
   597  	if err == nil {
   598  		return nil // directory exists already
   599  	} else if err != fs.ErrorDirNotFound {
   600  		return err // some other error
   601  	}
   602  
   603  	// create it
   604  	arg2 := files.CreateFolderArg{
   605  		Path: f.opt.Enc.FromStandardPath(root),
   606  	}
   607  	err = f.pacer.Call(func() (bool, error) {
   608  		_, err = f.srv.CreateFolderV2(&arg2)
   609  		return shouldRetry(err)
   610  	})
   611  	return err
   612  }
   613  
   614  // Rmdir deletes the container
   615  //
   616  // Returns an error if it isn't empty
   617  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
   618  	root := path.Join(f.slashRoot, dir)
   619  
   620  	// can't remove root
   621  	if root == "/" {
   622  		return errors.New("can't remove root directory")
   623  	}
   624  
   625  	// check directory exists
   626  	_, err := f.getDirMetadata(root)
   627  	if err != nil {
   628  		return errors.Wrap(err, "Rmdir")
   629  	}
   630  
   631  	root = f.opt.Enc.FromStandardPath(root)
   632  	// check directory empty
   633  	arg := files.ListFolderArg{
   634  		Path:      root,
   635  		Recursive: false,
   636  	}
   637  	if root == "/" {
   638  		arg.Path = "" // Specify root folder as empty string
   639  	}
   640  	var res *files.ListFolderResult
   641  	err = f.pacer.Call(func() (bool, error) {
   642  		res, err = f.srv.ListFolder(&arg)
   643  		return shouldRetry(err)
   644  	})
   645  	if err != nil {
   646  		return errors.Wrap(err, "Rmdir")
   647  	}
   648  	if len(res.Entries) != 0 {
   649  		return errors.New("directory not empty")
   650  	}
   651  
   652  	// remove it
   653  	err = f.pacer.Call(func() (bool, error) {
   654  		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
   655  		return shouldRetry(err)
   656  	})
   657  	return err
   658  }
   659  
   660  // Precision returns the precision
   661  func (f *Fs) Precision() time.Duration {
   662  	return time.Second
   663  }
   664  
   665  // Copy src to this remote using server side copy operations.
   666  //
   667  // This is stored with the remote path given
   668  //
   669  // It returns the destination Object and a possible error
   670  //
   671  // Will only be called if src.Fs().Name() == f.Name()
   672  //
   673  // If it isn't possible then return fs.ErrorCantCopy
   674  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
   675  	srcObj, ok := src.(*Object)
   676  	if !ok {
   677  		fs.Debugf(src, "Can't copy - not same remote type")
   678  		return nil, fs.ErrorCantCopy
   679  	}
   680  
   681  	// Temporary Object under construction
   682  	dstObj := &Object{
   683  		fs:     f,
   684  		remote: remote,
   685  	}
   686  
   687  	// Copy
   688  	arg := files.RelocationArg{
   689  		RelocationPath: files.RelocationPath{
   690  			FromPath: f.opt.Enc.FromStandardPath(srcObj.remotePath()),
   691  			ToPath:   f.opt.Enc.FromStandardPath(dstObj.remotePath()),
   692  		},
   693  	}
   694  	var err error
   695  	var result *files.RelocationResult
   696  	err = f.pacer.Call(func() (bool, error) {
   697  		result, err = f.srv.CopyV2(&arg)
   698  		return shouldRetry(err)
   699  	})
   700  	if err != nil {
   701  		return nil, errors.Wrap(err, "copy failed")
   702  	}
   703  
   704  	// Set the metadata
   705  	fileInfo, ok := result.Metadata.(*files.FileMetadata)
   706  	if !ok {
   707  		return nil, fs.ErrorNotAFile
   708  	}
   709  	err = dstObj.setMetadataFromEntry(fileInfo)
   710  	if err != nil {
   711  		return nil, errors.Wrap(err, "copy failed")
   712  	}
   713  
   714  	return dstObj, nil
   715  }
   716  
   717  // Purge deletes all the files and the container
   718  //
   719  // Optional interface: Only implement this if you have a way of
   720  // deleting all the files quicker than just running Remove() on the
   721  // result of List()
   722  func (f *Fs) Purge(ctx context.Context) (err error) {
   723  	// Let dropbox delete the filesystem tree
   724  	err = f.pacer.Call(func() (bool, error) {
   725  		_, err = f.srv.DeleteV2(&files.DeleteArg{
   726  			Path: f.opt.Enc.FromStandardPath(f.slashRoot),
   727  		})
   728  		return shouldRetry(err)
   729  	})
   730  	return err
   731  }
   732  
   733  // Move src to this remote using server side move operations.
   734  //
   735  // This is stored with the remote path given
   736  //
   737  // It returns the destination Object and a possible error
   738  //
   739  // Will only be called if src.Fs().Name() == f.Name()
   740  //
   741  // If it isn't possible then return fs.ErrorCantMove
   742  func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
   743  	srcObj, ok := src.(*Object)
   744  	if !ok {
   745  		fs.Debugf(src, "Can't move - not same remote type")
   746  		return nil, fs.ErrorCantMove
   747  	}
   748  
   749  	// Temporary Object under construction
   750  	dstObj := &Object{
   751  		fs:     f,
   752  		remote: remote,
   753  	}
   754  
   755  	// Do the move
   756  	arg := files.RelocationArg{
   757  		RelocationPath: files.RelocationPath{
   758  			FromPath: f.opt.Enc.FromStandardPath(srcObj.remotePath()),
   759  			ToPath:   f.opt.Enc.FromStandardPath(dstObj.remotePath()),
   760  		},
   761  	}
   762  	var err error
   763  	var result *files.RelocationResult
   764  	err = f.pacer.Call(func() (bool, error) {
   765  		result, err = f.srv.MoveV2(&arg)
   766  		return shouldRetry(err)
   767  	})
   768  	if err != nil {
   769  		return nil, errors.Wrap(err, "move failed")
   770  	}
   771  
   772  	// Set the metadata
   773  	fileInfo, ok := result.Metadata.(*files.FileMetadata)
   774  	if !ok {
   775  		return nil, fs.ErrorNotAFile
   776  	}
   777  	err = dstObj.setMetadataFromEntry(fileInfo)
   778  	if err != nil {
   779  		return nil, errors.Wrap(err, "move failed")
   780  	}
   781  	return dstObj, nil
   782  }
   783  
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
//
// If a shared link already exists it is looked up and returned instead
// of treating the "already exists" response as an error.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
	absPath := f.opt.Enc.FromStandardPath(path.Join(f.slashRoot, remote))
	fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
	createArg := sharing.CreateSharedLinkWithSettingsArg{
		Path: absPath,
	}
	var linkRes sharing.IsSharedLinkMetadata
	err = f.pacer.Call(func() (bool, error) {
		linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
		return shouldRetry(err)
	})

	// The "already exists" condition is detected by matching the error
	// text against the SDK's error tag constant.
	if err != nil && strings.Contains(err.Error(),
		sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
		fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
		listArg := sharing.ListSharedLinksArg{
			Path:       absPath,
			DirectOnly: true,
		}
		var listRes *sharing.ListSharedLinksResult
		err = f.pacer.Call(func() (bool, error) {
			listRes, err = f.sharing.ListSharedLinks(&listArg)
			return shouldRetry(err)
		})
		if err != nil {
			return
		}
		if len(listRes.Links) == 0 {
			err = errors.New("Dropbox says the sharing link already exists, but list came back empty")
			return
		}
		linkRes = listRes.Links[0]
	}
	if err == nil {
		// Extract the URL from whichever link metadata type was returned.
		switch res := linkRes.(type) {
		case *sharing.FileLinkMetadata:
			link = res.Url
		case *sharing.FolderLinkMetadata:
			link = res.Url
		default:
			err = fmt.Errorf("Don't know how to extract link, response has unknown format: %T", res)
		}
	}
	return
}
   830  
   831  // DirMove moves src, srcRemote to this remote at dstRemote
   832  // using server side move operations.
   833  //
   834  // Will only be called if src.Fs().Name() == f.Name()
   835  //
   836  // If it isn't possible then return fs.ErrorCantDirMove
   837  //
   838  // If destination exists then return fs.ErrorDirExists
   839  func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
   840  	srcFs, ok := src.(*Fs)
   841  	if !ok {
   842  		fs.Debugf(srcFs, "Can't move directory - not same remote type")
   843  		return fs.ErrorCantDirMove
   844  	}
   845  	srcPath := path.Join(srcFs.slashRoot, srcRemote)
   846  	dstPath := path.Join(f.slashRoot, dstRemote)
   847  
   848  	// Check if destination exists
   849  	_, err := f.getDirMetadata(dstPath)
   850  	if err == nil {
   851  		return fs.ErrorDirExists
   852  	} else if err != fs.ErrorDirNotFound {
   853  		return err
   854  	}
   855  
   856  	// Make sure the parent directory exists
   857  	// ...apparently not necessary
   858  
   859  	// Do the move
   860  	arg := files.RelocationArg{
   861  		RelocationPath: files.RelocationPath{
   862  			FromPath: f.opt.Enc.FromStandardPath(srcPath),
   863  			ToPath:   f.opt.Enc.FromStandardPath(dstPath),
   864  		},
   865  	}
   866  	err = f.pacer.Call(func() (bool, error) {
   867  		_, err = f.srv.MoveV2(&arg)
   868  		return shouldRetry(err)
   869  	})
   870  	if err != nil {
   871  		return errors.Wrap(err, "MoveDir failed")
   872  	}
   873  
   874  	return nil
   875  }
   876  
   877  // About gets quota information
   878  func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
   879  	var q *users.SpaceUsage
   880  	err = f.pacer.Call(func() (bool, error) {
   881  		q, err = f.users.GetSpaceUsage()
   882  		return shouldRetry(err)
   883  	})
   884  	if err != nil {
   885  		return nil, errors.Wrap(err, "about failed")
   886  	}
   887  	var total uint64
   888  	if q.Allocation != nil {
   889  		if q.Allocation.Individual != nil {
   890  			total += q.Allocation.Individual.Allocated
   891  		}
   892  		if q.Allocation.Team != nil {
   893  			total += q.Allocation.Team.Allocated
   894  		}
   895  	}
   896  	usage = &fs.Usage{
   897  		Total: fs.NewUsageValue(int64(total)),          // quota of bytes that can be used
   898  		Used:  fs.NewUsageValue(int64(q.Used)),         // bytes in use
   899  		Free:  fs.NewUsageValue(int64(total - q.Used)), // bytes which can be uploaded before reaching the quota
   900  	}
   901  	return usage, nil
   902  }
   903  
// Hashes returns the supported hash sets - only Dropbox's own
// content-hash type is available on this backend.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(DbHashType)
}
   908  
   909  // ------------------------------------------------------------
   910  
// Fs returns the parent Fs that this object belongs to.
func (o *Object) Fs() fs.Info {
	return o.fs
}
   915  
// String returns a string version of the object - its remote path,
// or "<nil>" for a nil receiver (safe to call on nil for logging).
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}
   923  
// Remote returns the remote path of the object, relative to the Fs root.
func (o *Object) Remote() string {
	return o.remote
}
   928  
   929  // Hash returns the dropbox special hash
   930  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
   931  	if t != DbHashType {
   932  		return "", hash.ErrUnsupported
   933  	}
   934  	err := o.readMetaData()
   935  	if err != nil {
   936  		return "", errors.Wrap(err, "failed to read hash from metadata")
   937  	}
   938  	return o.hash, nil
   939  }
   940  
// Size returns the size of an object in bytes.
func (o *Object) Size() int64 {
	return o.bytes
}
   945  
// setMetadataFromEntry sets the fs data from a files.FileMetadata
//
// This isn't a complete set of metadata and has an inaccurate date.
// It always returns nil; the error return keeps the signature
// consistent with other metadata setters.
func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
	o.bytes = int64(info.Size)
	o.modTime = info.ClientModified
	o.hash = info.ContentHash
	return nil
}
   955  
// readEntry reads the metadata entry for this file from dropbox.
func (o *Object) readEntry() (*files.FileMetadata, error) {
	return o.fs.getFileMetadata(o.remotePath())
}
   960  
   961  // Read entry if not set and set metadata from it
   962  func (o *Object) readEntryAndSetMetadata() error {
   963  	// Last resort set time from client
   964  	if !o.modTime.IsZero() {
   965  		return nil
   966  	}
   967  	entry, err := o.readEntry()
   968  	if err != nil {
   969  		return err
   970  	}
   971  	return o.setMetadataFromEntry(entry)
   972  }
   973  
// remotePath returns the full remote path for the object, ie the
// slash-rooted Fs root joined with the object's remote name.
func (o *Object) remotePath() string {
	return o.fs.slashRootSlash + o.remote
}
   978  
   979  // readMetaData gets the info if it hasn't already been fetched
   980  func (o *Object) readMetaData() (err error) {
   981  	if !o.modTime.IsZero() {
   982  		return nil
   983  	}
   984  	// Last resort
   985  	return o.readEntryAndSetMetadata()
   986  }
   987  
   988  // ModTime returns the modification time of the object
   989  //
   990  // It attempts to read the objects mtime and if that isn't present the
   991  // LastModified returned in the http headers
   992  func (o *Object) ModTime(ctx context.Context) time.Time {
   993  	err := o.readMetaData()
   994  	if err != nil {
   995  		fs.Debugf(o, "Failed to read metadata: %v", err)
   996  		return time.Now()
   997  	}
   998  	return o.modTime
   999  }
  1000  
// SetModTime sets the modification time of the local fs object
//
// Commits the datastore
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	// Dropbox doesn't have a way of doing this so returning this
	// error will cause the file to be deleted first then
	// re-uploaded to set the time.
	return fs.ErrorCantSetModTimeWithoutDelete
}
  1010  
// Storable returns whether this object is storable - always true
// for dropbox objects.
func (o *Object) Storable() bool {
	return true
}
  1015  
  1016  // Open an object for read
  1017  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  1018  	fs.FixRangeOption(options, o.bytes)
  1019  	headers := fs.OpenOptionHeaders(options)
  1020  	arg := files.DownloadArg{
  1021  		Path:         o.fs.opt.Enc.FromStandardPath(o.remotePath()),
  1022  		ExtraHeaders: headers,
  1023  	}
  1024  	err = o.fs.pacer.Call(func() (bool, error) {
  1025  		_, in, err = o.fs.srv.Download(&arg)
  1026  		return shouldRetry(err)
  1027  	})
  1028  
  1029  	switch e := err.(type) {
  1030  	case files.DownloadAPIError:
  1031  		// Don't attempt to retry copyright violation errors
  1032  		if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
  1033  			return nil, fserrors.NoRetryError(err)
  1034  		}
  1035  	}
  1036  
  1037  	return
  1038  }
  1039  
// uploadChunked uploads the object in parts using a Dropbox upload
// session (start / append / finish).
//
// Will work optimally if size is >= uploadChunkSize. If the size is either
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
//
// NOTE(review): once the first chunk has been uploaded, any error on a
// later request retries the whole transfer (the closures return
// err != nil as the retry flag) rather than just the failed chunk.
func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
	chunkSize := int64(o.fs.opt.ChunkSize)
	// chunks == 0 is the sentinel for "total size unknown"
	chunks := 0
	if size != -1 {
		chunks = int(size/chunkSize) + 1
	}
	// Counting reader tracks bytes consumed so the session cursor
	// offset can be kept in sync with what Dropbox has received
	in := readers.NewCountingReader(in0)
	buf := make([]byte, int(chunkSize))

	// fmtChunk logs progress, distinguishing streaming (unknown size)
	// from fixed-size uploads
	fmtChunk := func(cur int, last bool) {
		if chunks == 0 && last {
			fs.Debugf(o, "Streaming chunk %d/%d", cur, cur)
		} else if chunks == 0 {
			fs.Debugf(o, "Streaming chunk %d/unknown", cur)
		} else {
			fs.Debugf(o, "Uploading chunk %d/%d", cur, chunks)
		}
	}

	// write the first chunk
	fmtChunk(1, false)
	var res *files.UploadSessionStartResult
	chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
	err = o.fs.pacer.Call(func() (bool, error) {
		// seek to the start in case this is a retry
		if _, err = chunk.Seek(0, io.SeekStart); err != nil {
			return false, nil
		}
		res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}

	// The session id from the start call identifies this upload for
	// all subsequent append/finish requests
	cursor := files.UploadSessionCursor{
		SessionId: res.SessionId,
		Offset:    0,
	}
	appendArg := files.UploadSessionAppendArg{
		Cursor: &cursor,
		Close:  false,
	}

	// write more whole chunks (if any)
	currentChunk := 2
	for {
		if chunks > 0 && currentChunk >= chunks {
			// if the size is known, only upload full chunks. Remaining bytes are uploaded with
			// the UploadSessionFinish request.
			break
		} else if chunks == 0 && in.BytesRead()-cursor.Offset < uint64(chunkSize) {
			// if the size is unknown, upload as long as we can read full chunks from the reader.
			// The UploadSessionFinish request will not contain any payload.
			break
		}
		// Advance the cursor to the number of bytes Dropbox has
		// acknowledged before sending the next chunk
		cursor.Offset = in.BytesRead()
		fmtChunk(currentChunk, false)
		chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
		err = o.fs.pacer.Call(func() (bool, error) {
			// seek to the start in case this is a retry
			if _, err = chunk.Seek(0, io.SeekStart); err != nil {
				return false, nil
			}
			err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
			// after the first chunk is uploaded, we retry everything
			return err != nil, err
		})
		if err != nil {
			return nil, err
		}
		currentChunk++
	}

	// write the remains
	cursor.Offset = in.BytesRead()
	args := &files.UploadSessionFinishArg{
		Cursor: &cursor,
		Commit: commitInfo,
	}
	fmtChunk(currentChunk, true)
	// Unbounded repeatable reader - the finish request carries whatever
	// bytes are left (possibly none when the size was unknown)
	chunk = readers.NewRepeatableReaderBuffer(in, buf)
	err = o.fs.pacer.Call(func() (bool, error) {
		// seek to the start in case this is a retry
		if _, err = chunk.Seek(0, io.SeekStart); err != nil {
			return false, nil
		}
		entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
		// If error is insufficient space then don't retry
		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
				err = fserrors.NoRetryError(err)
				return false, err
			}
		}
		// after the first chunk is uploaded, we retry everything
		return err != nil, err
	})
	if err != nil {
		return nil, err
	}
	return entry, nil
}
  1148  
  1149  // Update the already existing object
  1150  //
  1151  // Copy the reader into the object updating modTime and size
  1152  //
  1153  // The new object may have been created if an error is returned
  1154  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  1155  	remote := o.remotePath()
  1156  	if ignoredFiles.MatchString(remote) {
  1157  		return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
  1158  	}
  1159  	commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
  1160  	commitInfo.Mode.Tag = "overwrite"
  1161  	// The Dropbox API only accepts timestamps in UTC with second precision.
  1162  	commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
  1163  
  1164  	size := src.Size()
  1165  	var err error
  1166  	var entry *files.FileMetadata
  1167  	if size > int64(o.fs.opt.ChunkSize) || size == -1 {
  1168  		entry, err = o.uploadChunked(in, commitInfo, size)
  1169  	} else {
  1170  		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  1171  			entry, err = o.fs.srv.Upload(commitInfo, in)
  1172  			return shouldRetry(err)
  1173  		})
  1174  	}
  1175  	if err != nil {
  1176  		return errors.Wrap(err, "upload failed")
  1177  	}
  1178  	return o.setMetadataFromEntry(entry)
  1179  }
  1180  
  1181  // Remove an object
  1182  func (o *Object) Remove(ctx context.Context) (err error) {
  1183  	err = o.fs.pacer.Call(func() (bool, error) {
  1184  		_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
  1185  			Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
  1186  		})
  1187  		return shouldRetry(err)
  1188  	})
  1189  	return err
  1190  }
  1191  
// Check the interfaces are satisfied at compile time
var (
	_ fs.Fs           = (*Fs)(nil)
	_ fs.Copier       = (*Fs)(nil)
	_ fs.Purger       = (*Fs)(nil)
	_ fs.PutStreamer  = (*Fs)(nil)
	_ fs.Mover        = (*Fs)(nil)
	_ fs.PublicLinker = (*Fs)(nil)
	_ fs.DirMover     = (*Fs)(nil)
	_ fs.Abouter      = (*Fs)(nil)
	_ fs.Object       = (*Object)(nil)
)