github.com/ncw/rclone@v1.48.1-0.20190724201158-a35aa1360e3e/backend/dropbox/dropbox.go (about)

     1  // Package dropbox provides an interface to Dropbox object storage
     2  package dropbox
     3  
     4  // FIXME dropbox for business would be quite easy to add
     5  
     6  /*
     7  The Case folding of PathDisplay problem
     8  
     9  From the docs:
    10  
    11  path_display String. The cased path to be used for display purposes
    12  only. In rare instances the casing will not correctly match the user's
    13  filesystem, but this behavior will match the path provided in the Core
    14  API v1, and at least the last path component will have the correct
    15  casing. Changes to only the casing of paths won't be returned by
    16  list_folder/continue. This field will be null if the file or folder is
    17  not mounted. This field is optional.
    18  
    19  We solve this by not implementing the ListR interface.  The dropbox
    20  remote will recurse directory by directory only using the last element
    21  of path_display and all will be well.
    22  */
    23  
    24  import (
    25  	"context"
    26  	"fmt"
    27  	"io"
    28  	"log"
    29  	"path"
    30  	"regexp"
    31  	"strings"
    32  	"time"
    33  
    34  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
    35  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
    36  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
    37  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
    38  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
    39  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
    40  	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
    41  	"github.com/ncw/rclone/fs"
    42  	"github.com/ncw/rclone/fs/config"
    43  	"github.com/ncw/rclone/fs/config/configmap"
    44  	"github.com/ncw/rclone/fs/config/configstruct"
    45  	"github.com/ncw/rclone/fs/config/obscure"
    46  	"github.com/ncw/rclone/fs/fserrors"
    47  	"github.com/ncw/rclone/fs/hash"
    48  	"github.com/ncw/rclone/lib/oauthutil"
    49  	"github.com/ncw/rclone/lib/pacer"
    50  	"github.com/ncw/rclone/lib/readers"
    51  	"github.com/pkg/errors"
    52  	"golang.org/x/oauth2"
    53  )
    54  
    55  // Constants
    56  const (
    57  	rcloneClientID              = "5jcck7diasz0rqy"
    58  	rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g"
    59  	minSleep                    = 10 * time.Millisecond
    60  	maxSleep                    = 2 * time.Second
    61  	decayConstant               = 2 // bigger for slower decay, exponential
    62  	// Upload chunk size - setting too small makes uploads slow.
    63  	// Chunks are buffered into memory for retries.
    64  	//
    65  	// Speed vs chunk size uploading a 1 GB file on 2017-11-22
    66  	//
    67  	// Chunk Size MB, Speed Mbyte/s, % of max
    68  	// 1	1.364	11%
    69  	// 2	2.443	19%
    70  	// 4	4.288	33%
    71  	// 8	6.79	52%
    72  	// 16	8.916	69%
    73  	// 24	10.195	79%
    74  	// 32	10.427	81%
    75  	// 40	10.96	85%
    76  	// 48	11.828	91%
    77  	// 56	11.763	91%
    78  	// 64	12.047	93%
    79  	// 96	12.302	95%
    80  	// 128	12.945	100%
    81  	//
    82  	// Choose 48MB which is 91% of Maximum speed.  rclone by
    83  	// default does 4 transfers so this should use 4*48MB = 192MB
    84  	// by default.
    85  	defaultChunkSize = 48 * fs.MebiByte
    86  	maxChunkSize     = 150 * fs.MebiByte
    87  )
    88  
    89  var (
    90  	// Description of how to auth for this app
    91  	dropboxConfig = &oauth2.Config{
    92  		Scopes: []string{},
    93  		// Endpoint: oauth2.Endpoint{
    94  		// 	AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
    95  		// 	TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
    96  		// },
    97  		Endpoint:     dropbox.OAuthEndpoint(""),
    98  		ClientID:     rcloneClientID,
    99  		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
   100  		RedirectURL:  oauthutil.RedirectLocalhostURL,
   101  	}
   102  	// A regexp matching path names for files Dropbox ignores
   103  	// See https://www.dropbox.com/en/help/145 - Ignored files
   104  	ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
   105  )
   106  
// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "dropbox",
		Description: "Dropbox",
		NewFs:       NewFs,
		// Config runs the oauth flow; "NoOffline" because dropbox
		// tokens don't use the offline access type.
		Config: func(name string, m configmap.Mapper) {
			err := oauthutil.ConfigNoOffline("dropbox", name, m, dropboxConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Dropbox App Client Id\nLeave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Dropbox App Client Secret\nLeave blank normally.",
		}, {
			Name: "chunk_size",
			// %% escapes the percent sign for Sprintf
			Help: fmt.Sprintf(`Upload chunk size. (< %v).

Any files larger than this will be uploaded in chunks of this size.

Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries.  Setting this larger will increase the speed
slightly (at most 10%% for 128MB in tests) at the cost of using more
memory.  It can be set smaller if you are tight on memory.`, maxChunkSize),
			Default:  defaultChunkSize,
			Advanced: true,
		}, {
			Name:     "impersonate",
			Help:     "Impersonate this user when using a business account.",
			Default:  "",
			Advanced: true,
		}},
	})
}
   145  
// Options defines the configuration for this backend
type Options struct {
	ChunkSize   fs.SizeSuffix `config:"chunk_size"`  // size of chunks for chunked uploads
	Impersonate string        `config:"impersonate"` // business account user to impersonate, "" for none
}
   151  
// Fs represents a remote dropbox server
//
// All fields are set up in NewFs and not modified afterwards.
type Fs struct {
	name           string         // name of this remote
	root           string         // the path we are working on
	opt            Options        // parsed options
	features       *fs.Features   // optional features
	srv            files.Client   // the connection to the dropbox server
	sharing        sharing.Client // as above, but for generating sharing links
	users          users.Client   // as above, but for accessing user information
	team           team.Client    // for the Teams API
	slashRoot      string         // root with "/" prefix, lowercase
	slashRootSlash string         // root with "/" prefix and postfix, lowercase
	pacer          *fs.Pacer      // To pace the API calls
	ns             string         // The namespace we are using or "" for none
}
   167  
// Object describes a dropbox object
//
// Dropbox Objects always have full metadata
type Object struct {
	fs      *Fs       // what this object is part of
	remote  string    // The remote path
	bytes   int64     // size of the object
	modTime time.Time // time it was last modified; zero means metadata not read yet
	hash    string    // content_hash of the object
}
   178  
   179  // ------------------------------------------------------------
   180  
// Name returns the name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
   185  
// Root returns the root path of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}
   190  
   191  // String converts this Fs to a string
   192  func (f *Fs) String() string {
   193  	return fmt.Sprintf("Dropbox root '%s'", f.root)
   194  }
   195  
// Features returns the optional features of this Fs (set up in NewFs)
func (f *Fs) Features() *fs.Features {
	return f.features
}
   200  
// shouldRetry returns a boolean as to whether this err deserves to be
// retried.  It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
	if err == nil {
		return false, err
	}
	// Use the root cause's message for the string matching below
	baseErrString := errors.Cause(err).Error()
	// handle any official Retry-After header from Dropbox's SDK first
	switch e := err.(type) {
	case auth.RateLimitAPIError:
		if e.RateLimitError.RetryAfter > 0 {
			fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
			// tell the pacer to wait the time the server asked for
			err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
		}
		return true, err
	}
	// Keep old behavior for backward compatibility
	// (an empty error string is also treated as retryable)
	if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
		return true, err
	}
	return fserrors.ShouldRetry(err), err
}
   223  
   224  func checkUploadChunkSize(cs fs.SizeSuffix) error {
   225  	const minChunkSize = fs.Byte
   226  	if cs < minChunkSize {
   227  		return errors.Errorf("%s is less than %s", cs, minChunkSize)
   228  	}
   229  	if cs > maxChunkSize {
   230  		return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
   231  	}
   232  	return nil
   233  }
   234  
   235  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   236  	err = checkUploadChunkSize(cs)
   237  	if err == nil {
   238  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
   239  	}
   240  	return
   241  }
   242  
   243  // NewFs constructs an Fs from the path, container:path
   244  func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
   245  	// Parse config into Options struct
   246  	opt := new(Options)
   247  	err := configstruct.Set(m, opt)
   248  	if err != nil {
   249  		return nil, err
   250  	}
   251  	err = checkUploadChunkSize(opt.ChunkSize)
   252  	if err != nil {
   253  		return nil, errors.Wrap(err, "dropbox: chunk size")
   254  	}
   255  
   256  	// Convert the old token if it exists.  The old token was just
   257  	// just a string, the new one is a JSON blob
   258  	oldToken, ok := m.Get(config.ConfigToken)
   259  	oldToken = strings.TrimSpace(oldToken)
   260  	if ok && oldToken != "" && oldToken[0] != '{' {
   261  		fs.Infof(name, "Converting token to new format")
   262  		newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
   263  		err := config.SetValueAndSave(name, config.ConfigToken, newToken)
   264  		if err != nil {
   265  			return nil, errors.Wrap(err, "NewFS convert token")
   266  		}
   267  	}
   268  
   269  	oAuthClient, _, err := oauthutil.NewClient(name, m, dropboxConfig)
   270  	if err != nil {
   271  		return nil, errors.Wrap(err, "failed to configure dropbox")
   272  	}
   273  
   274  	f := &Fs{
   275  		name:  name,
   276  		opt:   *opt,
   277  		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
   278  	}
   279  	config := dropbox.Config{
   280  		LogLevel:        dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
   281  		Client:          oAuthClient,    // maybe???
   282  		HeaderGenerator: f.headerGenerator,
   283  	}
   284  
   285  	// NOTE: needs to be created pre-impersonation so we can look up the impersonated user
   286  	f.team = team.New(config)
   287  
   288  	if opt.Impersonate != "" {
   289  
   290  		user := team.UserSelectorArg{
   291  			Email: opt.Impersonate,
   292  		}
   293  		user.Tag = "email"
   294  
   295  		members := []*team.UserSelectorArg{&user}
   296  		args := team.NewMembersGetInfoArgs(members)
   297  
   298  		memberIds, err := f.team.MembersGetInfo(args)
   299  
   300  		if err != nil {
   301  			return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
   302  		}
   303  
   304  		config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
   305  	}
   306  
   307  	f.srv = files.New(config)
   308  	f.sharing = sharing.New(config)
   309  	f.users = users.New(config)
   310  	f.features = (&fs.Features{
   311  		CaseInsensitive:         true,
   312  		ReadMimeType:            true,
   313  		CanHaveEmptyDirectories: true,
   314  	}).Fill(f)
   315  	f.setRoot(root)
   316  
   317  	// If root starts with / then use the actual root
   318  	if strings.HasPrefix(root, "/") {
   319  		var acc *users.FullAccount
   320  		err = f.pacer.Call(func() (bool, error) {
   321  			acc, err = f.users.GetCurrentAccount()
   322  			return shouldRetry(err)
   323  		})
   324  		if err != nil {
   325  			return nil, errors.Wrap(err, "get current account failed")
   326  		}
   327  		switch x := acc.RootInfo.(type) {
   328  		case *common.TeamRootInfo:
   329  			f.ns = x.RootNamespaceId
   330  		case *common.UserRootInfo:
   331  			f.ns = x.RootNamespaceId
   332  		default:
   333  			return nil, errors.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
   334  		}
   335  		fs.Debugf(f, "Using root namespace %q", f.ns)
   336  	}
   337  
   338  	// See if the root is actually an object
   339  	_, err = f.getFileMetadata(f.slashRoot)
   340  	if err == nil {
   341  		newRoot := path.Dir(f.root)
   342  		if newRoot == "." {
   343  			newRoot = ""
   344  		}
   345  		f.setRoot(newRoot)
   346  		// return an error with an fs which points to the parent
   347  		return f, fs.ErrorIsFile
   348  	}
   349  	return f, nil
   350  }
   351  
   352  // headerGenerator for dropbox sdk
   353  func (f *Fs) headerGenerator(hostType string, style string, namespace string, route string) map[string]string {
   354  	if f.ns == "" {
   355  		return map[string]string{}
   356  	}
   357  	return map[string]string{
   358  		"Dropbox-API-Path-Root": `{".tag": "namespace_id", "namespace_id": "` + f.ns + `"}`,
   359  	}
   360  }
   361  
   362  // Sets root in f
   363  func (f *Fs) setRoot(root string) {
   364  	f.root = strings.Trim(root, "/")
   365  	f.slashRoot = "/" + f.root
   366  	f.slashRootSlash = f.slashRoot
   367  	if f.root != "" {
   368  		f.slashRootSlash += "/"
   369  	}
   370  }
   371  
// getMetadata gets the metadata for a file or directory
//
// notFound is set (with a nil err) when dropbox reports the path does
// not exist; any other API error is returned in err.
func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
	err = f.pacer.Call(func() (bool, error) {
		entry, err = f.srv.GetMetadata(&files.GetMetadataArg{Path: objPath})
		return shouldRetry(err)
	})
	if err != nil {
		// Translate the SDK's typed "not found" error into the flag
		switch e := err.(type) {
		case files.GetMetadataAPIError:
			switch e.EndpointError.Path.Tag {
			case files.LookupErrorNotFound:
				notFound = true
				err = nil
			}
		}
	}
	return
}
   390  
   391  // getFileMetadata gets the metadata for a file
   392  func (f *Fs) getFileMetadata(filePath string) (fileInfo *files.FileMetadata, err error) {
   393  	entry, notFound, err := f.getMetadata(filePath)
   394  	if err != nil {
   395  		return nil, err
   396  	}
   397  	if notFound {
   398  		return nil, fs.ErrorObjectNotFound
   399  	}
   400  	fileInfo, ok := entry.(*files.FileMetadata)
   401  	if !ok {
   402  		return nil, fs.ErrorNotAFile
   403  	}
   404  	return fileInfo, nil
   405  }
   406  
   407  // getDirMetadata gets the metadata for a directory
   408  func (f *Fs) getDirMetadata(dirPath string) (dirInfo *files.FolderMetadata, err error) {
   409  	entry, notFound, err := f.getMetadata(dirPath)
   410  	if err != nil {
   411  		return nil, err
   412  	}
   413  	if notFound {
   414  		return nil, fs.ErrorDirNotFound
   415  	}
   416  	dirInfo, ok := entry.(*files.FolderMetadata)
   417  	if !ok {
   418  		return nil, fs.ErrorIsFile
   419  	}
   420  	return dirInfo, nil
   421  }
   422  
   423  // Return an Object from a path
   424  //
   425  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   426  func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Object, error) {
   427  	o := &Object{
   428  		fs:     f,
   429  		remote: remote,
   430  	}
   431  	var err error
   432  	if info != nil {
   433  		err = o.setMetadataFromEntry(info)
   434  	} else {
   435  		err = o.readEntryAndSetMetadata()
   436  	}
   437  	if err != nil {
   438  		return nil, err
   439  	}
   440  	return o, nil
   441  }
   442  
// NewObject finds the Object at remote.  If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	// nil info forces a metadata fetch from dropbox
	return f.newObjectWithInfo(remote, nil)
}
   448  
// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	root := f.slashRoot
	if dir != "" {
		root += "/" + dir
	}

	started := false
	var res *files.ListFolderResult
	for {
		if !started {
			// First page: start a non-recursive listing
			arg := files.ListFolderArg{
				Path:      root,
				Recursive: false,
			}
			if root == "/" {
				arg.Path = "" // Specify root folder as empty string
			}
			err = f.pacer.Call(func() (bool, error) {
				res, err = f.srv.ListFolder(&arg)
				return shouldRetry(err)
			})
			if err != nil {
				// Map the SDK's "not found" error onto fs.ErrorDirNotFound
				switch e := err.(type) {
				case files.ListFolderAPIError:
					switch e.EndpointError.Path.Tag {
					case files.LookupErrorNotFound:
						err = fs.ErrorDirNotFound
					}
				}
				return nil, err
			}
			started = true
		} else {
			// Subsequent pages: continue from the last page's cursor
			arg := files.ListFolderContinueArg{
				Cursor: res.Cursor,
			}
			err = f.pacer.Call(func() (bool, error) {
				res, err = f.srv.ListFolderContinue(&arg)
				return shouldRetry(err)
			})
			if err != nil {
				return nil, errors.Wrap(err, "list continue")
			}
		}
		for _, entry := range res.Entries {
			var fileInfo *files.FileMetadata
			var folderInfo *files.FolderMetadata
			var metadata *files.Metadata
			switch info := entry.(type) {
			case *files.FolderMetadata:
				folderInfo = info
				metadata = &info.Metadata
			case *files.FileMetadata:
				fileInfo = info
				metadata = &info.Metadata
			default:
				// skip entry types we don't understand (e.g. deletions)
				fs.Errorf(f, "Unknown type %T", entry)
				continue
			}

			// Only the last element is reliably cased in PathDisplay
			// (see the note at the top of this file), so join just the
			// leaf onto the requested dir.
			entryPath := metadata.PathDisplay
			leaf := path.Base(entryPath)
			remote := path.Join(dir, leaf)
			if folderInfo != nil {
				// Dropbox supplies no modtime for folders, so use now
				d := fs.NewDir(remote, time.Now())
				entries = append(entries, d)
			} else if fileInfo != nil {
				o, err := f.newObjectWithInfo(remote, fileInfo)
				if err != nil {
					return nil, err
				}
				entries = append(entries, o)
			}
		}
		if !res.HasMore {
			break
		}
	}
	return entries, nil
}
   539  
   540  // Put the object
   541  //
   542  // Copy the reader in to the new object which is returned
   543  //
   544  // The new object may have been created if an error is returned
   545  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   546  	// Temporary Object under construction
   547  	o := &Object{
   548  		fs:     f,
   549  		remote: src.Remote(),
   550  	}
   551  	return o, o.Update(ctx, in, src, options...)
   552  }
   553  
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// It simply delegates to Put.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}
   558  
   559  // Mkdir creates the container if it doesn't exist
   560  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   561  	root := path.Join(f.slashRoot, dir)
   562  
   563  	// can't create or run metadata on root
   564  	if root == "/" {
   565  		return nil
   566  	}
   567  
   568  	// check directory doesn't exist
   569  	_, err := f.getDirMetadata(root)
   570  	if err == nil {
   571  		return nil // directory exists already
   572  	} else if err != fs.ErrorDirNotFound {
   573  		return err // some other error
   574  	}
   575  
   576  	// create it
   577  	arg2 := files.CreateFolderArg{
   578  		Path: root,
   579  	}
   580  	err = f.pacer.Call(func() (bool, error) {
   581  		_, err = f.srv.CreateFolderV2(&arg2)
   582  		return shouldRetry(err)
   583  	})
   584  	return err
   585  }
   586  
   587  // Rmdir deletes the container
   588  //
   589  // Returns an error if it isn't empty
   590  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
   591  	root := path.Join(f.slashRoot, dir)
   592  
   593  	// can't remove root
   594  	if root == "/" {
   595  		return errors.New("can't remove root directory")
   596  	}
   597  
   598  	// check directory exists
   599  	_, err := f.getDirMetadata(root)
   600  	if err != nil {
   601  		return errors.Wrap(err, "Rmdir")
   602  	}
   603  
   604  	// check directory empty
   605  	arg := files.ListFolderArg{
   606  		Path:      root,
   607  		Recursive: false,
   608  	}
   609  	if root == "/" {
   610  		arg.Path = "" // Specify root folder as empty string
   611  	}
   612  	var res *files.ListFolderResult
   613  	err = f.pacer.Call(func() (bool, error) {
   614  		res, err = f.srv.ListFolder(&arg)
   615  		return shouldRetry(err)
   616  	})
   617  	if err != nil {
   618  		return errors.Wrap(err, "Rmdir")
   619  	}
   620  	if len(res.Entries) != 0 {
   621  		return errors.New("directory not empty")
   622  	}
   623  
   624  	// remove it
   625  	err = f.pacer.Call(func() (bool, error) {
   626  		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
   627  		return shouldRetry(err)
   628  	})
   629  	return err
   630  }
   631  
// Precision returns the precision of modification times this remote
// supports (one second).
func (f *Fs) Precision() time.Duration {
	return time.Second
}
   636  
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	// Temporary Object under construction
	dstObj := &Object{
		fs:     f,
		remote: remote,
	}

	// Copy server side via the CopyV2 API
	arg := files.RelocationArg{}
	arg.FromPath = srcObj.remotePath()
	arg.ToPath = dstObj.remotePath()
	var err error
	var result *files.RelocationResult
	err = f.pacer.Call(func() (bool, error) {
		result, err = f.srv.CopyV2(&arg)
		return shouldRetry(err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "copy failed")
	}

	// Set the metadata on the destination from the copy result
	fileInfo, ok := result.Metadata.(*files.FileMetadata)
	if !ok {
		return nil, fs.ErrorNotAFile
	}
	err = dstObj.setMetadataFromEntry(fileInfo)
	if err != nil {
		return nil, errors.Wrap(err, "copy failed")
	}

	return dstObj, nil
}
   685  
   686  // Purge deletes all the files and the container
   687  //
   688  // Optional interface: Only implement this if you have a way of
   689  // deleting all the files quicker than just running Remove() on the
   690  // result of List()
   691  func (f *Fs) Purge(ctx context.Context) (err error) {
   692  	// Let dropbox delete the filesystem tree
   693  	err = f.pacer.Call(func() (bool, error) {
   694  		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
   695  		return shouldRetry(err)
   696  	})
   697  	return err
   698  }
   699  
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Temporary Object under construction
	dstObj := &Object{
		fs:     f,
		remote: remote,
	}

	// Do the move server side via the MoveV2 API
	arg := files.RelocationArg{}
	arg.FromPath = srcObj.remotePath()
	arg.ToPath = dstObj.remotePath()
	var err error
	var result *files.RelocationResult
	err = f.pacer.Call(func() (bool, error) {
		result, err = f.srv.MoveV2(&arg)
		return shouldRetry(err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "move failed")
	}

	// Set the metadata on the destination from the move result
	fileInfo, ok := result.Metadata.(*files.FileMetadata)
	if !ok {
		return nil, fs.ErrorNotAFile
	}
	err = dstObj.setMetadataFromEntry(fileInfo)
	if err != nil {
		return nil, errors.Wrap(err, "move failed")
	}
	return dstObj, nil
}
   747  
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
//
// If dropbox reports the link already exists, the existing link is
// looked up and returned instead.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
	absPath := "/" + path.Join(f.Root(), remote)
	fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
	createArg := sharing.CreateSharedLinkWithSettingsArg{
		Path: absPath,
	}
	var linkRes sharing.IsSharedLinkMetadata
	err = f.pacer.Call(func() (bool, error) {
		linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
		return shouldRetry(err)
	})

	// "shared_link_already_exists" - fetch the existing link instead
	if err != nil && strings.Contains(err.Error(), sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
		fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
		listArg := sharing.ListSharedLinksArg{
			Path:       absPath,
			DirectOnly: true,
		}
		var listRes *sharing.ListSharedLinksResult
		err = f.pacer.Call(func() (bool, error) {
			listRes, err = f.sharing.ListSharedLinks(&listArg)
			return shouldRetry(err)
		})
		if err != nil {
			return
		}
		if len(listRes.Links) == 0 {
			err = errors.New("Dropbox says the sharing link already exists, but list came back empty")
			return
		}
		// NOTE(review): assumes the first listed link is the right one
		// when several exist - confirm against the API docs.
		linkRes = listRes.Links[0]
	}
	if err == nil {
		// Extract the URL from whichever metadata type came back
		switch res := linkRes.(type) {
		case *sharing.FileLinkMetadata:
			link = res.Url
		case *sharing.FolderLinkMetadata:
			link = res.Url
		default:
			err = fmt.Errorf("Don't know how to extract link, response has unknown format: %T", res)
		}
	}
	return
}
   793  
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := path.Join(srcFs.slashRoot, srcRemote)
	dstPath := path.Join(f.slashRoot, dstRemote)

	// Check if destination exists
	_, err := f.getDirMetadata(dstPath)
	if err == nil {
		return fs.ErrorDirExists
	} else if err != fs.ErrorDirNotFound {
		return err
	}

	// Make sure the parent directory exists
	// ...apparently not necessary

	// Do the move server side via the MoveV2 API
	arg := files.RelocationArg{}
	arg.FromPath = srcPath
	arg.ToPath = dstPath
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.srv.MoveV2(&arg)
		return shouldRetry(err)
	})
	if err != nil {
		return errors.Wrap(err, "MoveDir failed")
	}

	return nil
}
   836  
   837  // About gets quota information
   838  func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
   839  	var q *users.SpaceUsage
   840  	err = f.pacer.Call(func() (bool, error) {
   841  		q, err = f.users.GetSpaceUsage()
   842  		return shouldRetry(err)
   843  	})
   844  	if err != nil {
   845  		return nil, errors.Wrap(err, "about failed")
   846  	}
   847  	var total uint64
   848  	if q.Allocation != nil {
   849  		if q.Allocation.Individual != nil {
   850  			total += q.Allocation.Individual.Allocated
   851  		}
   852  		if q.Allocation.Team != nil {
   853  			total += q.Allocation.Team.Allocated
   854  		}
   855  	}
   856  	usage = &fs.Usage{
   857  		Total: fs.NewUsageValue(int64(total)),          // quota of bytes that can be used
   858  		Used:  fs.NewUsageValue(int64(q.Used)),         // bytes in use
   859  		Free:  fs.NewUsageValue(int64(total - q.Used)), // bytes which can be uploaded before reaching the quota
   860  	}
   861  	return usage, nil
   862  }
   863  
// Hashes returns the supported hash sets - only the Dropbox
// content_hash is available.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.Dropbox)
}
   868  
   869  // ------------------------------------------------------------
   870  
// Fs returns the parent Fs this object belongs to
func (o *Object) Fs() fs.Info {
	return o.fs
}
   875  
   876  // Return a string version
   877  func (o *Object) String() string {
   878  	if o == nil {
   879  		return "<nil>"
   880  	}
   881  	return o.remote
   882  }
   883  
// Remote returns the remote path of the object
func (o *Object) Remote() string {
	return o.remote
}
   888  
   889  // Hash returns the dropbox special hash
   890  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
   891  	if t != hash.Dropbox {
   892  		return "", hash.ErrUnsupported
   893  	}
   894  	err := o.readMetaData()
   895  	if err != nil {
   896  		return "", errors.Wrap(err, "failed to read hash from metadata")
   897  	}
   898  	return o.hash, nil
   899  }
   900  
// Size returns the size of an object in bytes
// (cached from the last metadata read)
func (o *Object) Size() int64 {
	return o.bytes
}
   905  
// setMetadataFromEntry sets the fs data from a files.FileMetadata
//
// This isn't a complete set of metadata and has an inaccurate date.
// It caches the size, client-side modification time and content hash.
func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
	o.bytes = int64(info.Size)
	o.modTime = info.ClientModified
	o.hash = info.ContentHash
	return nil
}
   915  
// Reads the entry for a file from dropbox
//
// Returns the full files.FileMetadata for the object's remote path.
func (o *Object) readEntry() (*files.FileMetadata, error) {
	return o.fs.getFileMetadata(o.remotePath())
}
   920  
   921  // Read entry if not set and set metadata from it
   922  func (o *Object) readEntryAndSetMetadata() error {
   923  	// Last resort set time from client
   924  	if !o.modTime.IsZero() {
   925  		return nil
   926  	}
   927  	entry, err := o.readEntry()
   928  	if err != nil {
   929  		return err
   930  	}
   931  	return o.setMetadataFromEntry(entry)
   932  }
   933  
// Returns the remote path for the object
//
// Joins the configured root with the object name to form an absolute
// Dropbox path.
func (o *Object) remotePath() string {
	return o.fs.slashRootSlash + o.remote
}
   938  
   939  // readMetaData gets the info if it hasn't already been fetched
   940  func (o *Object) readMetaData() (err error) {
   941  	if !o.modTime.IsZero() {
   942  		return nil
   943  	}
   944  	// Last resort
   945  	return o.readEntryAndSetMetadata()
   946  }
   947  
   948  // ModTime returns the modification time of the object
   949  //
   950  // It attempts to read the objects mtime and if that isn't present the
   951  // LastModified returned in the http headers
   952  func (o *Object) ModTime(ctx context.Context) time.Time {
   953  	err := o.readMetaData()
   954  	if err != nil {
   955  		fs.Debugf(o, "Failed to read metadata: %v", err)
   956  		return time.Now()
   957  	}
   958  	return o.modTime
   959  }
   960  
// SetModTime sets the modification time of the local fs object
//
// Commits the datastore
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	// Dropbox doesn't have a way of doing this so returning this
	// error will cause the file to be deleted first then
	// re-uploaded to set the time.
	return fs.ErrorCantSetModTimeWithoutDelete
}
   970  
// Storable returns whether this object is storable
//
// All Dropbox objects are storable.
func (o *Object) Storable() bool {
	return true
}
   975  
   976  // Open an object for read
   977  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
   978  	headers := fs.OpenOptionHeaders(options)
   979  	arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
   980  	err = o.fs.pacer.Call(func() (bool, error) {
   981  		_, in, err = o.fs.srv.Download(&arg)
   982  		return shouldRetry(err)
   983  	})
   984  
   985  	switch e := err.(type) {
   986  	case files.DownloadAPIError:
   987  		// Don't attempt to retry copyright violation errors
   988  		if e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
   989  			return nil, fserrors.NoRetryError(err)
   990  		}
   991  	}
   992  
   993  	return
   994  }
   995  
// uploadChunked uploads the object in parts via a Dropbox upload session
//
// Will work optimally if size is >= uploadChunkSize. If the size is either
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
//
// The session flow is: UploadSessionStart with the first chunk,
// UploadSessionAppendV2 for each further full chunk, then
// UploadSessionFinish which commits the file and carries the remaining
// bytes.
func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
	chunkSize := int64(o.fs.opt.ChunkSize)
	chunks := 0
	if size != -1 {
		// Total number of chunks, counting the (possibly short) final one
		chunks = int(size/chunkSize) + 1
	}
	// Wrap the reader so we can track how many bytes have been consumed;
	// this drives the upload session cursor offset below.
	in := readers.NewCountingReader(in0)
	buf := make([]byte, int(chunkSize))

	// fmtChunk logs progress for chunk cur; last marks the final chunk
	// when the total number of chunks is unknown (streaming).
	fmtChunk := func(cur int, last bool) {
		if chunks == 0 && last {
			fs.Debugf(o, "Streaming chunk %d/%d", cur, cur)
		} else if chunks == 0 {
			fs.Debugf(o, "Streaming chunk %d/unknown", cur)
		} else {
			fs.Debugf(o, "Uploading chunk %d/%d", cur, chunks)
		}
	}

	// write the first chunk
	fmtChunk(1, false)
	var res *files.UploadSessionStartResult
	// The repeatable reader buffers the chunk so it can be re-sent on retry.
	chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
	err = o.fs.pacer.Call(func() (bool, error) {
		// seek to the start in case this is a retry
		if _, err = chunk.Seek(0, io.SeekStart); err != nil {
			return false, nil
		}
		res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}

	cursor := files.UploadSessionCursor{
		SessionId: res.SessionId,
		Offset:    0,
	}
	appendArg := files.UploadSessionAppendArg{
		Cursor: &cursor,
		Close:  false,
	}

	// write more whole chunks (if any)
	currentChunk := 2
	for {
		if chunks > 0 && currentChunk >= chunks {
			// if the size is known, only upload full chunks. Remaining bytes are uploaded with
			// the UploadSessionFinish request.
			break
		} else if chunks == 0 && in.BytesRead()-cursor.Offset < uint64(chunkSize) {
			// if the size is unknown, upload as long as we can read full chunks from the reader.
			// The UploadSessionFinish request will not contain any payload.
			break
		}
		// Advance the cursor to the total number of bytes consumed so far
		cursor.Offset = in.BytesRead()
		fmtChunk(currentChunk, false)
		chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
		err = o.fs.pacer.Call(func() (bool, error) {
			// seek to the start in case this is a retry
			if _, err = chunk.Seek(0, io.SeekStart); err != nil {
				return false, nil
			}
			err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
			// after the first chunk is uploaded, we retry everything
			return err != nil, err
		})
		if err != nil {
			return nil, err
		}
		currentChunk++
	}

	// write the remains
	cursor.Offset = in.BytesRead()
	args := &files.UploadSessionFinishArg{
		Cursor: &cursor,
		Commit: commitInfo,
	}
	fmtChunk(currentChunk, true)
	// No length limit here - the finish request carries everything left
	chunk = readers.NewRepeatableReaderBuffer(in, buf)
	err = o.fs.pacer.Call(func() (bool, error) {
		// seek to the start in case this is a retry
		if _, err = chunk.Seek(0, io.SeekStart); err != nil {
			return false, nil
		}
		entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
		// after the first chunk is uploaded, we retry everything
		return err != nil, err
	})
	if err != nil {
		return nil, err
	}
	return entry, nil
}
  1097  
  1098  // Update the already existing object
  1099  //
  1100  // Copy the reader into the object updating modTime and size
  1101  //
  1102  // The new object may have been created if an error is returned
  1103  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  1104  	remote := o.remotePath()
  1105  	if ignoredFiles.MatchString(remote) {
  1106  		fs.Logf(o, "File name disallowed - not uploading")
  1107  		return nil
  1108  	}
  1109  	commitInfo := files.NewCommitInfo(o.remotePath())
  1110  	commitInfo.Mode.Tag = "overwrite"
  1111  	// The Dropbox API only accepts timestamps in UTC with second precision.
  1112  	commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
  1113  
  1114  	size := src.Size()
  1115  	var err error
  1116  	var entry *files.FileMetadata
  1117  	if size > int64(o.fs.opt.ChunkSize) || size == -1 {
  1118  		entry, err = o.uploadChunked(in, commitInfo, size)
  1119  	} else {
  1120  		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  1121  			entry, err = o.fs.srv.Upload(commitInfo, in)
  1122  			return shouldRetry(err)
  1123  		})
  1124  	}
  1125  	if err != nil {
  1126  		return errors.Wrap(err, "upload failed")
  1127  	}
  1128  	return o.setMetadataFromEntry(entry)
  1129  }
  1130  
  1131  // Remove an object
  1132  func (o *Object) Remove(ctx context.Context) (err error) {
  1133  	err = o.fs.pacer.Call(func() (bool, error) {
  1134  		_, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
  1135  		return shouldRetry(err)
  1136  	})
  1137  	return err
  1138  }
  1139  
// Check the interfaces are satisfied
//
// These compile-time assertions guarantee *Fs and *Object implement
// the optional rclone interfaces the backend advertises.
var (
	_ fs.Fs           = (*Fs)(nil)
	_ fs.Copier       = (*Fs)(nil)
	_ fs.Purger       = (*Fs)(nil)
	_ fs.PutStreamer  = (*Fs)(nil)
	_ fs.Mover        = (*Fs)(nil)
	_ fs.PublicLinker = (*Fs)(nil)
	_ fs.DirMover     = (*Fs)(nil)
	_ fs.Abouter      = (*Fs)(nil)
	_ fs.Object       = (*Object)(nil)
)