github.com/rclone/rclone@v1.66.1-0.20240517100346-7b89735ae726/backend/dropbox/dropbox.go (about)

     1  // Package dropbox provides an interface to Dropbox object storage
     2  package dropbox
     3  
     4  // FIXME dropbox for business would be quite easy to add
     5  
     6  /*
     7  The Case folding of PathDisplay problem
     8  
     9  From the docs:
    10  
    11  path_display String. The cased path to be used for display purposes
    12  only. In rare instances the casing will not correctly match the user's
    13  filesystem, but this behavior will match the path provided in the Core
    14  API v1, and at least the last path component will have the correct
    15  casing. Changes to only the casing of paths won't be returned by
    16  list_folder/continue. This field will be null if the file or folder is
    17  not mounted. This field is optional.
    18  
    19  We solve this by not implementing the ListR interface.  The dropbox
    20  remote will recurse directory by directory only using the last element
    21  of path_display and all will be well.
    22  */
    23  
    24  import (
    25  	"context"
    26  	"errors"
    27  	"fmt"
    28  	"io"
    29  	"path"
    30  	"regexp"
    31  	"strings"
    32  	"time"
    33  	"unicode/utf8"
    34  
    35  	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
    36  	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/auth"
    37  	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/common"
    38  	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
    39  	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/sharing"
    40  	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/team"
    41  	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/users"
    42  	"github.com/rclone/rclone/backend/dropbox/dbhash"
    43  	"github.com/rclone/rclone/fs"
    44  	"github.com/rclone/rclone/fs/config"
    45  	"github.com/rclone/rclone/fs/config/configmap"
    46  	"github.com/rclone/rclone/fs/config/configstruct"
    47  	"github.com/rclone/rclone/fs/config/obscure"
    48  	"github.com/rclone/rclone/fs/fserrors"
    49  	"github.com/rclone/rclone/fs/hash"
    50  	"github.com/rclone/rclone/lib/batcher"
    51  	"github.com/rclone/rclone/lib/encoder"
    52  	"github.com/rclone/rclone/lib/oauthutil"
    53  	"github.com/rclone/rclone/lib/pacer"
    54  	"github.com/rclone/rclone/lib/readers"
    55  	"golang.org/x/oauth2"
    56  )
    57  
// Constants
const (
	rcloneClientID              = "5jcck7diasz0rqy"
	rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g" // obscured, revealed via obscure.MustReveal below
	defaultMinSleep             = fs.Duration(10 * time.Millisecond)
	maxSleep                    = 2 * time.Second
	decayConstant               = 2 // bigger for slower decay, exponential
	// Upload chunk size - setting too small makes uploads slow.
	// Chunks are buffered into memory for retries.
	//
	// Speed vs chunk size uploading a 1 GiB file on 2017-11-22
	//
	// Chunk Size MiB, Speed MiB/s, % of max
	// 1	1.364	11%
	// 2	2.443	19%
	// 4	4.288	33%
	// 8	6.79	52%
	// 16	8.916	69%
	// 24	10.195	79%
	// 32	10.427	81%
	// 40	10.96	85%
	// 48	11.828	91%
	// 56	11.763	91%
	// 64	12.047	93%
	// 96	12.302	95%
	// 128	12.945	100%
	//
	// Choose 48 MiB which is 91% of Maximum speed.  rclone by
	// default does 4 transfers so this should use 4*48 MiB = 192 MiB
	// by default.
	defaultChunkSize = 48 * fs.Mebi
	maxChunkSize     = 150 * fs.Mebi // upper bound enforced by checkUploadChunkSize
	// Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
	maxFileNameLength = 255
)
    93  
var (
	// Description of how to auth for this app
	dropboxConfig = &oauth2.Config{
		Scopes: []string{
			"files.metadata.write",
			"files.content.write",
			"files.content.read",
			"sharing.write",
			"account_info.read", // needed for About
			// "file_requests.write",
			// "members.read", // needed for impersonate - but causes app to need to be approved by Dropbox Team Admin during the flow
			// "team_data.member"
		},
		// Endpoint: oauth2.Endpoint{
		// 	AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
		// 	TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
		// },
		Endpoint:     dropbox.OAuthEndpoint(""),
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectLocalhostURL,
	}
	// A regexp matching path names for files Dropbox ignores
	// See https://www.dropbox.com/en/help/145 - Ignored files
	ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)

	// DbHashType is the hash.Type for Dropbox
	// (registered in init below as the 64-character "DropboxHash")
	DbHashType hash.Type

	// Errors
	errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode"))

	// Configure the batcher
	defaultBatcherOptions = batcher.Options{
		MaxBatchSize:          1000,
		DefaultTimeoutSync:    500 * time.Millisecond,
		DefaultTimeoutAsync:   10 * time.Second,
		DefaultBatchSizeAsync: 100,
	}
)
   134  
   135  // Gets an oauth config with the right scopes
   136  func getOauthConfig(m configmap.Mapper) *oauth2.Config {
   137  	// If not impersonating, use standard scopes
   138  	if impersonate, _ := m.Get("impersonate"); impersonate == "" {
   139  		return dropboxConfig
   140  	}
   141  	// Make a copy of the config
   142  	config := *dropboxConfig
   143  	// Make a copy of the scopes with extra scopes requires appended
   144  	config.Scopes = append(config.Scopes, "members.read", "team_data.member")
   145  	return &config
   146  }
   147  
// Register with Fs
func init() {
	// Register the Dropbox content_hash as an rclone hash type (64 hex chars)
	DbHashType = hash.RegisterHash("dropbox", "DropboxHash", 64, dbhash.New)
	fs.Register(&fs.RegInfo{
		Name:        "dropbox",
		Description: "Dropbox",
		NewFs:       NewFs,
		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
			// Dropbox doesn't use the standard OAuth2 "offline" access
			// type; a refresh token is requested via its own
			// token_access_type parameter instead.
			return oauthutil.ConfigOut("", &oauthutil.Options{
				OAuth2Config: getOauthConfig(m),
				NoOffline:    true,
				OAuth2Opts: []oauth2.AuthCodeOption{
					oauth2.SetAuthURLParam("token_access_type", "offline"),
				},
			})
		},
		Options: append(append(oauthutil.SharedOptions, []fs.Option{{
			Name: "chunk_size",
			Help: fmt.Sprintf(`Upload chunk size (< %v).

Any files larger than this will be uploaded in chunks of this size.

Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries.  Setting this larger will increase the speed
slightly (at most 10%% for 128 MiB in tests) at the cost of using more
memory.  It can be set smaller if you are tight on memory.`, maxChunkSize),
			Default:  defaultChunkSize,
			Advanced: true,
		}, {
			Name: "impersonate",
			Help: `Impersonate this user when using a business account.

Note that if you want to use impersonate, you should make sure this
flag is set when running "rclone config" as this will cause rclone to
request the "members.read" scope which it won't normally. This is
needed to lookup a members email address into the internal ID that
dropbox uses in the API.

Using the "members.read" scope will require a Dropbox Team Admin
to approve during the OAuth flow.

You will have to use your own App (setting your own client_id and
client_secret) to use this option as currently rclone's default set of
permissions doesn't include "members.read". This can be added once
v1.55 or later is in use everywhere.
`,
			Default:   "",
			Advanced:  true,
			Sensitive: true,
		}, {
			Name: "shared_files",
			Help: `Instructs rclone to work on individual shared files.

In this mode rclone's features are extremely limited - only list (ls, lsl, etc.) 
operations and read operations (e.g. downloading) are supported in this mode.
All other operations will be disabled.`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "shared_folders",
			Help: `Instructs rclone to work on shared folders.
			
When this flag is used with no path only the List operation is supported and 
all available shared folders will be listed. If you specify a path the first part 
will be interpreted as the name of shared folder. Rclone will then try to mount this 
shared to the root namespace. On success shared folder rclone proceeds normally. 
The shared folder is now pretty much a normal folder and all normal operations 
are supported. 

Note that we don't unmount the shared folder afterwards so the 
--dropbox-shared-folders can be omitted after the first use of a particular 
shared folder.`,
			Default:  false,
			Advanced: true,
		}, {
			Name:     "pacer_min_sleep",
			Default:  defaultMinSleep,
			Help:     "Minimum time to sleep between API calls.",
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// https://www.dropbox.com/help/syncing-uploads/files-not-syncing lists / and \
			// as invalid characters.
			// Testing revealed names with trailing spaces and the DEL character don't work.
			// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
			Default: encoder.Base |
				encoder.EncodeBackSlash |
				encoder.EncodeDel |
				encoder.EncodeRightSpace |
				encoder.EncodeInvalidUtf8,
		}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
	})
}
   243  
// Options defines the configuration for this backend
type Options struct {
	ChunkSize     fs.SizeSuffix        `config:"chunk_size"`      // upload chunk size, buffered in memory one at a time
	Impersonate   string               `config:"impersonate"`     // business account member to impersonate (email)
	SharedFiles   bool                 `config:"shared_files"`    // work on individual received shared files
	SharedFolders bool                 `config:"shared_folders"`  // work on shared folders
	BatchMode     string               `config:"batch_mode"`      // upload batching mode (see lib/batcher)
	BatchSize     int                  `config:"batch_size"`      // batch size (see lib/batcher)
	BatchTimeout  fs.Duration          `config:"batch_timeout"`   // batch timeout (see lib/batcher)
	AsyncBatch    bool                 `config:"async_batch"`     // commit batches asynchronously (see lib/batcher)
	PacerMinSleep fs.Duration          `config:"pacer_min_sleep"` // minimum time to sleep between API calls
	Enc           encoder.MultiEncoder `config:"encoding"`        // filename encoding/decoding rules
}
   257  
// Fs represents a remote dropbox server
type Fs struct {
	name           string         // name of this remote
	root           string         // the path we are working on
	opt            Options        // parsed options
	ci             *fs.ConfigInfo // global config
	features       *fs.Features   // optional features
	srv            files.Client   // the connection to the dropbox server
	svc            files.Client   // the connection to the dropbox server (unauthorized)
	sharing        sharing.Client // as above, but for generating sharing links
	users          users.Client   // as above, but for accessing user information
	team           team.Client    // for the Teams API
	slashRoot      string         // root with "/" prefix, lowercase
	slashRootSlash string         // root with "/" prefix and postfix, lowercase
	pacer          *fs.Pacer      // To pace the API calls
	ns             string         // The namespace we are using or "" for none
	batcher        *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata] // batches upload session finishes via commitBatch
}
   276  
// Object describes a dropbox object
//
// Dropbox Objects always have full metadata
type Object struct {
	fs      *Fs       // what this object is part of
	id      string    // dropbox id of the object (not populated in this file chunk - confirm where it is set)
	url     string    // preview URL, set for received shared files (see listReceivedFiles)
	remote  string    // The remote path
	bytes   int64     // size of the object
	modTime time.Time // time it was last modified
	hash    string    // content_hash of the object
}
   289  
// Name returns the name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
   294  
// Root returns the root path of the remote (as passed into NewFs,
// with leading/trailing slashes trimmed by setRoot)
func (f *Fs) Root() string {
	return f.root
}
   299  
   300  // String converts this Fs to a string
   301  func (f *Fs) String() string {
   302  	return fmt.Sprintf("Dropbox root '%s'", f.root)
   303  }
   304  
// Features returns the optional features of this Fs
// (filled in by NewFs)
func (f *Fs) Features() *fs.Features {
	return f.features
}
   309  
   310  // shouldRetry returns a boolean as to whether this err deserves to be
   311  // retried.  It returns the err as a convenience
   312  func shouldRetry(ctx context.Context, err error) (bool, error) {
   313  	if fserrors.ContextError(ctx, &err) {
   314  		return false, err
   315  	}
   316  	if err == nil {
   317  		return false, err
   318  	}
   319  	errString := err.Error()
   320  	// First check for specific errors
   321  	if strings.Contains(errString, "insufficient_space") {
   322  		return false, fserrors.FatalError(err)
   323  	} else if strings.Contains(errString, "malformed_path") {
   324  		return false, fserrors.NoRetryError(err)
   325  	}
   326  	// Then handle any official Retry-After header from Dropbox's SDK
   327  	switch e := err.(type) {
   328  	case auth.RateLimitAPIError:
   329  		if e.RateLimitError.RetryAfter > 0 {
   330  			fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
   331  			err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
   332  		}
   333  		return true, err
   334  	}
   335  	// Keep old behavior for backward compatibility
   336  	if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
   337  		return true, err
   338  	}
   339  	return fserrors.ShouldRetry(err), err
   340  }
   341  
   342  func checkUploadChunkSize(cs fs.SizeSuffix) error {
   343  	const minChunkSize = fs.SizeSuffixBase
   344  	if cs < minChunkSize {
   345  		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
   346  	}
   347  	if cs > maxChunkSize {
   348  		return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
   349  	}
   350  	return nil
   351  }
   352  
   353  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   354  	err = checkUploadChunkSize(cs)
   355  	if err == nil {
   356  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
   357  	}
   358  	return
   359  }
   360  
// NewFs constructs an Fs from the path, container:path
//
// It may return fs.ErrorIsFile (with a valid Fs pointing at the
// parent) if the path turns out to be a file rather than a directory.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, fmt.Errorf("dropbox: chunk size: %w", err)
	}

	// Convert the old token if it exists.  The old token was
	// just a string, the new one is a JSON blob
	oldToken, ok := m.Get(config.ConfigToken)
	oldToken = strings.TrimSpace(oldToken)
	if ok && oldToken != "" && oldToken[0] != '{' {
		fs.Infof(name, "Converting token to new format")
		newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
		err := config.SetValueAndSave(name, config.ConfigToken, newToken)
		if err != nil {
			return nil, fmt.Errorf("NewFS convert token: %w", err)
		}
	}

	oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, getOauthConfig(m))
	if err != nil {
		return nil, fmt.Errorf("failed to configure dropbox: %w", err)
	}

	ci := fs.GetConfig(ctx)

	f := &Fs{
		name:  name,
		opt:   *opt,
		ci:    ci,
		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	// Configure the upload batcher from the batch_* options
	batcherOptions := defaultBatcherOptions
	batcherOptions.Mode = f.opt.BatchMode
	batcherOptions.Size = f.opt.BatchSize
	batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
	f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
	if err != nil {
		return nil, err
	}
	cfg := dropbox.Config{
		LogLevel:        dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
		Client:          oAuthClient,    // maybe???
		HeaderGenerator: f.headerGenerator,
	}

	// unauthorized config for endpoints that fail with auth
	ucfg := dropbox.Config{
		LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
	}

	// NOTE: needs to be created pre-impersonation so we can look up the impersonated user
	f.team = team.New(cfg)

	if opt.Impersonate != "" {
		// Resolve the impersonated member's email to the internal
		// team member ID which the API requires in AsMemberID
		user := team.UserSelectorArg{
			Email: opt.Impersonate,
		}
		user.Tag = "email"

		members := []*team.UserSelectorArg{&user}
		args := team.NewMembersGetInfoArgs(members)

		memberIDs, err := f.team.MembersGetInfo(args)
		if err != nil {
			return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
		}
		if len(memberIDs) == 0 || memberIDs[0].MemberInfo == nil || memberIDs[0].MemberInfo.Profile == nil {
			return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
		}

		cfg.AsMemberID = memberIDs[0].MemberInfo.Profile.MemberProfile.TeamMemberId
	}

	f.srv = files.New(cfg)
	f.svc = files.New(ucfg)
	f.sharing = sharing.New(cfg)
	f.users = users.New(cfg)
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		ReadMimeType:            false,
		CanHaveEmptyDirectories: true,
	})

	// do not fill features yet
	if f.opt.SharedFiles {
		f.setRoot(root)
		if f.root == "" {
			return f, nil
		}
		// If the root names a received shared file, return the parent
		// Fs (root reset to "") with fs.ErrorIsFile
		_, err := f.findSharedFile(ctx, f.root)
		f.root = ""
		if err == nil {
			return f, fs.ErrorIsFile
		}
		return f, nil
	}

	if f.opt.SharedFolders {
		f.setRoot(root)
		if f.root == "" {
			return f, nil // our root is empty so we probably want to list shared folders
		}

		dir := path.Dir(f.root)
		if dir == "." {
			dir = f.root
		}

		// root is not empty so we have to find the right shared folder if it exists
		id, err := f.findSharedFolder(ctx, dir)
		if err != nil {
			// if we didn't find the specified shared folder we have to bail out here
			return nil, err
		}
		// we found the specified shared folder so let's mount it
		// this will add it to the users normal root namespace and allows us
		// to actually perform operations on it using the normal api endpoints.
		err = f.mountSharedFolder(ctx, id)
		if err != nil {
			switch e := err.(type) {
			case sharing.MountFolderAPIError:
				// "already mounted" is fine - anything else is fatal
				if e.EndpointError == nil || (e.EndpointError != nil && e.EndpointError.Tag != sharing.MountFolderErrorAlreadyMounted) {
					return nil, err
				}
			default:
				return nil, err
			}
			// if the mount failed we have to abort here
		}
		// if the mount succeeded it's now a normal folder in the users root namespace
		// we disable shared folder mode and proceed normally
		f.opt.SharedFolders = false
	}

	f.features.Fill(ctx, f)

	// If root starts with / then use the actual root
	if strings.HasPrefix(root, "/") {
		var acc *users.FullAccount
		err = f.pacer.Call(func() (bool, error) {
			acc, err = f.users.GetCurrentAccount()
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return nil, fmt.Errorf("get current account failed: %w", err)
		}
		switch x := acc.RootInfo.(type) {
		case *common.TeamRootInfo:
			f.ns = x.RootNamespaceId
		case *common.UserRootInfo:
			f.ns = x.RootNamespaceId
		default:
			return nil, fmt.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
		}
		fs.Debugf(f, "Using root namespace %q", f.ns)
	}
	f.setRoot(root)

	// See if the root is actually an object
	if f.root != "" {
		_, err = f.getFileMetadata(ctx, f.slashRoot)
		if err == nil {
			newRoot := path.Dir(f.root)
			if newRoot == "." {
				newRoot = ""
			}
			f.setRoot(newRoot)
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	return f, nil
}
   542  
   543  // headerGenerator for dropbox sdk
   544  func (f *Fs) headerGenerator(hostType string, namespace string, route string) map[string]string {
   545  	if f.ns == "" {
   546  		return map[string]string{}
   547  	}
   548  	return map[string]string{
   549  		"Dropbox-API-Path-Root": `{".tag": "namespace_id", "namespace_id": "` + f.ns + `"}`,
   550  	}
   551  }
   552  
   553  // Sets root in f
   554  func (f *Fs) setRoot(root string) {
   555  	f.root = strings.Trim(root, "/")
   556  	f.slashRoot = "/" + f.root
   557  	f.slashRootSlash = f.slashRoot
   558  	if f.root != "" {
   559  		f.slashRootSlash += "/"
   560  	}
   561  }
   562  
   563  // getMetadata gets the metadata for a file or directory
   564  func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMetadata, notFound bool, err error) {
   565  	err = f.pacer.Call(func() (bool, error) {
   566  		entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
   567  			Path: f.opt.Enc.FromStandardPath(objPath),
   568  		})
   569  		return shouldRetry(ctx, err)
   570  	})
   571  	if err != nil {
   572  		switch e := err.(type) {
   573  		case files.GetMetadataAPIError:
   574  			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
   575  				notFound = true
   576  				err = nil
   577  			}
   578  		}
   579  	}
   580  	return
   581  }
   582  
   583  // getFileMetadata gets the metadata for a file
   584  func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *files.FileMetadata, err error) {
   585  	entry, notFound, err := f.getMetadata(ctx, filePath)
   586  	if err != nil {
   587  		return nil, err
   588  	}
   589  	if notFound {
   590  		return nil, fs.ErrorObjectNotFound
   591  	}
   592  	fileInfo, ok := entry.(*files.FileMetadata)
   593  	if !ok {
   594  		if _, ok = entry.(*files.FolderMetadata); ok {
   595  			return nil, fs.ErrorIsDir
   596  		}
   597  		return nil, fs.ErrorNotAFile
   598  	}
   599  	return fileInfo, nil
   600  }
   601  
   602  // getDirMetadata gets the metadata for a directory
   603  func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (dirInfo *files.FolderMetadata, err error) {
   604  	entry, notFound, err := f.getMetadata(ctx, dirPath)
   605  	if err != nil {
   606  		return nil, err
   607  	}
   608  	if notFound {
   609  		return nil, fs.ErrorDirNotFound
   610  	}
   611  	dirInfo, ok := entry.(*files.FolderMetadata)
   612  	if !ok {
   613  		return nil, fs.ErrorIsFile
   614  	}
   615  	return dirInfo, nil
   616  }
   617  
   618  // Return an Object from a path
   619  //
   620  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   621  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *files.FileMetadata) (fs.Object, error) {
   622  	o := &Object{
   623  		fs:     f,
   624  		remote: remote,
   625  	}
   626  	var err error
   627  	if info != nil {
   628  		err = o.setMetadataFromEntry(info)
   629  	} else {
   630  		err = o.readEntryAndSetMetadata(ctx)
   631  	}
   632  	if err != nil {
   633  		return nil, err
   634  	}
   635  	return o, nil
   636  }
   637  
   638  // NewObject finds the Object at remote.  If it can't be found
   639  // it returns the error fs.ErrorObjectNotFound.
   640  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   641  	if f.opt.SharedFiles {
   642  		return f.findSharedFile(ctx, remote)
   643  	}
   644  	return f.newObjectWithInfo(ctx, remote, nil)
   645  }
   646  
   647  // listSharedFolders lists all available shared folders mounted and not mounted
   648  // we'll need the id later so we have to return them in original format
   649  func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
   650  	started := false
   651  	var res *sharing.ListFoldersResult
   652  	for {
   653  		if !started {
   654  			arg := sharing.ListFoldersArgs{
   655  				Limit: 100,
   656  			}
   657  			err := f.pacer.Call(func() (bool, error) {
   658  				res, err = f.sharing.ListFolders(&arg)
   659  				return shouldRetry(ctx, err)
   660  			})
   661  			if err != nil {
   662  				return nil, err
   663  			}
   664  			started = true
   665  		} else {
   666  			arg := sharing.ListFoldersContinueArg{
   667  				Cursor: res.Cursor,
   668  			}
   669  			err := f.pacer.Call(func() (bool, error) {
   670  				res, err = f.sharing.ListFoldersContinue(&arg)
   671  				return shouldRetry(ctx, err)
   672  			})
   673  			if err != nil {
   674  				return nil, fmt.Errorf("list continue: %w", err)
   675  			}
   676  		}
   677  		for _, entry := range res.Entries {
   678  			leaf := f.opt.Enc.ToStandardName(entry.Name)
   679  			d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
   680  			entries = append(entries, d)
   681  			if err != nil {
   682  				return nil, err
   683  			}
   684  		}
   685  		if res.Cursor == "" {
   686  			break
   687  		}
   688  	}
   689  
   690  	return entries, nil
   691  }
   692  
   693  // findSharedFolder find the id for a given shared folder name
   694  // somewhat annoyingly there is no endpoint to query a shared folder by it's name
   695  // so our only option is to iterate over all shared folders
   696  func (f *Fs) findSharedFolder(ctx context.Context, name string) (id string, err error) {
   697  	entries, err := f.listSharedFolders(ctx)
   698  	if err != nil {
   699  		return "", err
   700  	}
   701  	for _, entry := range entries {
   702  		if entry.(*fs.Dir).Remote() == name {
   703  			return entry.(*fs.Dir).ID(), nil
   704  		}
   705  	}
   706  	return "", fs.ErrorDirNotFound
   707  }
   708  
   709  // mountSharedFolder mount a shared folder to the root namespace
   710  func (f *Fs) mountSharedFolder(ctx context.Context, id string) error {
   711  	arg := sharing.MountFolderArg{
   712  		SharedFolderId: id,
   713  	}
   714  	err := f.pacer.Call(func() (bool, error) {
   715  		_, err := f.sharing.MountFolder(&arg)
   716  		return shouldRetry(ctx, err)
   717  	})
   718  	return err
   719  }
   720  
   721  // listReceivedFiles lists shared the user as access to (note this means individual
   722  // files not files contained in shared folders)
   723  func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err error) {
   724  	started := false
   725  	var res *sharing.ListFilesResult
   726  	for {
   727  		if !started {
   728  			arg := sharing.ListFilesArg{
   729  				Limit: 100,
   730  			}
   731  			err := f.pacer.Call(func() (bool, error) {
   732  				res, err = f.sharing.ListReceivedFiles(&arg)
   733  				return shouldRetry(ctx, err)
   734  			})
   735  			if err != nil {
   736  				return nil, err
   737  			}
   738  			started = true
   739  		} else {
   740  			arg := sharing.ListFilesContinueArg{
   741  				Cursor: res.Cursor,
   742  			}
   743  			err := f.pacer.Call(func() (bool, error) {
   744  				res, err = f.sharing.ListReceivedFilesContinue(&arg)
   745  				return shouldRetry(ctx, err)
   746  			})
   747  			if err != nil {
   748  				return nil, fmt.Errorf("list continue: %w", err)
   749  			}
   750  		}
   751  		for _, entry := range res.Entries {
   752  			fmt.Printf("%+v\n", entry)
   753  			entryPath := entry.Name
   754  			o := &Object{
   755  				fs:      f,
   756  				url:     entry.PreviewUrl,
   757  				remote:  entryPath,
   758  				modTime: *entry.TimeInvited,
   759  			}
   760  			if err != nil {
   761  				return nil, err
   762  			}
   763  			entries = append(entries, o)
   764  		}
   765  		if res.Cursor == "" {
   766  			break
   767  		}
   768  	}
   769  	return entries, nil
   770  }
   771  
   772  func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err error) {
   773  	files, err := f.listReceivedFiles(ctx)
   774  	if err != nil {
   775  		return nil, err
   776  	}
   777  	for _, entry := range files {
   778  		if entry.(*Object).remote == name {
   779  			return entry.(*Object), nil
   780  		}
   781  	}
   782  	return nil, fs.ErrorObjectNotFound
   783  }
   784  
// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// Shared file/folder modes have their own listing endpoints.
	if f.opt.SharedFiles {
		return f.listReceivedFiles(ctx)
	}
	if f.opt.SharedFolders {
		return f.listSharedFolders(ctx)
	}

	root := f.slashRoot
	if dir != "" {
		root += "/" + dir
	}

	// Page through the directory with ListFolder then
	// ListFolderContinue until HasMore is false.
	started := false
	var res *files.ListFolderResult
	for {
		if !started {
			// First page.
			arg := files.ListFolderArg{
				Path:      f.opt.Enc.FromStandardPath(root),
				Recursive: false,
				Limit:     1000,
			}
			if root == "/" {
				arg.Path = "" // Specify root folder as empty string
			}
			err = f.pacer.Call(func() (bool, error) {
				res, err = f.srv.ListFolder(&arg)
				return shouldRetry(ctx, err)
			})
			if err != nil {
				// Translate the API's path-not-found into fs.ErrorDirNotFound.
				switch e := err.(type) {
				case files.ListFolderAPIError:
					if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
						err = fs.ErrorDirNotFound
					}
				}
				return nil, err
			}
			started = true
		} else {
			// Subsequent pages resume from the cursor of the last page.
			arg := files.ListFolderContinueArg{
				Cursor: res.Cursor,
			}
			err = f.pacer.Call(func() (bool, error) {
				res, err = f.srv.ListFolderContinue(&arg)
				return shouldRetry(ctx, err)
			})
			if err != nil {
				return nil, fmt.Errorf("list continue: %w", err)
			}
		}
		for _, entry := range res.Entries {
			// Work out whether this entry is a file or a folder.
			var fileInfo *files.FileMetadata
			var folderInfo *files.FolderMetadata
			var metadata *files.Metadata
			switch info := entry.(type) {
			case *files.FolderMetadata:
				folderInfo = info
				metadata = &info.Metadata
			case *files.FileMetadata:
				fileInfo = info
				metadata = &info.Metadata
			default:
				fs.Errorf(f, "Unknown type %T", entry)
				continue
			}

			// Only the last element is reliably cased in PathDisplay
			entryPath := metadata.PathDisplay
			leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
			remote := path.Join(dir, leaf)
			if folderInfo != nil {
				// Folders carry no modification time in the listing.
				d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
				entries = append(entries, d)
			} else if fileInfo != nil {
				o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
				if err != nil {
					return nil, err
				}
				entries = append(entries, o)
			}
		}
		if !res.HasMore {
			break
		}
	}
	return entries, nil
}
   882  
   883  // Put the object
   884  //
   885  // Copy the reader in to the new object which is returned.
   886  //
   887  // The new object may have been created if an error is returned
   888  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   889  	if f.opt.SharedFiles || f.opt.SharedFolders {
   890  		return nil, errNotSupportedInSharedMode
   891  	}
   892  	// Temporary Object under construction
   893  	o := &Object{
   894  		fs:     f,
   895  		remote: src.Remote(),
   896  	}
   897  	return o, o.Update(ctx, in, src, options...)
   898  }
   899  
   900  // PutStream uploads to the remote path with the modTime given of indeterminate size
   901  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   902  	return f.Put(ctx, in, src, options...)
   903  }
   904  
   905  // Mkdir creates the container if it doesn't exist
   906  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   907  	if f.opt.SharedFiles || f.opt.SharedFolders {
   908  		return errNotSupportedInSharedMode
   909  	}
   910  	root := path.Join(f.slashRoot, dir)
   911  
   912  	// can't create or run metadata on root
   913  	if root == "/" {
   914  		return nil
   915  	}
   916  
   917  	// check directory doesn't exist
   918  	_, err := f.getDirMetadata(ctx, root)
   919  	if err == nil {
   920  		return nil // directory exists already
   921  	} else if err != fs.ErrorDirNotFound {
   922  		return err // some other error
   923  	}
   924  
   925  	// create it
   926  	arg2 := files.CreateFolderArg{
   927  		Path: f.opt.Enc.FromStandardPath(root),
   928  	}
   929  	// Don't attempt to create filenames that are too long
   930  	if cErr := checkPathLength(arg2.Path); cErr != nil {
   931  		return cErr
   932  	}
   933  	err = f.pacer.Call(func() (bool, error) {
   934  		_, err = f.srv.CreateFolderV2(&arg2)
   935  		return shouldRetry(ctx, err)
   936  	})
   937  	return err
   938  }
   939  
   940  // purgeCheck removes the root directory, if check is set then it
   941  // refuses to do so if it has anything in
   942  func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) {
   943  	root := path.Join(f.slashRoot, dir)
   944  
   945  	// can't remove root
   946  	if root == "/" {
   947  		return errors.New("can't remove root directory")
   948  	}
   949  	encRoot := f.opt.Enc.FromStandardPath(root)
   950  
   951  	if check {
   952  		// check directory exists
   953  		_, err = f.getDirMetadata(ctx, root)
   954  		if err != nil {
   955  			return fmt.Errorf("Rmdir: %w", err)
   956  		}
   957  
   958  		// check directory empty
   959  		arg := files.ListFolderArg{
   960  			Path:      encRoot,
   961  			Recursive: false,
   962  		}
   963  		if root == "/" {
   964  			arg.Path = "" // Specify root folder as empty string
   965  		}
   966  		var res *files.ListFolderResult
   967  		err = f.pacer.Call(func() (bool, error) {
   968  			res, err = f.srv.ListFolder(&arg)
   969  			return shouldRetry(ctx, err)
   970  		})
   971  		if err != nil {
   972  			return fmt.Errorf("Rmdir: %w", err)
   973  		}
   974  		if len(res.Entries) != 0 {
   975  			return errors.New("directory not empty")
   976  		}
   977  	}
   978  
   979  	// remove it
   980  	err = f.pacer.Call(func() (bool, error) {
   981  		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: encRoot})
   982  		return shouldRetry(ctx, err)
   983  	})
   984  	return err
   985  }
   986  
   987  // Rmdir deletes the container
   988  //
   989  // Returns an error if it isn't empty
   990  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
   991  	if f.opt.SharedFiles || f.opt.SharedFolders {
   992  		return errNotSupportedInSharedMode
   993  	}
   994  	return f.purgeCheck(ctx, dir, true)
   995  }
   996  
   997  // Precision returns the precision
   998  func (f *Fs) Precision() time.Duration {
   999  	return time.Second
  1000  }
  1001  
  1002  // Copy src to this remote using server-side copy operations.
  1003  //
  1004  // This is stored with the remote path given.
  1005  //
  1006  // It returns the destination Object and a possible error.
  1007  //
  1008  // Will only be called if src.Fs().Name() == f.Name()
  1009  //
  1010  // If it isn't possible then return fs.ErrorCantCopy
  1011  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1012  	srcObj, ok := src.(*Object)
  1013  	if !ok {
  1014  		fs.Debugf(src, "Can't copy - not same remote type")
  1015  		return nil, fs.ErrorCantCopy
  1016  	}
  1017  
  1018  	// Temporary Object under construction
  1019  	dstObj := &Object{
  1020  		fs:     f,
  1021  		remote: remote,
  1022  	}
  1023  
  1024  	// Copy
  1025  	arg := files.RelocationArg{
  1026  		RelocationPath: files.RelocationPath{
  1027  			FromPath: f.opt.Enc.FromStandardPath(srcObj.remotePath()),
  1028  			ToPath:   f.opt.Enc.FromStandardPath(dstObj.remotePath()),
  1029  		},
  1030  	}
  1031  	var err error
  1032  	var result *files.RelocationResult
  1033  	err = f.pacer.Call(func() (bool, error) {
  1034  		result, err = f.srv.CopyV2(&arg)
  1035  		return shouldRetry(ctx, err)
  1036  	})
  1037  	if err != nil {
  1038  		return nil, fmt.Errorf("copy failed: %w", err)
  1039  	}
  1040  
  1041  	// Set the metadata
  1042  	fileInfo, ok := result.Metadata.(*files.FileMetadata)
  1043  	if !ok {
  1044  		return nil, fs.ErrorNotAFile
  1045  	}
  1046  	err = dstObj.setMetadataFromEntry(fileInfo)
  1047  	if err != nil {
  1048  		return nil, fmt.Errorf("copy failed: %w", err)
  1049  	}
  1050  
  1051  	return dstObj, nil
  1052  }
  1053  
  1054  // Purge deletes all the files and the container
  1055  //
  1056  // Optional interface: Only implement this if you have a way of
  1057  // deleting all the files quicker than just running Remove() on the
  1058  // result of List()
  1059  func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
  1060  	return f.purgeCheck(ctx, dir, false)
  1061  }
  1062  
  1063  // Move src to this remote using server-side move operations.
  1064  //
  1065  // This is stored with the remote path given.
  1066  //
  1067  // It returns the destination Object and a possible error.
  1068  //
  1069  // Will only be called if src.Fs().Name() == f.Name()
  1070  //
  1071  // If it isn't possible then return fs.ErrorCantMove
  1072  func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1073  	srcObj, ok := src.(*Object)
  1074  	if !ok {
  1075  		fs.Debugf(src, "Can't move - not same remote type")
  1076  		return nil, fs.ErrorCantMove
  1077  	}
  1078  
  1079  	// Temporary Object under construction
  1080  	dstObj := &Object{
  1081  		fs:     f,
  1082  		remote: remote,
  1083  	}
  1084  
  1085  	// Do the move
  1086  	arg := files.RelocationArg{
  1087  		RelocationPath: files.RelocationPath{
  1088  			FromPath: f.opt.Enc.FromStandardPath(srcObj.remotePath()),
  1089  			ToPath:   f.opt.Enc.FromStandardPath(dstObj.remotePath()),
  1090  		},
  1091  	}
  1092  	var err error
  1093  	var result *files.RelocationResult
  1094  	err = f.pacer.Call(func() (bool, error) {
  1095  		result, err = f.srv.MoveV2(&arg)
  1096  		return shouldRetry(ctx, err)
  1097  	})
  1098  	if err != nil {
  1099  		return nil, fmt.Errorf("move failed: %w", err)
  1100  	}
  1101  
  1102  	// Set the metadata
  1103  	fileInfo, ok := result.Metadata.(*files.FileMetadata)
  1104  	if !ok {
  1105  		return nil, fs.ErrorNotAFile
  1106  	}
  1107  	err = dstObj.setMetadataFromEntry(fileInfo)
  1108  	if err != nil {
  1109  		return nil, fmt.Errorf("move failed: %w", err)
  1110  	}
  1111  	return dstObj, nil
  1112  }
  1113  
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
//
// NOTE(review): the unlink parameter is accepted but not acted on in
// this implementation - link removal does not appear to be supported.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
	absPath := f.opt.Enc.FromStandardPath(path.Join(f.slashRoot, remote))
	fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
	// Ask Dropbox for a public, viewer-only shared link.
	createArg := sharing.CreateSharedLinkWithSettingsArg{
		Path: absPath,
		Settings: &sharing.SharedLinkSettings{
			RequestedVisibility: &sharing.RequestedVisibility{
				Tagged: dropbox.Tagged{Tag: sharing.RequestedVisibilityPublic},
			},
			Audience: &sharing.LinkAudience{
				Tagged: dropbox.Tagged{Tag: sharing.LinkAudiencePublic},
			},
			Access: &sharing.RequestedLinkAccessLevel{
				Tagged: dropbox.Tagged{Tag: sharing.RequestedLinkAccessLevelViewer},
			},
		},
	}
	// Attach an expiry if one was requested (DurationOff means never).
	if expire < fs.DurationOff {
		// Dropbox wants UTC timestamps with second precision.
		expiryTime := time.Now().Add(time.Duration(expire)).UTC().Round(time.Second)
		createArg.Settings.Expires = &expiryTime
	}

	var linkRes sharing.IsSharedLinkMetadata
	err = f.pacer.Call(func() (bool, error) {
		linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
		return shouldRetry(ctx, err)
	})

	// If a shared link already exists the create call fails - fetch
	// the existing link instead.
	if err != nil && strings.Contains(err.Error(),
		sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
		fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
		listArg := sharing.ListSharedLinksArg{
			Path:       absPath,
			DirectOnly: true,
		}
		var listRes *sharing.ListSharedLinksResult
		err = f.pacer.Call(func() (bool, error) {
			listRes, err = f.sharing.ListSharedLinks(&listArg)
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return
		}
		if len(listRes.Links) == 0 {
			err = errors.New("sharing link already exists, but list came back empty")
			return
		}
		linkRes = listRes.Links[0]
	}
	// Extract the URL from whichever metadata type came back.
	if err == nil {
		switch res := linkRes.(type) {
		case *sharing.FileLinkMetadata:
			link = res.Url
		case *sharing.FolderLinkMetadata:
			link = res.Url
		default:
			err = fmt.Errorf("don't know how to extract link, response has unknown format: %T", res)
		}
	}
	return
}
  1176  
  1177  // DirMove moves src, srcRemote to this remote at dstRemote
  1178  // using server-side move operations.
  1179  //
  1180  // Will only be called if src.Fs().Name() == f.Name()
  1181  //
  1182  // If it isn't possible then return fs.ErrorCantDirMove
  1183  //
  1184  // If destination exists then return fs.ErrorDirExists
  1185  func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
  1186  	srcFs, ok := src.(*Fs)
  1187  	if !ok {
  1188  		fs.Debugf(srcFs, "Can't move directory - not same remote type")
  1189  		return fs.ErrorCantDirMove
  1190  	}
  1191  	srcPath := path.Join(srcFs.slashRoot, srcRemote)
  1192  	dstPath := path.Join(f.slashRoot, dstRemote)
  1193  
  1194  	// Check if destination exists
  1195  	_, err := f.getDirMetadata(ctx, dstPath)
  1196  	if err == nil {
  1197  		return fs.ErrorDirExists
  1198  	} else if err != fs.ErrorDirNotFound {
  1199  		return err
  1200  	}
  1201  
  1202  	// Make sure the parent directory exists
  1203  	// ...apparently not necessary
  1204  
  1205  	// Do the move
  1206  	arg := files.RelocationArg{
  1207  		RelocationPath: files.RelocationPath{
  1208  			FromPath: f.opt.Enc.FromStandardPath(srcPath),
  1209  			ToPath:   f.opt.Enc.FromStandardPath(dstPath),
  1210  		},
  1211  	}
  1212  	err = f.pacer.Call(func() (bool, error) {
  1213  		_, err = f.srv.MoveV2(&arg)
  1214  		return shouldRetry(ctx, err)
  1215  	})
  1216  	if err != nil {
  1217  		return fmt.Errorf("MoveDir failed: %w", err)
  1218  	}
  1219  
  1220  	return nil
  1221  }
  1222  
  1223  // About gets quota information
  1224  func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
  1225  	var q *users.SpaceUsage
  1226  	err = f.pacer.Call(func() (bool, error) {
  1227  		q, err = f.users.GetSpaceUsage()
  1228  		return shouldRetry(ctx, err)
  1229  	})
  1230  	if err != nil {
  1231  		return nil, err
  1232  	}
  1233  	var total uint64
  1234  	used := q.Used
  1235  	if q.Allocation != nil {
  1236  		if q.Allocation.Individual != nil {
  1237  			total += q.Allocation.Individual.Allocated
  1238  		}
  1239  		if q.Allocation.Team != nil {
  1240  			total += q.Allocation.Team.Allocated
  1241  			// Override used with Team.Used as this includes q.Used already
  1242  			used = q.Allocation.Team.Used
  1243  		}
  1244  	}
  1245  	usage = &fs.Usage{
  1246  		Total: fs.NewUsageValue(int64(total)),        // quota of bytes that can be used
  1247  		Used:  fs.NewUsageValue(int64(used)),         // bytes in use
  1248  		Free:  fs.NewUsageValue(int64(total - used)), // bytes which can be uploaded before reaching the quota
  1249  	}
  1250  	return usage, nil
  1251  }
  1252  
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
	go func() {
		// get the StartCursor early so all changes from now on get processed
		startCursor, err := f.changeNotifyCursor(ctx)
		if err != nil {
			fs.Infof(f, "Failed to get StartCursor: %s", err)
		}
		// ticker drives the polling; while polling is disabled tickerC
		// is nil, so its select case blocks forever.
		var ticker *time.Ticker
		var tickerC <-chan time.Time
		for {
			select {
			case pollInterval, ok := <-pollIntervalChan:
				// Channel closed: stop the ticker and end the goroutine.
				if !ok {
					if ticker != nil {
						ticker.Stop()
					}
					return
				}
				// Interval changed: drop the old ticker before (maybe)
				// starting a new one.
				if ticker != nil {
					ticker.Stop()
					ticker, tickerC = nil, nil
				}
				// A zero interval disables polling entirely.
				if pollInterval != 0 {
					ticker = time.NewTicker(pollInterval)
					tickerC = ticker.C
				}
			case <-tickerC:
				// Retry fetching the start cursor if the initial fetch failed.
				if startCursor == "" {
					startCursor, err = f.changeNotifyCursor(ctx)
					if err != nil {
						fs.Infof(f, "Failed to get StartCursor: %s", err)
						continue
					}
				}
				fs.Debugf(f, "Checking for changes on remote")
				// Run one longpoll cycle and carry the cursor forward.
				startCursor, err = f.changeNotifyRunner(ctx, notifyFunc, startCursor)
				if err != nil {
					fs.Infof(f, "Change notify listener failure: %s", err)
				}
			}
		}
	}()
}
  1302  
  1303  func (f *Fs) changeNotifyCursor(ctx context.Context) (cursor string, err error) {
  1304  	var startCursor *files.ListFolderGetLatestCursorResult
  1305  
  1306  	err = f.pacer.Call(func() (bool, error) {
  1307  		arg := files.ListFolderArg{
  1308  			Path:      f.opt.Enc.FromStandardPath(f.slashRoot),
  1309  			Recursive: true,
  1310  		}
  1311  
  1312  		if arg.Path == "/" {
  1313  			arg.Path = ""
  1314  		}
  1315  
  1316  		startCursor, err = f.srv.ListFolderGetLatestCursor(&arg)
  1317  
  1318  		return shouldRetry(ctx, err)
  1319  	})
  1320  	if err != nil {
  1321  		return
  1322  	}
  1323  	return startCursor.Cursor, nil
  1324  }
  1325  
// changeNotifyRunner performs one longpoll cycle: it waits (up to the
// clamped timeout) for the remote to report changes after startCursor,
// calls notifyFunc for each changed path, and returns the cursor to
// resume from on the next cycle.
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), startCursor string) (newCursor string, err error) {
	cursor := startCursor
	var res *files.ListFolderLongpollResult

	// Dropbox sets a timeout range of 30 - 480
	timeout := uint64(f.ci.TimeoutOrInfinite() / time.Second)

	if timeout < 30 {
		timeout = 30
		fs.Debugf(f, "Increasing poll interval to minimum 30s")
	}

	if timeout > 480 {
		timeout = 480
		fs.Debugf(f, "Decreasing poll interval to maximum 480s")
	}

	// Longpoll until something changes or the timeout expires.
	// NOTE(review): this uses f.svc rather than f.srv - presumably a
	// separate client is needed for the longpoll endpoint; confirm.
	err = f.pacer.Call(func() (bool, error) {
		args := files.ListFolderLongpollArg{
			Cursor:  cursor,
			Timeout: timeout,
		}

		res, err = f.svc.ListFolderLongpoll(&args)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return
	}

	// Nothing changed - reuse the same cursor for the next poll.
	if !res.Changes {
		return cursor, nil
	}

	// Honour the server's request to back off before fetching changes.
	if res.Backoff != 0 {
		fs.Debugf(f, "Waiting to poll for %d seconds", res.Backoff)
		time.Sleep(time.Duration(res.Backoff) * time.Second)
	}

	// Page through the accumulated changes with ListFolderContinue.
	for {
		var changeList *files.ListFolderResult

		arg := files.ListFolderContinueArg{
			Cursor: cursor,
		}
		err = f.pacer.Call(func() (bool, error) {
			changeList, err = f.srv.ListFolderContinue(&arg)
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return "", fmt.Errorf("list continue: %w", err)
		}
		cursor = changeList.Cursor
		var entryType fs.EntryType
		for _, entry := range changeList.Entries {
			// Paths are made relative to the remote root.
			entryPath := ""
			switch info := entry.(type) {
			case *files.FolderMetadata:
				entryType = fs.EntryDirectory
				entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
			case *files.FileMetadata:
				entryType = fs.EntryObject
				entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
			case *files.DeletedMetadata:
				// Deletions are reported as object changes here.
				entryType = fs.EntryObject
				entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
			default:
				fs.Errorf(entry, "dropbox ChangeNotify: ignoring unknown EntryType %T", entry)
				continue
			}

			if entryPath != "" {
				notifyFunc(f.opt.Enc.ToStandardPath(entryPath), entryType)
			}
		}
		if !changeList.HasMore {
			break
		}
	}
	return cursor, nil
}
  1407  
  1408  // Hashes returns the supported hash sets.
  1409  func (f *Fs) Hashes() hash.Set {
  1410  	return hash.Set(DbHashType)
  1411  }
  1412  
  1413  // Shutdown the backend, closing any background tasks and any
  1414  // cached connections.
  1415  func (f *Fs) Shutdown(ctx context.Context) error {
  1416  	f.batcher.Shutdown()
  1417  	return nil
  1418  }
  1419  
  1420  // ------------------------------------------------------------
  1421  
  1422  // Fs returns the parent Fs
  1423  func (o *Object) Fs() fs.Info {
  1424  	return o.fs
  1425  }
  1426  
  1427  // Return a string version
  1428  func (o *Object) String() string {
  1429  	if o == nil {
  1430  		return "<nil>"
  1431  	}
  1432  	return o.remote
  1433  }
  1434  
  1435  // Remote returns the remote path
  1436  func (o *Object) Remote() string {
  1437  	return o.remote
  1438  }
  1439  
  1440  // ID returns the object id
  1441  func (o *Object) ID() string {
  1442  	return o.id
  1443  }
  1444  
  1445  // Hash returns the dropbox special hash
  1446  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  1447  	if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
  1448  		return "", errNotSupportedInSharedMode
  1449  	}
  1450  	if t != DbHashType {
  1451  		return "", hash.ErrUnsupported
  1452  	}
  1453  	err := o.readMetaData(ctx)
  1454  	if err != nil {
  1455  		return "", fmt.Errorf("failed to read hash from metadata: %w", err)
  1456  	}
  1457  	return o.hash, nil
  1458  }
  1459  
  1460  // Size returns the size of an object in bytes
  1461  func (o *Object) Size() int64 {
  1462  	return o.bytes
  1463  }
  1464  
  1465  // setMetadataFromEntry sets the fs data from a files.FileMetadata
  1466  //
  1467  // This isn't a complete set of metadata and has an inaccurate date
  1468  func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
  1469  	o.id = info.Id
  1470  	o.bytes = int64(info.Size)
  1471  	o.modTime = info.ClientModified
  1472  	o.hash = info.ContentHash
  1473  	return nil
  1474  }
  1475  
  1476  // Reads the entry for a file from dropbox
  1477  func (o *Object) readEntry(ctx context.Context) (*files.FileMetadata, error) {
  1478  	return o.fs.getFileMetadata(ctx, o.remotePath())
  1479  }
  1480  
  1481  // Read entry if not set and set metadata from it
  1482  func (o *Object) readEntryAndSetMetadata(ctx context.Context) error {
  1483  	// Last resort set time from client
  1484  	if !o.modTime.IsZero() {
  1485  		return nil
  1486  	}
  1487  	entry, err := o.readEntry(ctx)
  1488  	if err != nil {
  1489  		return err
  1490  	}
  1491  	return o.setMetadataFromEntry(entry)
  1492  }
  1493  
  1494  // Returns the remote path for the object
  1495  func (o *Object) remotePath() string {
  1496  	return o.fs.slashRootSlash + o.remote
  1497  }
  1498  
  1499  // readMetaData gets the info if it hasn't already been fetched
  1500  func (o *Object) readMetaData(ctx context.Context) (err error) {
  1501  	if !o.modTime.IsZero() {
  1502  		return nil
  1503  	}
  1504  	// Last resort
  1505  	return o.readEntryAndSetMetadata(ctx)
  1506  }
  1507  
  1508  // ModTime returns the modification time of the object
  1509  //
  1510  // It attempts to read the objects mtime and if that isn't present the
  1511  // LastModified returned in the http headers
  1512  func (o *Object) ModTime(ctx context.Context) time.Time {
  1513  	err := o.readMetaData(ctx)
  1514  	if err != nil {
  1515  		fs.Debugf(o, "Failed to read metadata: %v", err)
  1516  		return time.Now()
  1517  	}
  1518  	return o.modTime
  1519  }
  1520  
  1521  // SetModTime sets the modification time of the local fs object
  1522  //
  1523  // Commits the datastore
  1524  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
  1525  	// Dropbox doesn't have a way of doing this so returning this
  1526  	// error will cause the file to be deleted first then
  1527  	// re-uploaded to set the time.
  1528  	return fs.ErrorCantSetModTimeWithoutDelete
  1529  }
  1530  
  1531  // Storable returns whether this object is storable
  1532  func (o *Object) Storable() bool {
  1533  	return true
  1534  }
  1535  
  1536  // Open an object for read
  1537  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  1538  	if o.fs.opt.SharedFiles {
  1539  		if len(options) != 0 {
  1540  			return nil, errors.New("OpenOptions not supported for shared files")
  1541  		}
  1542  		arg := sharing.GetSharedLinkMetadataArg{
  1543  			Url: o.url,
  1544  		}
  1545  		err = o.fs.pacer.Call(func() (bool, error) {
  1546  			_, in, err = o.fs.sharing.GetSharedLinkFile(&arg)
  1547  			return shouldRetry(ctx, err)
  1548  		})
  1549  		if err != nil {
  1550  			return nil, err
  1551  		}
  1552  		return
  1553  	}
  1554  
  1555  	fs.FixRangeOption(options, o.bytes)
  1556  	headers := fs.OpenOptionHeaders(options)
  1557  	arg := files.DownloadArg{
  1558  		Path:         o.id,
  1559  		ExtraHeaders: headers,
  1560  	}
  1561  	err = o.fs.pacer.Call(func() (bool, error) {
  1562  		_, in, err = o.fs.srv.Download(&arg)
  1563  		return shouldRetry(ctx, err)
  1564  	})
  1565  
  1566  	switch e := err.(type) {
  1567  	case files.DownloadAPIError:
  1568  		// Don't attempt to retry copyright violation errors
  1569  		if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
  1570  			return nil, fserrors.NoRetryError(err)
  1571  		}
  1572  	}
  1573  
  1574  	return
  1575  }
  1576  
// uploadChunked uploads the object in parts
//
// Will introduce two additional network requests to start and finish the session.
// If the size is unknown (i.e. -1) the method incurs one additional
// request to the Dropbox API that does not carry a payload to close the append session.
func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
	// start upload
	var res *files.UploadSessionStartResult
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, nil)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}

	// Work out the expected chunk count (used for progress logging only).
	// NOTE(review): for unknown size (-1) Go's truncating division makes
	// chunks == 0, not negative, so the "Streaming chunk" branch below
	// looks unreachable - confirm the intent here.
	chunkSize := int64(o.fs.opt.ChunkSize)
	chunks, remainder := size/chunkSize, size%chunkSize
	if remainder > 0 {
		chunks++
	}

	// write chunks
	in := readers.NewCountingReader(in0)
	buf := make([]byte, int(chunkSize))
	cursor := files.UploadSessionCursor{
		SessionId: res.SessionId,
		Offset:    0,
	}
	appendArg := files.UploadSessionAppendArg{Cursor: &cursor}
	for currentChunk := 1; ; currentChunk++ {
		// The cursor tracks how much the server should have received.
		cursor.Offset = in.BytesRead()

		if chunks < 0 {
			fs.Debugf(o, "Streaming chunk %d/unknown", currentChunk)
		} else {
			fs.Debugf(o, "Uploading chunk %d/%d", currentChunk, chunks)
		}

		// The chunk is buffered so it can be re-read on a retry.
		chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
		// skip counts bytes the server already holds, so a retry can
		// resume part way through the chunk.
		skip := int64(0)
		err = o.fs.pacer.Call(func() (bool, error) {
			// seek to the start in case this is a retry
			if _, err = chunk.Seek(skip, io.SeekStart); err != nil {
				return false, err
			}
			err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
			// after session is started, we retry everything
			if err != nil {
				// Check for incorrect offset error and retry with new offset
				if uErr, ok := err.(files.UploadSessionAppendV2APIError); ok {
					if uErr.EndpointError != nil && uErr.EndpointError.IncorrectOffset != nil {
						correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
						delta := int64(correctOffset) - int64(cursor.Offset)
						skip += delta
						what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
						if skip < 0 {
							return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
						} else if skip == chunkSize {
							// The server already has the whole chunk - treat as success.
							fs.Debugf(o, "%s: chunk received OK - continuing", what)
							return false, nil
						} else if skip > chunkSize {
							// This error should never happen
							return false, fmt.Errorf("can't seek forwards by more than a chunk to correct offset: %s", what)
						}
						// Skip the sent data on next retry
						cursor.Offset = uint64(int64(cursor.Offset) + delta)
						fs.Debugf(o, "%s: skipping bytes on retry to fix offset", what)
					}
				}
			}
			return err != nil, err
		})
		if err != nil {
			return nil, err
		}
		// Close was set on the previous iteration, so that append was
		// the final one for this session.
		if appendArg.Close {
			break
		}

		if size > 0 {
			// if size is known, check if next chunk is final
			appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
			if in.BytesRead() > uint64(size) {
				return nil, fmt.Errorf("expected %d bytes in input, but have read %d so far", size, in.BytesRead())
			}
		} else {
			// if size is unknown, upload as long as we can read full chunks from the reader
			appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
		}
	}

	// finish upload
	cursor.Offset = in.BytesRead()
	args := &files.UploadSessionFinishArg{
		Cursor: &cursor,
		Commit: commitInfo,
	}
	// If we are batching then we should have written all the data now
	// store the commit info now for a batch commit
	if o.fs.batcher.Batching() {
		return o.fs.batcher.Commit(ctx, o.remote, args)
	}

	err = o.fs.pacer.Call(func() (bool, error) {
		entry, err = o.fs.srv.UploadSessionFinish(args, nil)
		// If error is insufficient space then don't retry
		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
				err = fserrors.NoRetryError(err)
				return false, err
			}
		}
		// after the first chunk is uploaded, we retry everything
		return err != nil, err
	})
	if err != nil {
		return nil, err
	}
	return entry, nil
}
  1698  
  1699  // checks all the parts of name to see they are below
  1700  // maxFileNameLength runes.
  1701  //
  1702  // This checks the length as runes which isn't quite right as dropbox
  1703  // seems to encode some symbols (eg ☺) as two "characters". This seems
  1704  // like utf-16 except that ☺ doesn't need two characters in utf-16.
  1705  //
  1706  // Using runes instead of what dropbox is using will work for most
  1707  // cases, and when it goes wrong we will upload something we should
  1708  // have detected as too long which is the least damaging way to fail.
  1709  func checkPathLength(name string) (err error) {
  1710  	for next := ""; len(name) > 0; name = next {
  1711  		if slash := strings.IndexRune(name, '/'); slash >= 0 {
  1712  			name, next = name[:slash], name[slash+1:]
  1713  		} else {
  1714  			next = ""
  1715  		}
  1716  		length := utf8.RuneCountInString(name)
  1717  		if length > maxFileNameLength {
  1718  			return fserrors.NoRetryError(fs.ErrorFileNameTooLong)
  1719  		}
  1720  	}
  1721  	return nil
  1722  }
  1723  
  1724  // Update the already existing object
  1725  //
  1726  // Copy the reader into the object updating modTime and size.
  1727  //
  1728  // The new object may have been created if an error is returned
  1729  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  1730  	if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
  1731  		return errNotSupportedInSharedMode
  1732  	}
  1733  	remote := o.remotePath()
  1734  	if ignoredFiles.MatchString(remote) {
  1735  		return fserrors.NoRetryError(fmt.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
  1736  	}
  1737  	commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
  1738  	commitInfo.Mode.Tag = "overwrite"
  1739  	// The Dropbox API only accepts timestamps in UTC with second precision.
  1740  	clientModified := src.ModTime(ctx).UTC().Round(time.Second)
  1741  	commitInfo.ClientModified = &clientModified
  1742  	// Don't attempt to create filenames that are too long
  1743  	if cErr := checkPathLength(commitInfo.Path); cErr != nil {
  1744  		return cErr
  1745  	}
  1746  
  1747  	size := src.Size()
  1748  	var err error
  1749  	var entry *files.FileMetadata
  1750  	if size > int64(o.fs.opt.ChunkSize) || size < 0 || o.fs.batcher.Batching() {
  1751  		entry, err = o.uploadChunked(ctx, in, commitInfo, size)
  1752  	} else {
  1753  		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  1754  			entry, err = o.fs.srv.Upload(&files.UploadArg{CommitInfo: *commitInfo}, in)
  1755  			return shouldRetry(ctx, err)
  1756  		})
  1757  	}
  1758  	if err != nil {
  1759  		return fmt.Errorf("upload failed: %w", err)
  1760  	}
  1761  	// If we haven't received data back from batch upload then fake it
  1762  	//
  1763  	// This will only happen if we are uploading async batches
  1764  	if entry == nil {
  1765  		o.bytes = size
  1766  		o.modTime = *commitInfo.ClientModified
  1767  		o.hash = "" // we don't have this
  1768  		return nil
  1769  	}
  1770  	return o.setMetadataFromEntry(entry)
  1771  }
  1772  
  1773  // Remove an object
  1774  func (o *Object) Remove(ctx context.Context) (err error) {
  1775  	if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
  1776  		return errNotSupportedInSharedMode
  1777  	}
  1778  	err = o.fs.pacer.Call(func() (bool, error) {
  1779  		_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
  1780  			Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
  1781  		})
  1782  		return shouldRetry(ctx, err)
  1783  	})
  1784  	return err
  1785  }
  1786  
  1787  // Check the interfaces are satisfied
  1788  var (
  1789  	_ fs.Fs           = (*Fs)(nil)
  1790  	_ fs.Copier       = (*Fs)(nil)
  1791  	_ fs.Purger       = (*Fs)(nil)
  1792  	_ fs.PutStreamer  = (*Fs)(nil)
  1793  	_ fs.Mover        = (*Fs)(nil)
  1794  	_ fs.PublicLinker = (*Fs)(nil)
  1795  	_ fs.DirMover     = (*Fs)(nil)
  1796  	_ fs.Abouter      = (*Fs)(nil)
  1797  	_ fs.Shutdowner   = &Fs{}
  1798  	_ fs.Object       = (*Object)(nil)
  1799  	_ fs.IDer         = (*Object)(nil)
  1800  )