github.com/10XDev/rclone@v1.52.3-0.20200626220027-16af9ab76b2a/backend/amazonclouddrive/amazonclouddrive.go (about)

     1  // Package amazonclouddrive provides an interface to the Amazon Cloud
     2  // Drive object storage system.
     3  package amazonclouddrive
     4  
     5  /*
     6  FIXME make searching for directory in id and file in id more efficient
     7  - use the name: search parameter - remember the escaping rules
     8  - use Folder GetNode and GetFile
     9  
    10  FIXME make the default for no files and no dirs be (FILE & FOLDER) so
    11  we ignore assets completely!
    12  */
    13  
    14  import (
    15  	"context"
    16  	"encoding/json"
    17  	"fmt"
    18  	"io"
    19  	"log"
    20  	"net/http"
    21  	"path"
    22  	"strings"
    23  	"time"
    24  
    25  	acd "github.com/ncw/go-acd"
    26  	"github.com/pkg/errors"
    27  	"github.com/rclone/rclone/fs"
    28  	"github.com/rclone/rclone/fs/config"
    29  	"github.com/rclone/rclone/fs/config/configmap"
    30  	"github.com/rclone/rclone/fs/config/configstruct"
    31  	"github.com/rclone/rclone/fs/fserrors"
    32  	"github.com/rclone/rclone/fs/fshttp"
    33  	"github.com/rclone/rclone/fs/hash"
    34  	"github.com/rclone/rclone/lib/dircache"
    35  	"github.com/rclone/rclone/lib/encoder"
    36  	"github.com/rclone/rclone/lib/oauthutil"
    37  	"github.com/rclone/rclone/lib/pacer"
    38  	"golang.org/x/oauth2"
    39  )
    40  
// Constants for the Amazon Drive API and backend behaviour.
const (
	folderKind               = "FOLDER"    // node kind for directories
	fileKind                 = "FILE"      // node kind for regular files
	statusAvailable          = "AVAILABLE" // only nodes in this status are listed
	timeFormat               = time.RFC3339 // 2014-03-07T22:31:12.173Z
	minSleep                 = 20 * time.Millisecond // minimum pacer sleep between API calls
	warnFileSize             = 50000 << 20            // Display warning for files larger than this size
	defaultTempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
)
    50  
// Globals
var (
	// Description of how to auth for this app
	//
	// ClientID and ClientSecret are left empty here; they are filled
	// in from the user's config by oauthutil (see init below).
	acdConfig = &oauth2.Config{
		Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://www.amazon.com/ap/oa",
			TokenURL: "https://api.amazon.com/auth/o2/token",
		},
		ClientID:     "",
		ClientSecret: "",
		RedirectURL:  oauthutil.RedirectURL,
	}
)
    65  
// Register with Fs
//
// Declares the backend's name, prefix, config options and the OAuth
// configuration flow to the rclone registry.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "amazon cloud drive",
		Prefix:      "acd",
		Description: "Amazon Drive",
		NewFs:       NewFs,
		// Config runs the interactive OAuth token setup for this remote.
		Config: func(name string, m configmap.Mapper) {
			err := oauthutil.Config("amazon cloud drive", name, m, acdConfig, nil)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name:     config.ConfigClientID,
			Help:     "Amazon Application Client ID.",
			Required: true,
		}, {
			Name:     config.ConfigClientSecret,
			Help:     "Amazon Application Client Secret.",
			Required: true,
		}, {
			Name:     config.ConfigAuthURL,
			Help:     "Auth server URL.\nLeave blank to use Amazon's.",
			Advanced: true,
		}, {
			Name:     config.ConfigTokenURL,
			Help:     "Token server url.\nleave blank to use Amazon's.",
			Advanced: true,
		}, {
			Name:     "checkpoint",
			Help:     "Checkpoint for internal polling (debug).",
			Hide:     fs.OptionHideBoth,
			Advanced: true,
		}, {
			Name: "upload_wait_per_gb",
			Help: `Additional time per GB to wait after a failed complete upload to see if it appears.

Sometimes Amazon Drive gives an error when a file has been fully
uploaded but the file appears anyway after a little while.  This
happens sometimes for files over 1GB in size and nearly every time for
files bigger than 10GB. This parameter controls the time rclone waits
for the file to appear.

The default value for this parameter is 3 minutes per GB, so by
default it will wait 3 minutes for every GB uploaded to see if the
file appears.

You can disable this feature by setting it to 0. This may cause
conflict errors as rclone retries the failed upload but the file will
most likely appear correctly eventually.

These values were determined empirically by observing lots of uploads
of big files for a range of file sizes.

Upload with the "-v" flag to see more info about what rclone is doing
in this situation.`,
			Default:  fs.Duration(180 * time.Second),
			Advanced: true,
		}, {
			Name: "templink_threshold",
			Help: `Files >= this size will be downloaded via their tempLink.

Files this size or more will be downloaded via their "tempLink". This
is to work around a problem with Amazon Drive which blocks downloads
of files bigger than about 10GB.  The default for this is 9GB which
shouldn't need to be changed.

To download files above this threshold, rclone requests a "tempLink"
which downloads the file through a temporary URL directly from the
underlying S3 storage.`,
			Default:  defaultTempLinkThreshold,
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
			Default: (encoder.Base |
				encoder.EncodeInvalidUtf8),
		}},
	})
}
   149  
// Options defines the configuration for this backend
//
// Fields are populated from the remote's config section via
// configstruct.Set in NewFs.
type Options struct {
	Checkpoint        string               `config:"checkpoint"`         // polling checkpoint (debug only)
	UploadWaitPerGB   fs.Duration          `config:"upload_wait_per_gb"` // extra wait per GB after a failed upload (see checkUpload)
	TempLinkThreshold fs.SizeSuffix        `config:"templink_threshold"` // files >= this size download via tempLink
	Enc               encoder.MultiEncoder `config:"encoding"`           // filename encoding rules
}
   157  
// Fs represents a remote acd server
type Fs struct {
	name         string             // name of this remote
	features     *fs.Features       // optional features
	opt          Options            // options for this Fs
	c            *acd.Client        // the connection to the acd server
	noAuthClient *http.Client       // unauthenticated http client
	root         string             // the path we are working on
	dirCache     *dircache.DirCache // Map of directory path to directory id
	pacer        *fs.Pacer          // pacer for API calls
	trueRootID   string             // ID of true root directory
	tokenRenewer *oauthutil.Renew   // renew the token on expiry
}
   171  
// Object describes an acd object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs     *Fs       // what this object is part of
	remote string    // The remote path
	info   *acd.Node // Info from the acd object if known (nil until readMetaData)
}
   180  
   181  // ------------------------------------------------------------
   182  
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
   187  
// Root of the remote (as passed into NewFs)
//
// Note that NewFs may rewrite this to the parent directory when the
// supplied root turns out to be a file.
func (f *Fs) Root() string {
	return f.root
}
   192  
   193  // String converts this Fs to a string
   194  func (f *Fs) String() string {
   195  	return fmt.Sprintf("amazon drive root '%s'", f.root)
   196  }
   197  
   198  // Features returns the optional features of this Fs
   199  func (f *Fs) Features() *fs.Features {
   200  	return f.features
   201  }
   202  
   203  // parsePath parses an acd 'url'
   204  func parsePath(path string) (root string) {
   205  	root = strings.Trim(path, "/")
   206  	return
   207  }
   208  
// retryErrorCodes is a slice of error codes that we will retry
//
// Used by shouldRetry via fserrors.ShouldRetryHTTP.
var retryErrorCodes = []int{
	400, // Bad request (seen in "Next token is expired")
	401, // Unauthorized (seen in "Token has expired")
	408, // Request Timeout
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	502, // Bad Gateway when doing big listings
	503, // Service Unavailable
	504, // Gateway Time-out
}
   220  
   221  // shouldRetry returns a boolean as to whether this resp and err
   222  // deserve to be retried.  It returns the err as a convenience
   223  func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
   224  	if resp != nil {
   225  		if resp.StatusCode == 401 {
   226  			f.tokenRenewer.Invalidate()
   227  			fs.Debugf(f, "401 error received - invalidating token")
   228  			return true, err
   229  		}
   230  		// Work around receiving this error sporadically on authentication
   231  		//
   232  		// HTTP code 403: "403 Forbidden", response body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
   233  		if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") {
   234  			fs.Debugf(f, "403 \"Authorization header requires...\" error received - retry")
   235  			return true, err
   236  		}
   237  	}
   238  	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
   239  }
   240  
   241  // If query parameters contain X-Amz-Algorithm remove Authorization header
   242  //
   243  // This happens when ACD redirects to S3 for the download.  The oauth
   244  // transport puts an Authorization header in which we need to remove
   245  // otherwise we get this message from AWS
   246  //
   247  // Only one auth mechanism allowed; only the X-Amz-Algorithm query
   248  // parameter, Signature query string parameter or the Authorization
   249  // header should be specified
   250  func filterRequest(req *http.Request) {
   251  	if req.URL.Query().Get("X-Amz-Algorithm") != "" {
   252  		fs.Debugf(nil, "Removing Authorization: header after redirect to S3")
   253  		req.Header.Del("Authorization")
   254  	}
   255  }
   256  
// NewFs constructs an Fs from the path, container:path
//
// It configures OAuth, discovers the account endpoints and the true
// root node ID, then resolves root in the directory cache.  If root
// turns out to be a file it returns the parent directory Fs together
// with fs.ErrorIsFile.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	ctx := context.Background()
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	root = parsePath(root)
	baseClient := fshttp.NewClient(fs.Config)
	// Install filterRequest so the Authorization header is stripped
	// when ACD redirects downloads to S3 (see filterRequest above).
	// The type assertion checks the transport supports request filters.
	if do, ok := baseClient.Transport.(interface {
		SetRequestFilter(f func(req *http.Request))
	}); ok {
		do.SetRequestFilter(filterRequest)
	} else {
		fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
	}
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure Amazon Drive")
	}

	c := acd.NewClient(oAuthClient)
	f := &Fs{
		name:         name,
		root:         root,
		opt:          *opt,
		c:            c,
		pacer:        fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
		noAuthClient: fshttp.NewClient(fs.Config),
	}
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		ReadMimeType:            true,
		CanHaveEmptyDirectories: true,
	}).Fill(f)

	// Renew the token in the background
	//
	// getRootInfo is used as a cheap authenticated call to exercise
	// the token.
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, err := f.getRootInfo()
		return err
	})

	// Update endpoints
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		_, resp, err = f.c.Account.GetEndpoints()
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to get endpoints")
	}

	// Get rootID
	rootInfo, err := f.getRootInfo()
	if err != nil || rootInfo.Id == nil {
		return nil, errors.Wrap(err, "failed to get root")
	}
	f.trueRootID = *rootInfo.Id

	f.dirCache = dircache.New(root, f.trueRootID, f)

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
   352  
   353  // getRootInfo gets the root folder info
   354  func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
   355  	var resp *http.Response
   356  	err = f.pacer.Call(func() (bool, error) {
   357  		rootInfo, resp, err = f.c.Nodes.GetRoot()
   358  		return f.shouldRetry(resp, err)
   359  	})
   360  	return rootInfo, err
   361  }
   362  
   363  // Return an Object from a path
   364  //
   365  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   366  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) {
   367  	o := &Object{
   368  		fs:     f,
   369  		remote: remote,
   370  	}
   371  	if info != nil {
   372  		// Set info but not meta
   373  		o.info = info
   374  	} else {
   375  		err := o.readMetaData(ctx) // reads info and meta, returning an error
   376  		if err != nil {
   377  			return nil, err
   378  		}
   379  	}
   380  	return o, nil
   381  }
   382  
// NewObject finds the Object at remote.  If it can't be found
// it returns the error fs.ErrorObjectNotFound.
//
// Thin wrapper over newObjectWithInfo with no cached node info.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}
   388  
   389  // FindLeaf finds a directory of name leaf in the folder with ID pathID
   390  func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
   391  	//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
   392  	folder := acd.FolderFromId(pathID, f.c.Nodes)
   393  	var resp *http.Response
   394  	var subFolder *acd.Folder
   395  	err = f.pacer.Call(func() (bool, error) {
   396  		subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
   397  		return f.shouldRetry(resp, err)
   398  	})
   399  	if err != nil {
   400  		if err == acd.ErrorNodeNotFound {
   401  			//fs.Debugf(f, "...Not found")
   402  			return "", false, nil
   403  		}
   404  		//fs.Debugf(f, "...Error %v", err)
   405  		return "", false, err
   406  	}
   407  	if subFolder.Status != nil && *subFolder.Status != statusAvailable {
   408  		fs.Debugf(f, "Ignoring folder %q in state %q", leaf, *subFolder.Status)
   409  		time.Sleep(1 * time.Second) // FIXME wait for problem to go away!
   410  		return "", false, nil
   411  	}
   412  	//fs.Debugf(f, "...Found(%q, %v)", *subFolder.Id, leaf)
   413  	return *subFolder.Id, true, nil
   414  }
   415  
   416  // CreateDir makes a directory with pathID as parent and name leaf
   417  func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
   418  	//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
   419  	folder := acd.FolderFromId(pathID, f.c.Nodes)
   420  	var resp *http.Response
   421  	var info *acd.Folder
   422  	err = f.pacer.Call(func() (bool, error) {
   423  		info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
   424  		return f.shouldRetry(resp, err)
   425  	})
   426  	if err != nil {
   427  		//fmt.Printf("...Error %v\n", err)
   428  		return "", err
   429  	}
   430  	//fmt.Printf("...Id %q\n", *info.Id)
   431  	return *info.Id, nil
   432  }
   433  
// list the objects into the function supplied
//
// If directories is set it only sends directories
// (the two comment lines above describe listAll below)
//
// listAllFn is the user function used by listAll to process a File
// item found in the listing.
//
// Should return true to finish processing
type listAllFn func(*acd.Node) bool
   441  
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// Nodes that are not AVAILABLE, or that don't actually list dirID as
// a parent, are skipped.  Results are buffered and only handed to fn
// once the whole listing has completed so a mid-listing retry doesn't
// deliver duplicates.
//
// The title parameter is currently unused.
func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	query := "parents:" + dirID
	if directoriesOnly {
		query += " AND kind:" + folderKind
	} else if filesOnly {
		query += " AND kind:" + fileKind
	} else {
		// FIXME none of these work
		//query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
		//query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
	}
	opts := acd.NodeListOptions{
		Filters: query,
	}
	var nodes []*acd.Node
	var out []*acd.Node
	//var resp *http.Response
	// Page through the listing; presumably GetNodes advances paging
	// state held in opts and returns nil when exhausted — the loop
	// ends on a nil nodes result.  TODO confirm against go-acd.
	for {
		var resp *http.Response
		err = f.pacer.CallNoRetry(func() (bool, error) {
			nodes, resp, err = f.c.Nodes.GetNodes(&opts)
			return f.shouldRetry(resp, err)
		})
		if err != nil {
			return false, err
		}
		if nodes == nil {
			break
		}
		for _, node := range nodes {
			// Skip nodes missing any of the fields we need
			if node.Name != nil && node.Id != nil && node.Kind != nil && node.Status != nil {
				// Ignore nodes if not AVAILABLE
				if *node.Status != statusAvailable {
					continue
				}
				// Ignore bogus nodes Amazon Drive sometimes reports
				hasValidParent := false
				for _, parent := range node.Parents {
					if parent == dirID {
						hasValidParent = true
						break
					}
				}
				if !hasValidParent {
					continue
				}
				*node.Name = f.opt.Enc.ToStandardName(*node.Name)
				// Store the nodes up in case we have to retry the listing
				out = append(out, node)
			}
		}
	}
	// Send the nodes now
	for _, node := range out {
		if fn(node) {
			found = true
			break
		}
	}
	return
}
   506  
// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// The whole listing is retried up to LowLevelRetries times when
// listAll reports a retryable error; entries is reset before each
// attempt so a partial listing is never returned.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		return nil, err
	}
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}
	maxTries := fs.Config.LowLevelRetries
	var iErr error
	for tries := 1; tries <= maxTries; tries++ {
		entries = nil
		_, err = f.listAll(directoryID, "", false, false, func(node *acd.Node) bool {
			remote := path.Join(dir, *node.Name)
			switch *node.Kind {
			case folderKind:
				// cache the directory ID for later lookups
				f.dirCache.Put(remote, *node.Id)
				when, _ := time.Parse(timeFormat, *node.ModifiedDate) // FIXME
				d := fs.NewDir(remote, when).SetID(*node.Id)
				entries = append(entries, d)
			case fileKind:
				o, err := f.newObjectWithInfo(ctx, remote, node)
				if err != nil {
					// Record the error and stop the listing early
					iErr = err
					return true
				}
				entries = append(entries, o)
			default:
				// ignore ASSET etc
			}
			return false
		})
		if iErr != nil {
			return nil, iErr
		}
		if fserrors.IsRetryError(err) {
			fs.Debugf(f, "Directory listing error for %q: %v - low level retry %d/%d", dir, err, tries, maxTries)
			continue
		}
		if err != nil {
			return nil, err
		}
		break
	}
	return entries, nil
}
   564  
// checkUpload checks to see if an error occurred after the file was
// completely uploaded.
//
// If it was then it waits for a while to see if the file really
// exists and is the right size and returns an updated info.
//
// If the file wasn't found or was the wrong size then it returns the
// original error.
//
// This is a workaround for Amazon sometimes returning
//
//  * 408 REQUEST_TIMEOUT
//  * 504 GATEWAY_TIMEOUT
//  * 500 Internal server error
//
// At the end of large uploads.  The speculation is that the timeout
// is waiting for the sha1 hashing to complete and the file may well
// be properly uploaded.
//
// fixedError is true only when the object was found with the correct
// size, in which case info and err describe the found object.
// The uploadTime parameter is currently unused.
func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
	// Return if no error - all is well
	if inErr == nil {
		return false, inInfo, inErr
	}
	// If not one of the errors we can fix return
	// if resp == nil || resp.StatusCode != 408 && resp.StatusCode != 500 && resp.StatusCode != 504 {
	// 	return false, inInfo, inErr
	// }

	// The HTTP status
	httpStatus := "HTTP status UNKNOWN"
	if resp != nil {
		httpStatus = resp.Status
	}

	// check to see if we read to the end
	//
	// A successful 1-byte read means the body wasn't fully consumed,
	// so the upload definitely didn't complete - give up immediately.
	buf := make([]byte, 1)
	n, err := in.Read(buf)
	if !(n == 0 && err == io.EOF) {
		fs.Debugf(src, "Upload error detected but didn't finish upload: %v (%q)", inErr, httpStatus)
		return false, inInfo, inErr
	}

	// Don't wait for uploads - assume they will appear later
	if f.opt.UploadWaitPerGB <= 0 {
		fs.Debugf(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
		return false, inInfo, inErr
	}

	// Time we should wait for the upload
	uploadWaitPerByte := float64(f.opt.UploadWaitPerGB) / 1024 / 1024 / 1024
	timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))

	const sleepTime = 5 * time.Second                        // sleep between tries
	retries := int((timeToWait + sleepTime - 1) / sleepTime) // number of retries, rounded up

	fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
	remote := src.Remote()
	// Poll for the object until it appears with the right size or we
	// run out of retries.
	for i := 1; i <= retries; i++ {
		o, err := f.NewObject(ctx, remote)
		if err == fs.ErrorObjectNotFound {
			fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
		} else if err != nil {
			fs.Debugf(src, "Object returned error - waiting (%d/%d): %v", i, retries, err)
		} else {
			if src.Size() == o.Size() {
				fs.Debugf(src, "Object found with correct size %d after waiting (%d/%d) - %v - returning with no error", src.Size(), i, retries, sleepTime*time.Duration(i-1))
				info = &acd.File{
					Node: o.(*Object).info,
				}
				return true, info, nil
			}
			fs.Debugf(src, "Object found but wrong size %d vs %d - waiting (%d/%d)", src.Size(), o.Size(), i, retries)
		}
		time.Sleep(sleepTime)
	}
	fs.Debugf(src, "Giving up waiting for object - returning original error: %v (%q)", inErr, httpStatus)
	return false, inInfo, inErr
}
   643  
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
//
// If the object already exists it is updated in place; otherwise it
// is created in its parent directory (which is created if needed).
// Upload failures go through checkUpload which may decide the upload
// actually succeeded (see checkUpload above).
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: remote,
	}
	// Check if object already exists
	err := o.readMetaData(ctx)
	switch err {
	case nil:
		return o, o.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		// Not found so create it
	default:
		return nil, err
	}
	// If not create it
	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
	if err != nil {
		return nil, err
	}
	if size > warnFileSize {
		fs.Logf(f, "Warning: file %q may fail because it is too big. Use --max-size=%dM to skip large files.", remote, warnFileSize>>20)
	}
	folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
	var info *acd.File
	var resp *http.Response
	// CallNoRetry: checkUpload decides whether the failure is real,
	// and shouldRetry decides whether to retry a real failure.
	err = f.pacer.CallNoRetry(func() (bool, error) {
		start := time.Now()
		// Keep the token fresh for the duration of the upload
		f.tokenRenewer.Start()
		info, resp, err = folder.Put(in, f.opt.Enc.FromStandardName(leaf))
		f.tokenRenewer.Stop()
		var ok bool
		ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
		if ok {
			return false, nil
		}
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	o.info = info.Node
	return o, nil
}
   696  
   697  // Mkdir creates the container if it doesn't exist
   698  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   699  	err := f.dirCache.FindRoot(ctx, true)
   700  	if err != nil {
   701  		return err
   702  	}
   703  	if dir != "" {
   704  		_, err = f.dirCache.FindDir(ctx, dir, true)
   705  	}
   706  	return err
   707  }
   708  
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
//
// After the move it polls both paths until the listing reflects the
// change, since Amazon Drive listings are eventually consistent.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	//  go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// create the destination directory if necessary
	err := f.dirCache.FindRoot(ctx, true)
	if err != nil {
		return nil, err
	}
	srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
	if err != nil {
		return nil, err
	}
	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, remote, true)
	if err != nil {
		return nil, err
	}
	err = f.moveNode(srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
	if err != nil {
		return nil, err
	}
	// Wait for directory caching so we can no longer see the old
	// object and see the new object
	time.Sleep(200 * time.Millisecond) // enough time 90% of the time
	var (
		dstObj         fs.Object
		srcErr, dstErr error
	)
	for i := 1; i <= fs.Config.LowLevelRetries; i++ {
		_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
		if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
			// exit if error on source
			return nil, srcErr
		}
		dstObj, dstErr = f.NewObject(ctx, remote)
		if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
			// exit if error on dst
			return nil, dstErr
		}
		if srcErr == fs.ErrorObjectNotFound && dstErr == nil {
			// finished if src not found and dst found
			break
		}
		fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, fs.Config.LowLevelRetries)
		time.Sleep(1 * time.Second)
	}
	return dstObj, dstErr
}
   770  
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	f.dirCache.ResetRoot()
}
   776  
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(src, "DirMove error: not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := path.Join(srcFs.root, srcRemote)
	dstPath := path.Join(f.root, dstRemote)

	// Refuse to move to or from the root
	if srcPath == "" || dstPath == "" {
		fs.Debugf(src, "DirMove error: Can't move root")
		return errors.New("can't move root directory")
	}

	// find the root src directory
	err = srcFs.dirCache.FindRoot(ctx, false)
	if err != nil {
		return err
	}

	// find the root dst directory
	if dstRemote != "" {
		err = f.dirCache.FindRoot(ctx, true)
		if err != nil {
			return err
		}
	} else {
		// dst is the root itself - it must not already exist
		if f.dirCache.FoundRoot() {
			return fs.ErrorDirExists
		}
	}

	// Find ID of dst parent, creating subdirs if necessary
	findPath := dstRemote
	if dstRemote == "" {
		findPath = f.root
	}
	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true)
	if err != nil {
		return err
	}

	// Check destination does not exist
	if dstRemote != "" {
		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
		if err == fs.ErrorDirNotFound {
			// OK
		} else if err != nil {
			return err
		} else {
			return fs.ErrorDirExists
		}
	}

	// Find ID of src parent
	findPath = srcRemote
	var srcDirectoryID string
	if srcRemote == "" {
		// src is the root itself, so its parent is the root's parent
		srcDirectoryID, err = srcFs.dirCache.RootParentID()
	} else {
		_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
	}
	if err != nil {
		return err
	}
	srcLeaf, _ := dircache.SplitPath(srcPath)

	// Find ID of src
	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
	if err != nil {
		return err
	}

	// FIXME make a proper node.UpdateMetadata command
	// Fetch full metadata for the src node - moveNode needs the
	// Parents list to decide whether a plain rename is safe.
	srcInfo := acd.NodeFromId(srcID, f.c.Nodes)
	var jsonStr string
	err = srcFs.pacer.Call(func() (bool, error) {
		jsonStr, err = srcInfo.GetMetadata()
		return srcFs.shouldRetry(nil, err)
	})
	if err != nil {
		fs.Debugf(src, "DirMove error: error reading src metadata: %v", err)
		return err
	}
	err = json.Unmarshal([]byte(jsonStr), &srcInfo)
	if err != nil {
		fs.Debugf(src, "DirMove error: error reading unpacking src metadata: %v", err)
		return err
	}

	err = f.moveNode(srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
	if err != nil {
		return err
	}

	// Invalidate the cached entry for the directory we just moved
	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}
   884  
   885  // purgeCheck remotes the root directory, if check is set then it
   886  // refuses to do so if it has anything in
   887  func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
   888  	root := path.Join(f.root, dir)
   889  	if root == "" {
   890  		return errors.New("can't purge root directory")
   891  	}
   892  	dc := f.dirCache
   893  	err := dc.FindRoot(ctx, false)
   894  	if err != nil {
   895  		return err
   896  	}
   897  	rootID, err := dc.FindDir(ctx, dir, false)
   898  	if err != nil {
   899  		return err
   900  	}
   901  
   902  	if check {
   903  		// check directory is empty
   904  		empty := true
   905  		_, err = f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
   906  			switch *node.Kind {
   907  			case folderKind:
   908  				empty = false
   909  				return true
   910  			case fileKind:
   911  				empty = false
   912  				return true
   913  			default:
   914  				fs.Debugf("Found ASSET %s", *node.Id)
   915  			}
   916  			return false
   917  		})
   918  		if err != nil {
   919  			return err
   920  		}
   921  		if !empty {
   922  			return errors.New("directory not empty")
   923  		}
   924  	}
   925  
   926  	node := acd.NodeFromId(rootID, f.c.Nodes)
   927  	var resp *http.Response
   928  	err = f.pacer.Call(func() (bool, error) {
   929  		resp, err = node.Trash()
   930  		return f.shouldRetry(resp, err)
   931  	})
   932  	if err != nil {
   933  		return err
   934  	}
   935  
   936  	f.dirCache.FlushDir(dir)
   937  	if err != nil {
   938  		return err
   939  	}
   940  	return nil
   941  }
   942  
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	// check=true makes purgeCheck refuse to delete a non-empty directory
	return f.purgeCheck(ctx, dir, true)
}
   949  
// Precision return the precision of this Fs
//
// Modification times can't be stored on this backend (see SetModTime),
// so report that they aren't supported.
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}
   954  
// Hashes returns the supported hash sets.
//
// Only MD5 is available (see Object.Hash).
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}
   959  
   960  // Copy src to this remote using server side copy operations.
   961  //
   962  // This is stored with the remote path given
   963  //
   964  // It returns the destination Object and a possible error
   965  //
   966  // Will only be called if src.Fs().Name() == f.Name()
   967  //
   968  // If it isn't possible then return fs.ErrorCantCopy
   969  //func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
   970  // srcObj, ok := src.(*Object)
   971  // if !ok {
   972  // 	fs.Debugf(src, "Can't copy - not same remote type")
   973  // 	return nil, fs.ErrorCantCopy
   974  // }
   975  // srcFs := srcObj.fs
   976  // _, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
   977  // if err != nil {
   978  // 	return nil, err
   979  // }
   980  // return f.NewObject(ctx, remote), nil
   981  //}
   982  
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
	// check=false trashes the root even if it contains entries
	return f.purgeCheck(ctx, "", false)
}
   991  
   992  // ------------------------------------------------------------
   993  
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}
   998  
   999  // Return a string version
  1000  func (o *Object) String() string {
  1001  	if o == nil {
  1002  		return "<nil>"
  1003  	}
  1004  	return o.remote
  1005  }
  1006  
// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}
  1011  
  1012  // Hash returns the Md5sum of an object returning a lowercase hex string
  1013  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  1014  	if t != hash.MD5 {
  1015  		return "", hash.ErrUnsupported
  1016  	}
  1017  	if o.info.ContentProperties != nil && o.info.ContentProperties.Md5 != nil {
  1018  		return *o.info.ContentProperties.Md5, nil
  1019  	}
  1020  	return "", nil
  1021  }
  1022  
  1023  // Size returns the size of an object in bytes
  1024  func (o *Object) Size() int64 {
  1025  	if o.info.ContentProperties != nil && o.info.ContentProperties.Size != nil {
  1026  		return int64(*o.info.ContentProperties.Size)
  1027  	}
  1028  	return 0 // Object is likely PENDING
  1029  }
  1030  
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (o *Object) readMetaData(ctx context.Context) (err error) {
	// already fetched - nothing to do
	if o.info != nil {
		return nil
	}
	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
	if err != nil {
		// a missing parent directory means the object can't exist
		if err == fs.ErrorDirNotFound {
			return fs.ErrorObjectNotFound
		}
		return err
	}
	// look the leaf name up in its parent folder, retrying via the pacer
	folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
	var resp *http.Response
	var info *acd.File
	err = o.fs.pacer.Call(func() (bool, error) {
		info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
		return o.fs.shouldRetry(resp, err)
	})
	if err != nil {
		// translate the library's not-found error into rclone's
		if err == acd.ErrorNodeNotFound {
			return fs.ErrorObjectNotFound
		}
		return err
	}
	o.info = info.Node
	return nil
}
  1063  
  1064  // ModTime returns the modification time of the object
  1065  //
  1066  //
  1067  // It attempts to read the objects mtime and if that isn't present the
  1068  // LastModified returned in the http headers
  1069  func (o *Object) ModTime(ctx context.Context) time.Time {
  1070  	err := o.readMetaData(ctx)
  1071  	if err != nil {
  1072  		fs.Debugf(o, "Failed to read metadata: %v", err)
  1073  		return time.Now()
  1074  	}
  1075  	modTime, err := time.Parse(timeFormat, *o.info.ModifiedDate)
  1076  	if err != nil {
  1077  		fs.Debugf(o, "Failed to read mtime from object: %v", err)
  1078  		return time.Now()
  1079  	}
  1080  	return modTime
  1081  }
  1082  
// SetModTime sets the modification time of the local fs object
//
// Amazon Cloud Drive doesn't support setting modification times, so
// this always returns fs.ErrorCantSetModTime.
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	// FIXME not implemented
	return fs.ErrorCantSetModTime
}
  1088  
// Storable returns a boolean showing whether this object storable
func (o *Object) Storable() bool {
	return true
}
  1093  
// Open an object for read
//
// Objects at or above TempLinkThreshold are downloaded via a
// temporary link using the unauthenticated client; smaller ones are
// fetched directly.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
	if bigObject {
		fs.Debugf(o, "Downloading large object via tempLink")
	}
	file := acd.File{Node: o.info}
	var resp *http.Response
	// pass through any range/other open options as HTTP headers
	headers := fs.OpenOptionHeaders(options)
	err = o.fs.pacer.Call(func() (bool, error) {
		if !bigObject {
			in, resp, err = file.OpenHeaders(headers)
		} else {
			in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
		}
		return o.fs.shouldRetry(resp, err)
	})
	return in, err
}
  1113  
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	file := acd.File{Node: o.info}
	var info *acd.File
	var resp *http.Response
	var err error
	// CallNoRetry: retry decisions are delegated to checkUpload below,
	// since the reader may already be partially consumed
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		start := time.Now()
		// keep the oauth token fresh for the duration of the upload
		o.fs.tokenRenewer.Start()
		info, resp, err = file.Overwrite(in)
		o.fs.tokenRenewer.Stop()
		var ok bool
		// checkUpload works around uploads that report failure but
		// actually succeeded on the remote
		ok, info, err = o.fs.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
		if ok {
			return false, nil
		}
		return o.fs.shouldRetry(resp, err)
	})
	if err != nil {
		return err
	}
	o.info = info.Node
	return nil
}
  1140  
  1141  // Remove a node
  1142  func (f *Fs) removeNode(info *acd.Node) error {
  1143  	var resp *http.Response
  1144  	var err error
  1145  	err = f.pacer.Call(func() (bool, error) {
  1146  		resp, err = info.Trash()
  1147  		return f.shouldRetry(resp, err)
  1148  	})
  1149  	return err
  1150  }
  1151  
// Remove an object
//
// Trashes the node on the remote.
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.removeNode(o.info)
}
  1156  
// Restore a node
//
// Brings a trashed node back, returning the updated node metadata.
func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		newInfo, resp, err = info.Restore()
		return f.shouldRetry(resp, err)
	})
	return newInfo, err
}
  1166  
// Changes name of given node
//
// The new name is encoded with the configured encoder before being
// sent to the remote.
func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName))
		return f.shouldRetry(resp, err)
	})
	return newInfo, err
}
  1176  
// Replaces one parent with another, effectively moving the file. Leaves other
// parents untouched. ReplaceParent cannot be used when the file is trashed.
func (f *Fs) replaceParent(info *acd.Node, oldParentID string, newParentID string) error {
	return f.pacer.Call(func() (bool, error) {
		resp, err := info.ReplaceParent(oldParentID, newParentID)
		return f.shouldRetry(resp, err)
	})
}
  1185  
// Adds one additional parent to object.
func (f *Fs) addParent(info *acd.Node, newParentID string) error {
	return f.pacer.Call(func() (bool, error) {
		resp, err := info.AddParent(newParentID)
		return f.shouldRetry(resp, err)
	})
}
  1193  
// Remove given parent from object, leaving the other possible
// parents untouched. Object can end up having no parents.
func (f *Fs) removeParent(info *acd.Node, parentID string) error {
	return f.pacer.Call(func() (bool, error) {
		resp, err := info.RemoveParent(parentID)
		return f.shouldRetry(resp, err)
	})
}
  1202  
// moveNode moves the node given from the srcLeaf,srcDirectoryID to
// the dstLeaf,dstDirectoryID
//
// Fast path: rename (if the leaf changed) then replace the parent (if
// the directory changed). If the rename fails - presumably because a
// node of the target name already exists - it falls back to a
// trash/rename/re-parent/restore sequence where renames are allowed.
//
// useDirErrorMsgs selects fs.ErrorCantDirMove over fs.ErrorCantMove
// for the multiple-parents refusal below.
func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
	// fs.Debugf(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID)
	cantMove := fs.ErrorCantMove
	if useDirErrorMsgs {
		cantMove = fs.ErrorCantDirMove
	}

	// renaming a node with several parents would rename it everywhere,
	// so refuse rather than silently change other paths
	if len(srcInfo.Parents) > 1 && srcLeaf != dstLeaf {
		fs.Debugf(name, "Move error: object is attached to multiple parents and should be renamed. This would change the name of the node in all parents.")
		return cantMove
	}

	if srcLeaf != dstLeaf {
		// fs.Debugf(name, "renaming")
		_, err = f.renameNode(srcInfo, dstLeaf)
		if err != nil {
			fs.Debugf(name, "Move: quick path rename failed: %v", err)
			goto OnConflict
		}
	}
	if srcDirectoryID != dstDirectoryID {
		// fs.Debugf(name, "trying parent replace: %s -> %s", oldParentID, newParentID)
		err = f.replaceParent(srcInfo, srcDirectoryID, dstDirectoryID)
		if err != nil {
			fs.Debugf(name, "Move: quick path parent replace failed: %v", err)
			return err
		}
	}

	return nil

OnConflict:
	fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.")

	// fs.Debugf(name, "Trashing file")
	err = f.removeNode(srcInfo)
	if err != nil {
		fs.Debugf(name, "Move: remove node failed: %v", err)
		return err
	}
	// fs.Debugf(name, "Renaming file")
	_, err = f.renameNode(srcInfo, dstLeaf)
	if err != nil {
		fs.Debugf(name, "Move: rename node failed: %v", err)
		return err
	}
	// note: replacing parent is forbidden by API, modifying them individually is
	// okay though
	// fs.Debugf(name, "Adding target parent")
	err = f.addParent(srcInfo, dstDirectoryID)
	if err != nil {
		fs.Debugf(name, "Move: addParent failed: %v", err)
		return err
	}
	// fs.Debugf(name, "removing original parent")
	err = f.removeParent(srcInfo, srcDirectoryID)
	if err != nil {
		fs.Debugf(name, "Move: removeParent failed: %v", err)
		return err
	}
	// fs.Debugf(name, "Restoring")
	_, err = f.restoreNode(srcInfo)
	if err != nil {
		fs.Debugf(name, "Move: restoreNode node failed: %v", err)
		return err
	}
	return nil
}
  1273  
  1274  // MimeType of an Object if known, "" otherwise
  1275  func (o *Object) MimeType(ctx context.Context) string {
  1276  	if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil {
  1277  		return *o.info.ContentProperties.ContentType
  1278  	}
  1279  	return ""
  1280  }
  1281  
  1282  // ChangeNotify calls the passed function with a path that has had changes.
  1283  // If the implementation uses polling, it should adhere to the given interval.
  1284  //
  1285  // Automatically restarts itself in case of unexpected behaviour of the remote.
  1286  //
  1287  // Close the returned channel to stop being notified.
  1288  func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
  1289  	checkpoint := f.opt.Checkpoint
  1290  
  1291  	go func() {
  1292  		var ticker *time.Ticker
  1293  		var tickerC <-chan time.Time
  1294  		for {
  1295  			select {
  1296  			case pollInterval, ok := <-pollIntervalChan:
  1297  				if !ok {
  1298  					if ticker != nil {
  1299  						ticker.Stop()
  1300  					}
  1301  					return
  1302  				}
  1303  				if pollInterval == 0 {
  1304  					if ticker != nil {
  1305  						ticker.Stop()
  1306  						ticker, tickerC = nil, nil
  1307  					}
  1308  				} else {
  1309  					ticker = time.NewTicker(pollInterval)
  1310  					tickerC = ticker.C
  1311  				}
  1312  			case <-tickerC:
  1313  				checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
  1314  				if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
  1315  					fs.Debugf(f, "Unable to save checkpoint: %v", err)
  1316  				}
  1317  			}
  1318  		}
  1319  	}()
  1320  }
  1321  
// changeNotifyRunner polls the remote's Changes API from checkpoint,
// calling notifyFunc for each changed path it can map through the
// directory cache. It returns the checkpoint to resume from next time.
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
	var err error
	var resp *http.Response
	var reachedEnd bool
	var csCount int
	var nodeCount int

	fs.Debugf(f, "Checking for changes on remote (Checkpoint %q)", checkpoint)
	err = f.pacer.CallNoRetry(func() (bool, error) {
		resp, err = f.c.Changes.GetChangesFunc(&acd.ChangesOptions{
			Checkpoint:    checkpoint,
			IncludePurged: true,
		}, func(changeSet *acd.ChangeSet, err error) error {
			if err != nil {
				return err
			}

			type entryType struct {
				path      string
				entryType fs.EntryType
			}
			var pathsToClear []entryType
			csCount++
			nodeCount += len(changeSet.Nodes)
			if changeSet.End {
				reachedEnd = true
			}
			if changeSet.Checkpoint != "" {
				checkpoint = changeSet.Checkpoint
			}
			for _, node := range changeSet.Nodes {
				// if the node itself is in the dir cache, notify its path
				if path, ok := f.dirCache.GetInv(*node.Id); ok {
					if node.IsFile() {
						pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
					} else {
						pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory})
					}
					continue
				}

				if node.IsFile() {
					// translate the parent dir of this object
					if len(node.Parents) > 0 {
						if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
							// and append the drive file name to compute the full file name
							name := f.opt.Enc.ToStandardName(*node.Name)
							if len(path) > 0 {
								path = path + "/" + name
							} else {
								path = name
							}
							// this will now clear the actual file too
							pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
						}
					} else { // a true root object that is changed
						pathsToClear = append(pathsToClear, entryType{path: *node.Name, entryType: fs.EntryObject})
					}
				}
			}

			// notify each distinct path only once per change set
			visitedPaths := make(map[string]bool)
			for _, entry := range pathsToClear {
				if _, ok := visitedPaths[entry.path]; ok {
					continue
				}
				visitedPaths[entry.path] = true
				notifyFunc(entry.path, entry.entryType)
			}

			return nil
		})
		return false, err
	})
	fs.Debugf(f, "Got %d ChangeSets with %d Nodes", csCount, nodeCount)

	// ErrUnexpectedEOF is how the streaming decoder signals the normal
	// end of the changes stream, so don't treat it as a failure
	if err != nil && err != io.ErrUnexpectedEOF {
		fs.Debugf(f, "Failed to get Changes: %v", err)
		return checkpoint
	}

	if reachedEnd {
		reachedEnd = false
		fs.Debugf(f, "All changes were processed. Waiting for more.")
	} else if checkpoint == "" {
		fs.Debugf(f, "Did not get any checkpoint, something went wrong! %+v", resp)
	}
	return checkpoint
}
  1410  
  1411  // ID returns the ID of the Object if known, or "" if not
  1412  func (o *Object) ID() string {
  1413  	if o.info.Id == nil {
  1414  		return ""
  1415  	}
  1416  	return *o.info.Id
  1417  }
  1418  
  1419  // Check the interfaces are satisfied
  1420  var (
  1421  	_ fs.Fs     = (*Fs)(nil)
  1422  	_ fs.Purger = (*Fs)(nil)
  1423  	//	_ fs.Copier   = (*Fs)(nil)
  1424  	_ fs.Mover           = (*Fs)(nil)
  1425  	_ fs.DirMover        = (*Fs)(nil)
  1426  	_ fs.DirCacheFlusher = (*Fs)(nil)
  1427  	_ fs.ChangeNotifier  = (*Fs)(nil)
  1428  	_ fs.Object          = (*Object)(nil)
  1429  	_ fs.MimeTyper       = &Object{}
  1430  	_ fs.IDer            = &Object{}
  1431  )