github.com/ncw/rclone@v1.48.1-0.20190724201158-a35aa1360e3e/backend/amazonclouddrive/amazonclouddrive.go (about)

     1  // Package amazonclouddrive provides an interface to the Amazon Cloud
     2  // Drive object storage system.
     3  package amazonclouddrive
     4  
     5  /*
     6  FIXME make searching for directory in id and file in id more efficient
     7  - use the name: search parameter - remember the escaping rules
     8  - use Folder GetNode and GetFile
     9  
    10  FIXME make the default for no files and no dirs be (FILE & FOLDER) so
    11  we ignore assets completely!
    12  */
    13  
    14  import (
    15  	"context"
    16  	"encoding/json"
    17  	"fmt"
    18  	"io"
    19  	"log"
    20  	"net/http"
    21  	"path"
    22  	"strings"
    23  	"time"
    24  
    25  	acd "github.com/ncw/go-acd"
    26  	"github.com/ncw/rclone/fs"
    27  	"github.com/ncw/rclone/fs/config"
    28  	"github.com/ncw/rclone/fs/config/configmap"
    29  	"github.com/ncw/rclone/fs/config/configstruct"
    30  	"github.com/ncw/rclone/fs/fserrors"
    31  	"github.com/ncw/rclone/fs/fshttp"
    32  	"github.com/ncw/rclone/fs/hash"
    33  	"github.com/ncw/rclone/lib/dircache"
    34  	"github.com/ncw/rclone/lib/oauthutil"
    35  	"github.com/ncw/rclone/lib/pacer"
    36  	"github.com/pkg/errors"
    37  	"golang.org/x/oauth2"
    38  )
    39  
// Constants used throughout the backend.
const (
	folderKind               = "FOLDER"               // node kind the API returns for directories
	fileKind                 = "FILE"                 // node kind the API returns for regular files
	statusAvailable          = "AVAILABLE"            // only nodes in this status are listed/used
	timeFormat               = time.RFC3339 // 2014-03-07T22:31:12.173Z
	minSleep                 = 20 * time.Millisecond  // minimum pacer sleep between API calls
	warnFileSize             = 50000 << 20            // Display warning for files larger than this size
	defaultTempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
)
    49  
    50  // Globals
var (
	// Description of how to auth for this app
	//
	// ClientID and ClientSecret are empty here; presumably they are
	// supplied from the user's config via the client_id/client_secret
	// options registered in init — confirm in oauthutil.Config.
	acdConfig = &oauth2.Config{
		Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://www.amazon.com/ap/oa",
			TokenURL: "https://api.amazon.com/auth/o2/token",
		},
		ClientID:     "",
		ClientSecret: "",
		RedirectURL:  oauthutil.RedirectURL,
	}
)
    64  
    65  // Register with Fs
    66  func init() {
    67  	fs.Register(&fs.RegInfo{
    68  		Name:        "amazon cloud drive",
    69  		Prefix:      "acd",
    70  		Description: "Amazon Drive",
    71  		NewFs:       NewFs,
    72  		Config: func(name string, m configmap.Mapper) {
    73  			err := oauthutil.Config("amazon cloud drive", name, m, acdConfig)
    74  			if err != nil {
    75  				log.Fatalf("Failed to configure token: %v", err)
    76  			}
    77  		},
    78  		Options: []fs.Option{{
    79  			Name:     config.ConfigClientID,
    80  			Help:     "Amazon Application Client ID.",
    81  			Required: true,
    82  		}, {
    83  			Name:     config.ConfigClientSecret,
    84  			Help:     "Amazon Application Client Secret.",
    85  			Required: true,
    86  		}, {
    87  			Name:     config.ConfigAuthURL,
    88  			Help:     "Auth server URL.\nLeave blank to use Amazon's.",
    89  			Advanced: true,
    90  		}, {
    91  			Name:     config.ConfigTokenURL,
    92  			Help:     "Token server url.\nleave blank to use Amazon's.",
    93  			Advanced: true,
    94  		}, {
    95  			Name:     "checkpoint",
    96  			Help:     "Checkpoint for internal polling (debug).",
    97  			Hide:     fs.OptionHideBoth,
    98  			Advanced: true,
    99  		}, {
   100  			Name: "upload_wait_per_gb",
   101  			Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
   102  
   103  Sometimes Amazon Drive gives an error when a file has been fully
   104  uploaded but the file appears anyway after a little while.  This
   105  happens sometimes for files over 1GB in size and nearly every time for
   106  files bigger than 10GB. This parameter controls the time rclone waits
   107  for the file to appear.
   108  
   109  The default value for this parameter is 3 minutes per GB, so by
   110  default it will wait 3 minutes for every GB uploaded to see if the
   111  file appears.
   112  
   113  You can disable this feature by setting it to 0. This may cause
   114  conflict errors as rclone retries the failed upload but the file will
   115  most likely appear correctly eventually.
   116  
   117  These values were determined empirically by observing lots of uploads
   118  of big files for a range of file sizes.
   119  
   120  Upload with the "-v" flag to see more info about what rclone is doing
   121  in this situation.`,
   122  			Default:  fs.Duration(180 * time.Second),
   123  			Advanced: true,
   124  		}, {
   125  			Name: "templink_threshold",
   126  			Help: `Files >= this size will be downloaded via their tempLink.
   127  
   128  Files this size or more will be downloaded via their "tempLink". This
   129  is to work around a problem with Amazon Drive which blocks downloads
   130  of files bigger than about 10GB.  The default for this is 9GB which
   131  shouldn't need to be changed.
   132  
   133  To download files above this threshold, rclone requests a "tempLink"
   134  which downloads the file through a temporary URL directly from the
   135  underlying S3 storage.`,
   136  			Default:  defaultTempLinkThreshold,
   137  			Advanced: true,
   138  		}},
   139  	})
   140  }
   141  
   142  // Options defines the configuration for this backend
   143  type Options struct {
   144  	Checkpoint        string        `config:"checkpoint"`
   145  	UploadWaitPerGB   fs.Duration   `config:"upload_wait_per_gb"`
   146  	TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
   147  }
   148  
   149  // Fs represents a remote acd server
   150  type Fs struct {
   151  	name         string             // name of this remote
   152  	features     *fs.Features       // optional features
   153  	opt          Options            // options for this Fs
   154  	c            *acd.Client        // the connection to the acd server
   155  	noAuthClient *http.Client       // unauthenticated http client
   156  	root         string             // the path we are working on
   157  	dirCache     *dircache.DirCache // Map of directory path to directory id
   158  	pacer        *fs.Pacer          // pacer for API calls
   159  	trueRootID   string             // ID of true root directory
   160  	tokenRenewer *oauthutil.Renew   // renew the token on expiry
   161  }
   162  
   163  // Object describes a acd object
   164  //
   165  // Will definitely have info but maybe not meta
// Object describes a acd object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs     *Fs       // what this object is part of
	remote string    // The remote path
	info   *acd.Node // Info from the acd object if known; populated by readMetaData when not supplied
}
   171  
   172  // ------------------------------------------------------------
   173  
   174  // Name of the remote (as passed into NewFs)
// Name returns the configured name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
   178  
   179  // Root of the remote (as passed into NewFs)
// Root returns the root path of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}
   183  
   184  // String converts this Fs to a string
   185  func (f *Fs) String() string {
   186  	return fmt.Sprintf("amazon drive root '%s'", f.root)
   187  }
   188  
   189  // Features returns the optional features of this Fs
// Features returns the optional features of this Fs (set up in NewFs)
func (f *Fs) Features() *fs.Features {
	return f.features
}
   193  
   194  // parsePath parses an acd 'url'
   195  func parsePath(path string) (root string) {
   196  	root = strings.Trim(path, "/")
   197  	return
   198  }
   199  
   200  // retryErrorCodes is a slice of error codes that we will retry
// retryErrorCodes is a slice of error codes that we will retry
//
// Consumed by shouldRetry via fserrors.ShouldRetryHTTP.
var retryErrorCodes = []int{
	400, // Bad request (seen in "Next token is expired")
	401, // Unauthorized (seen in "Token has expired")
	408, // Request Timeout
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	502, // Bad Gateway when doing big listings
	503, // Service Unavailable
	504, // Gateway Time-out
}
   211  
   212  // shouldRetry returns a boolean as to whether this resp and err
   213  // deserve to be retried.  It returns the err as a convenience
   214  func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
   215  	if resp != nil {
   216  		if resp.StatusCode == 401 {
   217  			f.tokenRenewer.Invalidate()
   218  			fs.Debugf(f, "401 error received - invalidating token")
   219  			return true, err
   220  		}
   221  		// Work around receiving this error sporadically on authentication
   222  		//
   223  		// HTTP code 403: "403 Forbidden", reponse body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
   224  		if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") {
   225  			fs.Debugf(f, "403 \"Authorization header requires...\" error received - retry")
   226  			return true, err
   227  		}
   228  	}
   229  	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
   230  }
   231  
   232  // If query parameters contain X-Amz-Algorithm remove Authorization header
   233  //
   234  // This happens when ACD redirects to S3 for the download.  The oauth
   235  // transport puts an Authorization header in which we need to remove
   236  // otherwise we get this message from AWS
   237  //
   238  // Only one auth mechanism allowed; only the X-Amz-Algorithm query
   239  // parameter, Signature query string parameter or the Authorization
   240  // header should be specified
   241  func filterRequest(req *http.Request) {
   242  	if req.URL.Query().Get("X-Amz-Algorithm") != "" {
   243  		fs.Debugf(nil, "Removing Authorization: header after redirect to S3")
   244  		req.Header.Del("Authorization")
   245  	}
   246  }
   247  
   248  // NewFs constructs an Fs from the path, container:path
// NewFs constructs an Fs from the path, container:path
//
// It configures the oauth client, reads the true root ID from the
// server and resolves root.  If root turns out to be a file it returns
// an Fs pointing at the parent directory together with fs.ErrorIsFile.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	ctx := context.Background()
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	root = parsePath(root)
	baseClient := fshttp.NewClient(fs.Config)
	// Install filterRequest so the Authorization header is stripped
	// when ACD redirects a download to S3 - only possible if the
	// transport exposes SetRequestFilter.
	if do, ok := baseClient.Transport.(interface {
		SetRequestFilter(f func(req *http.Request))
	}); ok {
		do.SetRequestFilter(filterRequest)
	} else {
		fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
	}
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure Amazon Drive")
	}

	c := acd.NewClient(oAuthClient)
	f := &Fs{
		name:         name,
		root:         root,
		opt:          *opt,
		c:            c,
		pacer:        fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
		noAuthClient: fshttp.NewClient(fs.Config),
	}
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		ReadMimeType:            true,
		CanHaveEmptyDirectories: true,
	}).Fill(f)

	// Renew the token in the background - getRootInfo serves as a
	// cheap authenticated call to exercise the token.
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, err := f.getRootInfo()
		return err
	})

	// Update endpoints
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		_, resp, err = f.c.Account.GetEndpoints()
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to get endpoints")
	}

	// Get rootID
	rootInfo, err := f.getRootInfo()
	if err != nil || rootInfo.Id == nil {
		return nil, errors.Wrap(err, "failed to get root")
	}
	f.trueRootID = *rootInfo.Id

	f.dirCache = dircache.New(root, f.trueRootID, f)

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file: split off the leaf and check whether the
		// remaining path exists as a directory containing that file.
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/ncw/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
   343  
   344  // getRootInfo gets the root folder info
   345  func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
   346  	var resp *http.Response
   347  	err = f.pacer.Call(func() (bool, error) {
   348  		rootInfo, resp, err = f.c.Nodes.GetRoot()
   349  		return f.shouldRetry(resp, err)
   350  	})
   351  	return rootInfo, err
   352  }
   353  
   354  // Return an Object from a path
   355  //
   356  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   357  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) {
   358  	o := &Object{
   359  		fs:     f,
   360  		remote: remote,
   361  	}
   362  	if info != nil {
   363  		// Set info but not meta
   364  		o.info = info
   365  	} else {
   366  		err := o.readMetaData(ctx) // reads info and meta, returning an error
   367  		if err != nil {
   368  			return nil, err
   369  		}
   370  	}
   371  	return o, nil
   372  }
   373  
   374  // NewObject finds the Object at remote.  If it can't be found
   375  // it returns the error fs.ErrorObjectNotFound.
   376  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   377  	return f.newObjectWithInfo(ctx, remote, nil)
   378  }
   379  
   380  // FindLeaf finds a directory of name leaf in the folder with ID pathID
   381  func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
   382  	//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
   383  	folder := acd.FolderFromId(pathID, f.c.Nodes)
   384  	var resp *http.Response
   385  	var subFolder *acd.Folder
   386  	err = f.pacer.Call(func() (bool, error) {
   387  		subFolder, resp, err = folder.GetFolder(leaf)
   388  		return f.shouldRetry(resp, err)
   389  	})
   390  	if err != nil {
   391  		if err == acd.ErrorNodeNotFound {
   392  			//fs.Debugf(f, "...Not found")
   393  			return "", false, nil
   394  		}
   395  		//fs.Debugf(f, "...Error %v", err)
   396  		return "", false, err
   397  	}
   398  	if subFolder.Status != nil && *subFolder.Status != statusAvailable {
   399  		fs.Debugf(f, "Ignoring folder %q in state %q", leaf, *subFolder.Status)
   400  		time.Sleep(1 * time.Second) // FIXME wait for problem to go away!
   401  		return "", false, nil
   402  	}
   403  	//fs.Debugf(f, "...Found(%q, %v)", *subFolder.Id, leaf)
   404  	return *subFolder.Id, true, nil
   405  }
   406  
   407  // CreateDir makes a directory with pathID as parent and name leaf
   408  func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
   409  	//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
   410  	folder := acd.FolderFromId(pathID, f.c.Nodes)
   411  	var resp *http.Response
   412  	var info *acd.Folder
   413  	err = f.pacer.Call(func() (bool, error) {
   414  		info, resp, err = folder.CreateFolder(leaf)
   415  		return f.shouldRetry(resp, err)
   416  	})
   417  	if err != nil {
   418  		//fmt.Printf("...Error %v\n", err)
   419  		return "", err
   420  	}
   421  	//fmt.Printf("...Id %q\n", *info.Id)
   422  	return *info.Id, nil
   423  }
   424  
// listAllFn is the user function called by listAll for each node found
// in a directory listing.
//
// Should return true to finish processing early.
type listAllFn func(*acd.Node) bool
   432  
   433  // Lists the directory required calling the user function on each item found
   434  //
   435  // If the user fn ever returns true then it early exits with found = true
   436  func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
   437  	query := "parents:" + dirID
   438  	if directoriesOnly {
   439  		query += " AND kind:" + folderKind
   440  	} else if filesOnly {
   441  		query += " AND kind:" + fileKind
   442  	} else {
   443  		// FIXME none of these work
   444  		//query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
   445  		//query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
   446  	}
   447  	opts := acd.NodeListOptions{
   448  		Filters: query,
   449  	}
   450  	var nodes []*acd.Node
   451  	var out []*acd.Node
   452  	//var resp *http.Response
   453  	for {
   454  		var resp *http.Response
   455  		err = f.pacer.CallNoRetry(func() (bool, error) {
   456  			nodes, resp, err = f.c.Nodes.GetNodes(&opts)
   457  			return f.shouldRetry(resp, err)
   458  		})
   459  		if err != nil {
   460  			return false, err
   461  		}
   462  		if nodes == nil {
   463  			break
   464  		}
   465  		for _, node := range nodes {
   466  			if node.Name != nil && node.Id != nil && node.Kind != nil && node.Status != nil {
   467  				// Ignore nodes if not AVAILABLE
   468  				if *node.Status != statusAvailable {
   469  					continue
   470  				}
   471  				// Ignore bogus nodes Amazon Drive sometimes reports
   472  				hasValidParent := false
   473  				for _, parent := range node.Parents {
   474  					if parent == dirID {
   475  						hasValidParent = true
   476  						break
   477  					}
   478  				}
   479  				if !hasValidParent {
   480  					continue
   481  				}
   482  				// Store the nodes up in case we have to retry the listing
   483  				out = append(out, node)
   484  			}
   485  		}
   486  	}
   487  	// Send the nodes now
   488  	for _, node := range out {
   489  		if fn(node) {
   490  			found = true
   491  			break
   492  		}
   493  	}
   494  	return
   495  }
   496  
   497  // List the objects and directories in dir into entries.  The
   498  // entries can be returned in any order but should be for a
   499  // complete directory.
   500  //
   501  // dir should be "" to list the root, and should not have
   502  // trailing slashes.
   503  //
   504  // This should return ErrDirNotFound if the directory isn't
   505  // found.
// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		return nil, err
	}
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}
	maxTries := fs.Config.LowLevelRetries
	var iErr error
	// Retry the whole listing on retryable errors, rebuilding entries
	// from scratch each attempt.
	for tries := 1; tries <= maxTries; tries++ {
		entries = nil
		_, err = f.listAll(directoryID, "", false, false, func(node *acd.Node) bool {
			remote := path.Join(dir, *node.Name)
			switch *node.Kind {
			case folderKind:
				// cache the directory ID for later lookups
				f.dirCache.Put(remote, *node.Id)
				when, _ := time.Parse(timeFormat, *node.ModifiedDate) // FIXME
				d := fs.NewDir(remote, when).SetID(*node.Id)
				entries = append(entries, d)
			case fileKind:
				o, err := f.newObjectWithInfo(ctx, remote, node)
				if err != nil {
					// returning true stops the listing early
					iErr = err
					return true
				}
				entries = append(entries, o)
			default:
				// ignore ASSET etc
			}
			return false
		})
		if iErr != nil {
			return nil, iErr
		}
		if fserrors.IsRetryError(err) {
			fs.Debugf(f, "Directory listing error for %q: %v - low level retry %d/%d", dir, err, tries, maxTries)
			continue
		}
		if err != nil {
			return nil, err
		}
		break
	}
	return entries, nil
}
   554  
   555  // checkUpload checks to see if an error occurred after the file was
   556  // completely uploaded.
   557  //
   558  // If it was then it waits for a while to see if the file really
   559  // exists and is the right size and returns an updated info.
   560  //
   561  // If the file wasn't found or was the wrong size then it returns the
   562  // original error.
   563  //
   564  // This is a workaround for Amazon sometimes returning
   565  //
   566  //  * 408 REQUEST_TIMEOUT
   567  //  * 504 GATEWAY_TIMEOUT
   568  //  * 500 Internal server error
   569  //
   570  // At the end of large uploads.  The speculation is that the timeout
   571  // is waiting for the sha1 hashing to complete and the file may well
   572  // be properly uploaded.
// checkUpload checks to see if an error occurred after the file was
// completely uploaded.
//
// If it was then it waits for a while to see if the file really
// exists and is the right size and returns an updated info.
//
// If the file wasn't found or was the wrong size then it returns the
// original error.
//
// This is a workaround for Amazon sometimes returning
//
//  * 408 REQUEST_TIMEOUT
//  * 504 GATEWAY_TIMEOUT
//  * 500 Internal server error
//
// At the end of large uploads.  The speculation is that the timeout
// is waiting for the sha1 hashing to complete and the file may well
// be properly uploaded.
//
// NOTE(review): the uploadTime parameter is currently unused in this
// function body.
func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
	// Return if no error - all is well
	if inErr == nil {
		return false, inInfo, inErr
	}
	// If not one of the errors we can fix return
	// if resp == nil || resp.StatusCode != 408 && resp.StatusCode != 500 && resp.StatusCode != 504 {
	// 	return false, inInfo, inErr
	// }

	// The HTTP status
	httpStatus := "HTTP status UNKNOWN"
	if resp != nil {
		httpStatus = resp.Status
	}

	// check to see if we read to the end - if not, the upload didn't
	// finish and the error is genuine
	buf := make([]byte, 1)
	n, err := in.Read(buf)
	if !(n == 0 && err == io.EOF) {
		fs.Debugf(src, "Upload error detected but didn't finish upload: %v (%q)", inErr, httpStatus)
		return false, inInfo, inErr
	}

	// Don't wait for uploads - assume they will appear later
	if f.opt.UploadWaitPerGB <= 0 {
		fs.Debugf(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
		return false, inInfo, inErr
	}

	// Time we should wait for the upload, scaled by the file size
	uploadWaitPerByte := float64(f.opt.UploadWaitPerGB) / 1024 / 1024 / 1024
	timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))

	const sleepTime = 5 * time.Second                        // sleep between tries
	retries := int((timeToWait + sleepTime - 1) / sleepTime) // number of retries, rounded up

	fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
	remote := src.Remote()
	// Poll for the object until it appears with the right size or we
	// run out of retries
	for i := 1; i <= retries; i++ {
		o, err := f.NewObject(ctx, remote)
		if err == fs.ErrorObjectNotFound {
			fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
		} else if err != nil {
			fs.Debugf(src, "Object returned error - waiting (%d/%d): %v", i, retries, err)
		} else {
			if src.Size() == o.Size() {
				fs.Debugf(src, "Object found with correct size %d after waiting (%d/%d) - %v - returning with no error", src.Size(), i, retries, sleepTime*time.Duration(i-1))
				info = &acd.File{
					Node: o.(*Object).info,
				}
				return true, info, nil
			}
			fs.Debugf(src, "Object found but wrong size %d vs %d - waiting (%d/%d)", src.Size(), o.Size(), i, retries)
		}
		time.Sleep(sleepTime)
	}
	fs.Debugf(src, "Giving up waiting for object - returning original error: %v (%q)", inErr, httpStatus)
	return false, inInfo, inErr
}
   633  
   634  // Put the object into the container
   635  //
   636  // Copy the reader in to the new object which is returned
   637  //
   638  // The new object may have been created if an error is returned
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: remote,
	}
	// Check if object already exists
	err := o.readMetaData(ctx)
	switch err {
	case nil:
		// Exists - update in place
		return o, o.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		// Not found so create it
	default:
		return nil, err
	}
	// If not create it
	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
	if err != nil {
		return nil, err
	}
	if size > warnFileSize {
		fs.Logf(f, "Warning: file %q may fail because it is too big. Use --max-size=%dM to skip large files.", remote, warnFileSize>>20)
	}
	folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
	var info *acd.File
	var resp *http.Response
	// Use CallNoRetry because checkUpload decides whether the apparent
	// failure is really a success (file appeared after the error)
	err = f.pacer.CallNoRetry(func() (bool, error) {
		start := time.Now()
		f.tokenRenewer.Start()
		info, resp, err = folder.Put(in, leaf)
		f.tokenRenewer.Stop()
		var ok bool
		ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
		if ok {
			return false, nil
		}
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	o.info = info.Node
	return o, nil
}
   686  
   687  // Mkdir creates the container if it doesn't exist
   688  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   689  	err := f.dirCache.FindRoot(ctx, true)
   690  	if err != nil {
   691  		return err
   692  	}
   693  	if dir != "" {
   694  		_, err = f.dirCache.FindDir(ctx, dir, true)
   695  	}
   696  	return err
   697  }
   698  
   699  // Move src to this remote using server side move operations.
   700  //
   701  // This is stored with the remote path given
   702  //
   703  // It returns the destination Object and a possible error
   704  //
   705  // Will only be called if src.Fs().Name() == f.Name()
   706  //
   707  // If it isn't possible then return fs.ErrorCantMove
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	//  go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// create the destination directory if necessary
	err := f.dirCache.FindRoot(ctx, true)
	if err != nil {
		return nil, err
	}
	srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
	if err != nil {
		return nil, err
	}
	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, remote, true)
	if err != nil {
		return nil, err
	}
	err = f.moveNode(srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
	if err != nil {
		return nil, err
	}
	// Wait for directory caching so we can no longer see the old
	// object and see the new object
	time.Sleep(200 * time.Millisecond) // enough time 90% of the time
	var (
		dstObj         fs.Object
		srcErr, dstErr error
	)
	// Poll until the server-side listing reflects the move: the source
	// should be gone and the destination visible.
	for i := 1; i <= fs.Config.LowLevelRetries; i++ {
		_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
		if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
			// exit if error on source
			return nil, srcErr
		}
		dstObj, dstErr = f.NewObject(ctx, remote)
		if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
			// exit if error on dst
			return nil, dstErr
		}
		if srcErr == fs.ErrorObjectNotFound && dstErr == nil {
			// finished if src not found and dst found
			break
		}
		fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, fs.Config.LowLevelRetries)
		time.Sleep(1 * time.Second)
	}
	return dstObj, dstErr
}
   760  
   761  // DirCacheFlush resets the directory cache - used in testing as an
   762  // optional interface
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	// Drop all cached path->ID mappings including the root
	f.dirCache.ResetRoot()
}
   766  
   767  // DirMove moves src, srcRemote to this remote at dstRemote
   768  // using server side move operations.
   769  //
   770  // Will only be called if src.Fs().Name() == f.Name()
   771  //
   772  // If it isn't possible then return fs.ErrorCantDirMove
   773  //
   774  // If destination exists then return fs.ErrorDirExists
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(src, "DirMove error: not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := path.Join(srcFs.root, srcRemote)
	dstPath := path.Join(f.root, dstRemote)

	// Refuse to move to or from the root
	if srcPath == "" || dstPath == "" {
		fs.Debugf(src, "DirMove error: Can't move root")
		return errors.New("can't move root directory")
	}

	// find the root src directory
	err = srcFs.dirCache.FindRoot(ctx, false)
	if err != nil {
		return err
	}

	// find the root dst directory
	if dstRemote != "" {
		err = f.dirCache.FindRoot(ctx, true)
		if err != nil {
			return err
		}
	} else {
		// Moving onto the destination root - it must not already exist
		if f.dirCache.FoundRoot() {
			return fs.ErrorDirExists
		}
	}

	// Find ID of dst parent, creating subdirs if necessary
	findPath := dstRemote
	if dstRemote == "" {
		findPath = f.root
	}
	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true)
	if err != nil {
		return err
	}

	// Check destination does not exist
	if dstRemote != "" {
		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
		if err == fs.ErrorDirNotFound {
			// OK
		} else if err != nil {
			return err
		} else {
			return fs.ErrorDirExists
		}
	}

	// Find ID of src parent
	findPath = srcRemote
	var srcDirectoryID string
	if srcRemote == "" {
		srcDirectoryID, err = srcFs.dirCache.RootParentID()
	} else {
		_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
	}
	if err != nil {
		return err
	}
	srcLeaf, _ := dircache.SplitPath(srcPath)

	// Find ID of src
	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
	if err != nil {
		return err
	}

	// FIXME make a proper node.UpdateMetadata command
	//
	// srcInfo starts as a bare node built from the ID; the Unmarshal
	// below then fills it in with the full metadata fetched from the
	// server so moveNode has a complete node to work with.
	srcInfo := acd.NodeFromId(srcID, f.c.Nodes)
	var jsonStr string
	err = srcFs.pacer.Call(func() (bool, error) {
		jsonStr, err = srcInfo.GetMetadata()
		return srcFs.shouldRetry(nil, err)
	})
	if err != nil {
		fs.Debugf(src, "DirMove error: error reading src metadata: %v", err)
		return err
	}
	err = json.Unmarshal([]byte(jsonStr), &srcInfo)
	if err != nil {
		fs.Debugf(src, "DirMove error: error reading unpacking src metadata: %v", err)
		return err
	}

	err = f.moveNode(srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
	if err != nil {
		return err
	}

	// Invalidate the cached entries under the old name
	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}
   874  
   875  // purgeCheck remotes the root directory, if check is set then it
   876  // refuses to do so if it has anything in
   877  func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
   878  	root := path.Join(f.root, dir)
   879  	if root == "" {
   880  		return errors.New("can't purge root directory")
   881  	}
   882  	dc := f.dirCache
   883  	err := dc.FindRoot(ctx, false)
   884  	if err != nil {
   885  		return err
   886  	}
   887  	rootID, err := dc.FindDir(ctx, dir, false)
   888  	if err != nil {
   889  		return err
   890  	}
   891  
   892  	if check {
   893  		// check directory is empty
   894  		empty := true
   895  		_, err = f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
   896  			switch *node.Kind {
   897  			case folderKind:
   898  				empty = false
   899  				return true
   900  			case fileKind:
   901  				empty = false
   902  				return true
   903  			default:
   904  				fs.Debugf("Found ASSET %s", *node.Id)
   905  			}
   906  			return false
   907  		})
   908  		if err != nil {
   909  			return err
   910  		}
   911  		if !empty {
   912  			return errors.New("directory not empty")
   913  		}
   914  	}
   915  
   916  	node := acd.NodeFromId(rootID, f.c.Nodes)
   917  	var resp *http.Response
   918  	err = f.pacer.Call(func() (bool, error) {
   919  		resp, err = node.Trash()
   920  		return f.shouldRetry(resp, err)
   921  	})
   922  	if err != nil {
   923  		return err
   924  	}
   925  
   926  	f.dirCache.FlushDir(dir)
   927  	if err != nil {
   928  		return err
   929  	}
   930  	return nil
   931  }
   932  
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	// check=true makes purgeCheck refuse to delete a non-empty directory
	return f.purgeCheck(ctx, dir, true)
}
   939  
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
	// Modification times can't be set on this remote (see SetModTime)
	// so advertise that they are unsupported
	return fs.ModTimeNotSupported
}
   944  
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	// Only MD5 is available (see Object.Hash)
	return hash.Set(hash.MD5)
}
   949  
   950  // Copy src to this remote using server side copy operations.
   951  //
   952  // This is stored with the remote path given
   953  //
   954  // It returns the destination Object and a possible error
   955  //
   956  // Will only be called if src.Fs().Name() == f.Name()
   957  //
   958  // If it isn't possible then return fs.ErrorCantCopy
   959  //func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
   960  // srcObj, ok := src.(*Object)
   961  // if !ok {
   962  // 	fs.Debugf(src, "Can't copy - not same remote type")
   963  // 	return nil, fs.ErrorCantCopy
   964  // }
   965  // srcFs := srcObj.fs
   966  // _, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
   967  // if err != nil {
   968  // 	return nil, err
   969  // }
   970  // return f.NewObject(ctx, remote), nil
   971  //}
   972  
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
	// check=false so the directory is trashed even if it has contents
	return f.purgeCheck(ctx, "", false)
}
   981  
   982  // ------------------------------------------------------------
   983  
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	// Simple read-only accessor
	return o.fs
}
   988  
   989  // Return a string version
   990  func (o *Object) String() string {
   991  	if o == nil {
   992  		return "<nil>"
   993  	}
   994  	return o.remote
   995  }
   996  
// Remote returns the remote path
func (o *Object) Remote() string {
	// The path is relative to the root of the Fs
	return o.remote
}
  1001  
  1002  // Hash returns the Md5sum of an object returning a lowercase hex string
  1003  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  1004  	if t != hash.MD5 {
  1005  		return "", hash.ErrUnsupported
  1006  	}
  1007  	if o.info.ContentProperties != nil && o.info.ContentProperties.Md5 != nil {
  1008  		return *o.info.ContentProperties.Md5, nil
  1009  	}
  1010  	return "", nil
  1011  }
  1012  
  1013  // Size returns the size of an object in bytes
  1014  func (o *Object) Size() int64 {
  1015  	if o.info.ContentProperties != nil && o.info.ContentProperties.Size != nil {
  1016  		return int64(*o.info.ContentProperties.Size)
  1017  	}
  1018  	return 0 // Object is likely PENDING
  1019  }
  1020  
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (o *Object) readMetaData(ctx context.Context) (err error) {
	// Already fetched - nothing to do
	if o.info != nil {
		return nil
	}
	// Resolve the parent directory ID and leaf name for this object
	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
	if err != nil {
		// A missing parent directory means the object can't exist
		if err == fs.ErrorDirNotFound {
			return fs.ErrorObjectNotFound
		}
		return err
	}
	folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
	var resp *http.Response
	var info *acd.File
	// Look the leaf up in its parent folder, retrying as the pacer dictates
	err = o.fs.pacer.Call(func() (bool, error) {
		info, resp, err = folder.GetFile(leaf)
		return o.fs.shouldRetry(resp, err)
	})
	if err != nil {
		// Map the ACD not-found error onto the generic rclone one
		if err == acd.ErrorNodeNotFound {
			return fs.ErrorObjectNotFound
		}
		return err
	}
	o.info = info.Node
	return nil
}
  1053  
  1054  // ModTime returns the modification time of the object
  1055  //
  1056  //
  1057  // It attempts to read the objects mtime and if that isn't present the
  1058  // LastModified returned in the http headers
  1059  func (o *Object) ModTime(ctx context.Context) time.Time {
  1060  	err := o.readMetaData(ctx)
  1061  	if err != nil {
  1062  		fs.Debugf(o, "Failed to read metadata: %v", err)
  1063  		return time.Now()
  1064  	}
  1065  	modTime, err := time.Parse(timeFormat, *o.info.ModifiedDate)
  1066  	if err != nil {
  1067  		fs.Debugf(o, "Failed to read mtime from object: %v", err)
  1068  		return time.Now()
  1069  	}
  1070  	return modTime
  1071  }
  1072  
// SetModTime sets the modification time of the local fs object
//
// Not supported on this remote - always returns fs.ErrorCantSetModTime.
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	// FIXME not implemented
	return fs.ErrorCantSetModTime
}
  1078  
// Storable returns a boolean showing whether this object storable
func (o *Object) Storable() bool {
	// All objects on this remote are storable
	return true
}
  1083  
// Open an object for read
//
// Objects at or above TempLinkThreshold in size are downloaded via a
// temporary URL using the unauthenticated client.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
	if bigObject {
		fs.Debugf(o, "Downloading large object via tempLink")
	}
	file := acd.File{Node: o.info}
	var resp *http.Response
	// Convert the open options (eg Range requests) into HTTP headers
	headers := fs.OpenOptionHeaders(options)
	err = o.fs.pacer.Call(func() (bool, error) {
		if !bigObject {
			in, resp, err = file.OpenHeaders(headers)
		} else {
			in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
		}
		return o.fs.shouldRetry(resp, err)
	})
	return in, err
}
  1103  
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	file := acd.File{Node: o.info}
	var info *acd.File
	var resp *http.Response
	var err error
	// CallNoRetry is used because checkUpload decides whether a retry
	// is appropriate
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		start := time.Now()
		// Keep the OAuth token fresh for the duration of the upload
		o.fs.tokenRenewer.Start()
		info, resp, err = file.Overwrite(in)
		o.fs.tokenRenewer.Stop()
		var ok bool
		// checkUpload presumably verifies whether the upload actually
		// landed despite an error - confirm against its definition
		ok, info, err = o.fs.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
		if ok {
			return false, nil
		}
		return o.fs.shouldRetry(resp, err)
	})
	if err != nil {
		return err
	}
	o.info = info.Node
	return nil
}
  1130  
  1131  // Remove a node
  1132  func (f *Fs) removeNode(info *acd.Node) error {
  1133  	var resp *http.Response
  1134  	var err error
  1135  	err = f.pacer.Call(func() (bool, error) {
  1136  		resp, err = info.Trash()
  1137  		return f.shouldRetry(resp, err)
  1138  	})
  1139  	return err
  1140  }
  1141  
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	// Removal is implemented by trashing the node
	return o.fs.removeNode(o.info)
}
  1146  
  1147  // Restore a node
  1148  func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
  1149  	var resp *http.Response
  1150  	err = f.pacer.Call(func() (bool, error) {
  1151  		newInfo, resp, err = info.Restore()
  1152  		return f.shouldRetry(resp, err)
  1153  	})
  1154  	return newInfo, err
  1155  }
  1156  
  1157  // Changes name of given node
  1158  func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
  1159  	var resp *http.Response
  1160  	err = f.pacer.Call(func() (bool, error) {
  1161  		newInfo, resp, err = info.Rename(newName)
  1162  		return f.shouldRetry(resp, err)
  1163  	})
  1164  	return newInfo, err
  1165  }
  1166  
// Replaces one parent with another, effectively moving the file. Leaves other
// parents untouched. ReplaceParent cannot be used when the file is trashed.
func (f *Fs) replaceParent(info *acd.Node, oldParentID string, newParentID string) error {
	// Retry via the pacer; its final error is the result
	return f.pacer.Call(func() (bool, error) {
		resp, err := info.ReplaceParent(oldParentID, newParentID)
		return f.shouldRetry(resp, err)
	})
}
  1175  
// Adds one additional parent to object.
func (f *Fs) addParent(info *acd.Node, newParentID string) error {
	// Retry via the pacer; its final error is the result
	return f.pacer.Call(func() (bool, error) {
		resp, err := info.AddParent(newParentID)
		return f.shouldRetry(resp, err)
	})
}
  1183  
// Remove given parent from object, leaving the other possible
// parents untouched. Object can end up having no parents.
func (f *Fs) removeParent(info *acd.Node, parentID string) error {
	// Retry via the pacer; its final error is the result
	return f.pacer.Call(func() (bool, error) {
		resp, err := info.RemoveParent(parentID)
		return f.shouldRetry(resp, err)
	})
}
  1192  
// moveNode moves the node given from the srcLeaf,srcDirectoryID to
// the dstLeaf,dstDirectoryID
//
// It first attempts a quick rename + parent replace; if the rename
// fails (typically a name conflict) it falls back to a multi-step
// trash/rename/re-parent/restore sequence.
func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
	// fs.Debugf(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID)
	// Pick the error for the caller depending on whether this is a
	// file move or a directory move
	cantMove := fs.ErrorCantMove
	if useDirErrorMsgs {
		cantMove = fs.ErrorCantDirMove
	}

	// Renaming a multi-parent node would rename it in every parent,
	// so refuse to do it
	if len(srcInfo.Parents) > 1 && srcLeaf != dstLeaf {
		fs.Debugf(name, "Move error: object is attached to multiple parents and should be renamed. This would change the name of the node in all parents.")
		return cantMove
	}

	// Quick path: rename in place, then swap the parent over
	if srcLeaf != dstLeaf {
		// fs.Debugf(name, "renaming")
		_, err = f.renameNode(srcInfo, dstLeaf)
		if err != nil {
			fs.Debugf(name, "Move: quick path rename failed: %v", err)
			goto OnConflict
		}
	}
	if srcDirectoryID != dstDirectoryID {
		// fs.Debugf(name, "trying parent replace: %s -> %s", oldParentID, newParentID)
		err = f.replaceParent(srcInfo, srcDirectoryID, dstDirectoryID)
		if err != nil {
			fs.Debugf(name, "Move: quick path parent replace failed: %v", err)
			return err
		}
	}

	return nil

OnConflict:
	// Slow path: trash the node, rename and re-parent it while it is
	// trashed (where name conflicts don't apply), then restore it
	fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.")

	// fs.Debugf(name, "Trashing file")
	err = f.removeNode(srcInfo)
	if err != nil {
		fs.Debugf(name, "Move: remove node failed: %v", err)
		return err
	}
	// fs.Debugf(name, "Renaming file")
	_, err = f.renameNode(srcInfo, dstLeaf)
	if err != nil {
		fs.Debugf(name, "Move: rename node failed: %v", err)
		return err
	}
	// note: replacing parent is forbidden by API, modifying them individually is
	// okay though
	// fs.Debugf(name, "Adding target parent")
	err = f.addParent(srcInfo, dstDirectoryID)
	if err != nil {
		fs.Debugf(name, "Move: addParent failed: %v", err)
		return err
	}
	// fs.Debugf(name, "removing original parent")
	err = f.removeParent(srcInfo, srcDirectoryID)
	if err != nil {
		fs.Debugf(name, "Move: removeParent failed: %v", err)
		return err
	}
	// fs.Debugf(name, "Restoring")
	_, err = f.restoreNode(srcInfo)
	if err != nil {
		fs.Debugf(name, "Move: restoreNode node failed: %v", err)
		return err
	}
	return nil
}
  1263  
  1264  // MimeType of an Object if known, "" otherwise
  1265  func (o *Object) MimeType(ctx context.Context) string {
  1266  	if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil {
  1267  		return *o.info.ContentProperties.ContentType
  1268  	}
  1269  	return ""
  1270  }
  1271  
  1272  // ChangeNotify calls the passed function with a path that has had changes.
  1273  // If the implementation uses polling, it should adhere to the given interval.
  1274  //
  1275  // Automatically restarts itself in case of unexpected behaviour of the remote.
  1276  //
  1277  // Close the returned channel to stop being notified.
  1278  func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
  1279  	checkpoint := f.opt.Checkpoint
  1280  
  1281  	go func() {
  1282  		var ticker *time.Ticker
  1283  		var tickerC <-chan time.Time
  1284  		for {
  1285  			select {
  1286  			case pollInterval, ok := <-pollIntervalChan:
  1287  				if !ok {
  1288  					if ticker != nil {
  1289  						ticker.Stop()
  1290  					}
  1291  					return
  1292  				}
  1293  				if pollInterval == 0 {
  1294  					if ticker != nil {
  1295  						ticker.Stop()
  1296  						ticker, tickerC = nil, nil
  1297  					}
  1298  				} else {
  1299  					ticker = time.NewTicker(pollInterval)
  1300  					tickerC = ticker.C
  1301  				}
  1302  			case <-tickerC:
  1303  				checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
  1304  				if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
  1305  					fs.Debugf(f, "Unable to save checkpoint: %v", err)
  1306  				}
  1307  			}
  1308  		}
  1309  	}()
  1310  }
  1311  
// changeNotifyRunner fetches the changes since checkpoint from the
// remote, calls notifyFunc with the path of each changed entry it can
// resolve, and returns the checkpoint to use for the next poll.
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
	var err error
	var resp *http.Response
	var reachedEnd bool
	var csCount int
	var nodeCount int

	fs.Debugf(f, "Checking for changes on remote (Checkpoint %q)", checkpoint)
	err = f.pacer.CallNoRetry(func() (bool, error) {
		resp, err = f.c.Changes.GetChangesFunc(&acd.ChangesOptions{
			Checkpoint:    checkpoint,
			IncludePurged: true,
		}, func(changeSet *acd.ChangeSet, err error) error {
			if err != nil {
				return err
			}

			// Collect (path, type) pairs to notify for this change set
			type entryType struct {
				path      string
				entryType fs.EntryType
			}
			var pathsToClear []entryType
			csCount++
			nodeCount += len(changeSet.Nodes)
			if changeSet.End {
				reachedEnd = true
			}
			// Advance the checkpoint as each change set supplies one
			if changeSet.Checkpoint != "" {
				checkpoint = changeSet.Checkpoint
			}
			for _, node := range changeSet.Nodes {
				// If the node itself is in the directory cache we know its path
				if path, ok := f.dirCache.GetInv(*node.Id); ok {
					if node.IsFile() {
						pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
					} else {
						pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory})
					}
					continue
				}

				if node.IsFile() {
					// translate the parent dir of this object
					if len(node.Parents) > 0 {
						if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
							// and append the drive file name to compute the full file name
							if len(path) > 0 {
								path = path + "/" + *node.Name
							} else {
								path = *node.Name
							}
							// this will now clear the actual file too
							pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
						}
					} else { // a true root object that is changed
						pathsToClear = append(pathsToClear, entryType{path: *node.Name, entryType: fs.EntryObject})
					}
				}
			}

			// Notify each distinct path at most once per change set
			visitedPaths := make(map[string]bool)
			for _, entry := range pathsToClear {
				if _, ok := visitedPaths[entry.path]; ok {
					continue
				}
				visitedPaths[entry.path] = true
				notifyFunc(entry.path, entry.entryType)
			}

			return nil
		})
		return false, err
	})
	fs.Debugf(f, "Got %d ChangeSets with %d Nodes", csCount, nodeCount)

	// io.ErrUnexpectedEOF appears to signal the normal end of the
	// change stream here - NOTE(review): confirm against go-acd
	if err != nil && err != io.ErrUnexpectedEOF {
		fs.Debugf(f, "Failed to get Changes: %v", err)
		return checkpoint
	}

	if reachedEnd {
		reachedEnd = false
		fs.Debugf(f, "All changes were processed. Waiting for more.")
	} else if checkpoint == "" {
		fs.Debugf(f, "Did not get any checkpoint, something went wrong! %+v", resp)
	}
	return checkpoint
}
  1399  
  1400  // ID returns the ID of the Object if known, or "" if not
  1401  func (o *Object) ID() string {
  1402  	if o.info.Id == nil {
  1403  		return ""
  1404  	}
  1405  	return *o.info.Id
  1406  }
  1407  
  1408  // Check the interfaces are satisfied
  1409  var (
  1410  	_ fs.Fs     = (*Fs)(nil)
  1411  	_ fs.Purger = (*Fs)(nil)
  1412  	//	_ fs.Copier   = (*Fs)(nil)
  1413  	_ fs.Mover           = (*Fs)(nil)
  1414  	_ fs.DirMover        = (*Fs)(nil)
  1415  	_ fs.DirCacheFlusher = (*Fs)(nil)
  1416  	_ fs.ChangeNotifier  = (*Fs)(nil)
  1417  	_ fs.Object          = (*Object)(nil)
  1418  	_ fs.MimeTyper       = &Object{}
  1419  	_ fs.IDer            = &Object{}
  1420  )