github.com/10XDev/rclone@v1.52.3-0.20200626220027-16af9ab76b2a/backend/sharefile/sharefile.go

     1  // Package sharefile provides an interface to the Citrix Sharefile
     2  // object storage system.
     3  package sharefile
     4  
     5  //go:generate ./update-timezone.sh
     6  
     7  /* NOTES
     8  
     9  ## for docs
    10  
    11  Detail standard/chunked/streaming uploads?
    12  
    13  ## Bugs in API
    14  
    15  The times in updateItem are being parsed in EST/DST local time
    16  updateItem only sets times accurate to 1 second
    17  
    18  https://community.sharefilesupport.com/citrixsharefile/topics/bug-report-for-update-item-patch-items-id-setting-clientmodifieddate-ignores-timezone-and-milliseconds
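
        Worked example of the workaround used in updateItem below: to set a
        modification time of 2020-01-02T15:04:05.123Z we convert it to
        America/New_York (2020-01-02T10:04:05.123-05:00), chop off the
        "-05:00" and send 2020-01-02T10:04:05.123Z instead, so that when the
        server parses it as New York local time it lands back on the
        intended instant.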
    19  
    20  When doing a rename + move to a different directory, the server
    21  appears to do the rename first in the source directory, which can
    22  overwrite files of the same name in that source directory.
    23  
    24  https://community.sharefilesupport.com/citrixsharefile/topics/bug-report-for-update-item-patch-items-id-file-overwrite-under-certain-conditions
    25  
    26  The Copy command can't change the name at the same time which means we
    27  have to copy via a temporary directory.
    28  
    29  https://community.sharefilesupport.com/citrixsharefile/topics/copy-item-needs-to-be-able-to-set-a-new-name
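
        The workaround used in Copy below is roughly: create a temporary
        directory (rclone-temp-dir- plus a random suffix), Copy the item
        into it, PATCH (updateItem) it into the destination directory with
        its final name, then remove the temporary directory.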
    30  
    31  ## Allowed characters
    32  
    33  https://api.sharefile.com/rest/index/odata.aspx
    34  
    35  $select to limit returned fields
    36  https://www.odata.org/documentation/odata-version-3-0/odata-version-3-0-core-protocol/#theselectsystemqueryoption
    37  
    38  Also $filter to select only things we need
    39  
    40  https://support.citrix.com/article/CTX234774
    41  
    42  The following characters should not be used in folder or file names.
    43  
    44  \
    45  /
    46  .
    47  ,
    48  :
    49  ;
    50  *
    51  ?
    52  "
    53  <
    54  >
    55  A file name ending with a period but no extension.
    56  File names with leading or trailing whitespace.
    57  
    58  
    59  // sharefile
    60  stringNeedsEscaping = []byte{
    61  	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x2A, 0x2E, 0x2F, 0x3A, 0x3C, 0x3E, 0x3F, 0x7C, 0xEFBCBC
    62  }
    63  maxFileLength = 256
    64  canWriteUnnormalized = true
    65  canReadUnnormalized   = true
    66  canReadRenormalized   = false
    67  canStream = true
    68  
    69  Which is control chars + [' ', '*', '.', '/', ':', '<', '>', '?', '|']
    70  - also \ and "
    71  
    72  */
    73  
    74  import (
    75  	"context"
    76  	"encoding/json"
    77  	"fmt"
    78  	"io"
    79  	"io/ioutil"
    80  	"log"
    81  	"net/http"
    82  	"net/url"
    83  	"path"
    84  	"strings"
    85  	"time"
    86  
    87  	"github.com/pkg/errors"
    88  	"github.com/rclone/rclone/backend/sharefile/api"
    89  	"github.com/rclone/rclone/fs"
    90  	"github.com/rclone/rclone/fs/config"
    91  	"github.com/rclone/rclone/fs/config/configmap"
    92  	"github.com/rclone/rclone/fs/config/configstruct"
    93  	"github.com/rclone/rclone/fs/config/obscure"
    94  	"github.com/rclone/rclone/fs/fserrors"
    95  	"github.com/rclone/rclone/fs/hash"
    96  	"github.com/rclone/rclone/lib/dircache"
    97  	"github.com/rclone/rclone/lib/encoder"
    98  	"github.com/rclone/rclone/lib/oauthutil"
    99  	"github.com/rclone/rclone/lib/pacer"
   100  	"github.com/rclone/rclone/lib/random"
   101  	"github.com/rclone/rclone/lib/rest"
   102  	"golang.org/x/oauth2"
   103  )
   104  
   105  const (
   106  	rcloneClientID              = "djQUPlHTUM9EvayYBWuKC5IrVIoQde46"
   107  	rcloneEncryptedClientSecret = "v7572bKhUindQL3yDnUAebmgP-QxiwT38JLxVPolcZBl6SSs329MtFzH73x7BeELmMVZtneUPvALSopUZ6VkhQ"
   108  	minSleep                    = 10 * time.Millisecond
   109  	maxSleep                    = 2 * time.Second
   110  	decayConstant               = 2              // bigger for slower decay, exponential
   111  	apiPath                     = "/sf/v3"       // add to endpoint to get API path
   112  	tokenPath                   = "/oauth/token" // add to endpoint to get Token path
   113  	minChunkSize                = 256 * fs.KibiByte
   114  	maxChunkSize                = 2 * fs.GibiByte
   115  	defaultChunkSize            = 64 * fs.MebiByte
   116  	defaultUploadCutoff         = 128 * fs.MebiByte
   117  )
   118  
   119  // Generate a new oauth2 config which we will update when we know the TokenURL
   120  func newOauthConfig(tokenURL string) *oauth2.Config {
   121  	return &oauth2.Config{
   122  		Scopes: nil,
   123  		Endpoint: oauth2.Endpoint{
   124  			AuthURL:  "https://secure.sharefile.com/oauth/authorize",
   125  			TokenURL: tokenURL,
   126  		},
   127  		ClientID:     rcloneClientID,
   128  		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
   129  		RedirectURL:  oauthutil.RedirectPublicSecureURL,
   130  	}
   131  }
   132  
   133  // Register with Fs
   134  func init() {
   135  	fs.Register(&fs.RegInfo{
   136  		Name:        "sharefile",
   137  		Description: "Citrix Sharefile",
   138  		NewFs:       NewFs,
   139  		Config: func(name string, m configmap.Mapper) {
   140  			oauthConfig := newOauthConfig("")
   141  			checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
   142  				if auth == nil || auth.Form == nil {
   143  					return errors.New("endpoint not found in response")
   144  				}
   145  				subdomain := auth.Form.Get("subdomain")
   146  				apicp := auth.Form.Get("apicp")
   147  				if subdomain == "" || apicp == "" {
   148  					return errors.Errorf("subdomain or apicp not found in response: %+v", auth.Form)
   149  				}
   150  				endpoint := "https://" + subdomain + "." + apicp
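        				// e.g. subdomain "acme" with apicp "sf-api.com" (illustrative
        				// values only) gives endpoint "https://acme.sf-api.com"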
   151  				m.Set("endpoint", endpoint)
   152  				oauthConfig.Endpoint.TokenURL = endpoint + tokenPath
   153  				return nil
   154  			}
   155  			opt := oauthutil.Options{
   156  				CheckAuth: checkAuth,
   157  			}
   158  			err := oauthutil.Config("sharefile", name, m, oauthConfig, &opt)
   159  			if err != nil {
   160  				log.Fatalf("Failed to configure token: %v", err)
   161  			}
   162  		},
   163  		Options: []fs.Option{{
   164  			Name:     "upload_cutoff",
   165  			Help:     "Cutoff for switching to multipart upload.",
   166  			Default:  defaultUploadCutoff,
   167  			Advanced: true,
   168  		}, {
   169  			Name: "root_folder_id",
   170  			Help: `ID of the root folder
   171  
   172  Leave blank to access "Personal Folders".  You can use one of the
   173  standard values here or any folder ID (long hex number ID).`,
   174  			Examples: []fs.OptionExample{{
   175  				Value: "",
   176  				Help:  `Access the Personal Folders. (Default)`,
   177  			}, {
   178  				Value: "favorites",
   179  				Help:  "Access the Favorites folder.",
   180  			}, {
   181  				Value: "allshared",
   182  				Help:  "Access all the shared folders.",
   183  			}, {
   184  				Value: "connectors",
   185  				Help:  "Access all the individual connectors.",
   186  			}, {
   187  				Value: "top",
   188  				Help:  "Access the home, favorites, and shared folders as well as the connectors.",
   189  			}},
   190  		}, {
   191  			Name:    "chunk_size",
   192  			Default: defaultChunkSize,
   193  			Help: `Upload chunk size. Must be a power of 2 >= 256k.
   194  
   195  Making this larger will improve performance, but note that one chunk
   196  per transfer is buffered in memory.
   197  
   198  Reducing this will reduce memory usage but decrease performance.`,
   199  			Advanced: true,
   200  		}, {
   201  			Name: "endpoint",
   202  			Help: `Endpoint for API calls.
   203  
   204  This is usually auto-discovered as part of the OAuth process, but can
   205  be set manually to something like: https://XXX.sharefile.com
   206  `,
   207  			Advanced: true,
   208  			Default:  "",
   209  		}, {
   210  			Name:     config.ConfigEncoding,
   211  			Help:     config.ConfigEncodingHelp,
   212  			Advanced: true,
   213  			Default: (encoder.Base |
   214  				encoder.EncodeWin | // :?"*<>|
   215  				encoder.EncodeBackSlash | // \
   216  				encoder.EncodeCtl |
   217  				encoder.EncodeRightSpace |
   218  				encoder.EncodeRightPeriod |
   219  				encoder.EncodeLeftSpace |
   220  				encoder.EncodeLeftPeriod |
   221  				encoder.EncodeInvalidUtf8),
   222  		}},
   223  	})
   224  }
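
        // With the endpoint discovered above, a configured remote ends up
        // looking something like this in the config file (domain and token
        // contents are illustrative only):
        //
        //	[remote]
        //	type = sharefile
        //	endpoint = https://acme.sf-api.com
        //	token = {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2020-01-02T15:04:05Z"}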
   225  
   226  // Options defines the configuration for this backend
   227  type Options struct {
   228  	RootFolderID string               `config:"root_folder_id"`
   229  	UploadCutoff fs.SizeSuffix        `config:"upload_cutoff"`
   230  	ChunkSize    fs.SizeSuffix        `config:"chunk_size"`
   231  	Endpoint     string               `config:"endpoint"`
   232  	Enc          encoder.MultiEncoder `config:"encoding"`
   233  }
   234  
   235  // Fs represents a remote cloud storage system
   236  type Fs struct {
   237  	name         string             // name of this remote
   238  	root         string             // the path we are working on
   239  	opt          Options            // parsed options
   240  	features     *fs.Features       // optional features
   241  	srv          *rest.Client       // the connection to the server
   242  	dirCache     *dircache.DirCache // Map of directory path to directory id
   243  	pacer        *fs.Pacer          // pacer for API calls
   244  	bufferTokens chan []byte        // control concurrency of multipart uploads
   245  	tokenRenewer *oauthutil.Renew   // renew the token on expiry
   246  	rootID       string             // ID of the users root folder
   247  	location     *time.Location     // timezone of server for SetModTime workaround
   248  }
   249  
   250  // Object describes a file
   251  type Object struct {
   252  	fs          *Fs       // what this object is part of
   253  	remote      string    // The remote path
   254  	hasMetaData bool      // metadata is present and correct
   255  	size        int64     // size of the object
   256  	modTime     time.Time // modification time of the object
   257  	id          string    // ID of the object
   258  	md5         string    // hash of the object
   259  }
   260  
   261  // ------------------------------------------------------------
   262  
   263  // Name of the remote (as passed into NewFs)
   264  func (f *Fs) Name() string {
   265  	return f.name
   266  }
   267  
   268  // Root of the remote (as passed into NewFs)
   269  func (f *Fs) Root() string {
   270  	return f.root
   271  }
   272  
   273  // String converts this Fs to a string
   274  func (f *Fs) String() string {
   275  	return fmt.Sprintf("sharefile root '%s'", f.root)
   276  }
   277  
   278  // Features returns the optional features of this Fs
   279  func (f *Fs) Features() *fs.Features {
   280  	return f.features
   281  }
   282  
   283  // parsePath parses a sharefile 'url'
   284  func parsePath(path string) (root string) {
   285  	root = strings.Trim(path, "/")
   286  	return
   287  }
   288  
   289  // retryErrorCodes is a slice of error codes that we will retry
   290  var retryErrorCodes = []int{
   291  	429, // Too Many Requests.
   292  	500, // Internal Server Error
   293  	502, // Bad Gateway
   294  	503, // Service Unavailable
   295  	504, // Gateway Timeout
   296  	509, // Bandwidth Limit Exceeded
   297  }
   298  
   299  // shouldRetry returns a boolean as to whether this resp and err
   300  // deserve to be retried.  It returns the err as a convenience
   301  func shouldRetry(resp *http.Response, err error) (bool, error) {
   302  	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
   303  }
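
        // shouldRetry is used together with the pacer for every API call in
        // this file, following this pattern (illustrative sketch):
        //
        //	err = f.pacer.Call(func() (bool, error) {
        //		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
        //		return shouldRetry(resp, err)
        //	})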
   304  
   305  // Reads the metadata for the id passed in.  If id is "" then it returns the root.
   306  // If path is not "" then the item read uses id as the root and path is relative to it.
   307  func (f *Fs) readMetaDataForIDPath(ctx context.Context, id, path string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) {
   308  	opts := rest.Opts{
   309  		Method: "GET",
   310  		Path:   "/Items",
   311  		Parameters: url.Values{
   312  			"$select": {api.ListRequestSelect},
   313  		},
   314  	}
   315  	if id != "" {
   316  		opts.Path += "(" + id + ")"
   317  	}
   318  	if path != "" {
   319  		opts.Path += "/ByPath"
   320  		opts.Parameters.Set("path", "/"+f.opt.Enc.FromStandardPath(path))
   321  	}
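        	// The request therefore ends up as one of:
        	//   GET /Items                      - the root folder
        	//   GET /Items(id)                  - the item with that ID
        	//   GET /Items(id)/ByPath?path=/... - path relative to the item
        	//   GET /Items/ByPath?path=/...     - path relative to the root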
   322  	var item api.Item
   323  	var resp *http.Response
   324  	err = f.pacer.Call(func() (bool, error) {
   325  		resp, err = f.srv.CallJSON(ctx, &opts, nil, &item)
   326  		return shouldRetry(resp, err)
   327  	})
   328  	if err != nil {
   329  		if resp != nil && resp.StatusCode == http.StatusNotFound {
   330  			if filesOnly {
   331  				return nil, fs.ErrorObjectNotFound
   332  			}
   333  			return nil, fs.ErrorDirNotFound
   334  		}
   335  		return nil, errors.Wrap(err, "couldn't find item")
   336  	}
   337  	if directoriesOnly && item.Type != api.ItemTypeFolder {
   338  		return nil, fs.ErrorIsFile
   339  	}
   340  	if filesOnly && item.Type != api.ItemTypeFile {
   341  		return nil, fs.ErrorNotAFile
   342  	}
   343  	return &item, nil
   344  }
   345  
   346  // Reads the metadata for the id passed in.  If id is "" then it returns the root
   347  func (f *Fs) readMetaDataForID(ctx context.Context, id string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) {
   348  	return f.readMetaDataForIDPath(ctx, id, "", directoriesOnly, filesOnly)
   349  }
   350  
   351  // readMetaDataForPath reads the metadata from the path
   352  func (f *Fs) readMetaDataForPath(ctx context.Context, path string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) {
   353  	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
   354  	if err != nil {
   355  		if err == fs.ErrorDirNotFound {
   356  			return nil, fs.ErrorObjectNotFound
   357  		}
   358  		return nil, err
   359  	}
   360  	return f.readMetaDataForIDPath(ctx, directoryID, leaf, directoriesOnly, filesOnly)
   361  }
   362  
   363  // errorHandler parses a non 2xx error response into an error
   364  func errorHandler(resp *http.Response) error {
   365  	body, err := rest.ReadBody(resp)
   366  	if err != nil {
   367  		body = nil
   368  	}
   369  	var e = api.Error{
   370  		Code:   fmt.Sprint(resp.StatusCode),
   371  		Reason: resp.Status,
   372  	}
   373  	e.Message.Lang = "en"
   374  	e.Message.Value = string(body)
   375  	if body != nil {
   376  		_ = json.Unmarshal(body, &e)
   377  	}
   378  	return &e
   379  }
   380  
   381  func checkUploadChunkSize(cs fs.SizeSuffix) error {
   382  	if cs < minChunkSize {
   383  		return errors.Errorf("ChunkSize: %s is less than %s", cs, minChunkSize)
   384  	}
   385  	if cs > maxChunkSize {
   386  		return errors.Errorf("ChunkSize: %s is greater than %s", cs, maxChunkSize)
   387  	}
   388  	return nil
   389  }
   390  
   391  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   392  	err = checkUploadChunkSize(cs)
   393  	if err == nil {
   394  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
   395  		f.fillBufferTokens() // reset the buffer tokens
   396  	}
   397  	return
   398  }
   399  
   400  func checkUploadCutoff(cs fs.SizeSuffix) error {
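        	// no limits to enforce here - sizes above the cutoff just switch
        	// Update to the chunked upload path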
   401  	return nil
   402  }
   403  
   404  func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   405  	err = checkUploadCutoff(cs)
   406  	if err == nil {
   407  		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
   408  	}
   409  	return
   410  }
   411  
   412  // NewFs constructs an Fs from the path, container:path
   413  func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
   414  	ctx := context.Background()
   415  	// Parse config into Options struct
   416  	opt := new(Options)
   417  	err := configstruct.Set(m, opt)
   418  	if err != nil {
   419  		return nil, err
   420  	}
   421  
   422  	// Check parameters OK
   423  	if opt.Endpoint == "" {
   424  		return nil, errors.New("endpoint not set: rebuild the remote or set manually")
   425  	}
   426  	err = checkUploadChunkSize(opt.ChunkSize)
   427  	if err != nil {
   428  		return nil, err
   429  	}
   430  	err = checkUploadCutoff(opt.UploadCutoff)
   431  	if err != nil {
   432  		return nil, err
   433  	}
   434  
   435  	root = parsePath(root)
   436  
   437  	oauthConfig := newOauthConfig(opt.Endpoint + tokenPath)
   438  	var client *http.Client
   439  	var ts *oauthutil.TokenSource
   440  	client, ts, err = oauthutil.NewClient(name, m, oauthConfig)
   441  	if err != nil {
   442  		return nil, errors.Wrap(err, "failed to configure sharefile")
   443  	}
   444  
   445  	f := &Fs{
   446  		name:  name,
   447  		root:  root,
   448  		opt:   *opt,
   449  		srv:   rest.NewClient(client).SetRoot(opt.Endpoint + apiPath),
   450  		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
   451  	}
   452  	f.features = (&fs.Features{
   453  		CaseInsensitive:         true,
   454  		CanHaveEmptyDirectories: true,
   455  		ReadMimeType:            false,
   456  	}).Fill(f)
   457  	f.srv.SetErrorHandler(errorHandler)
   458  	f.fillBufferTokens()
   459  
   460  	// Renew the token in the background
   461  	if ts != nil {
   462  		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
   463  			_, err := f.List(ctx, "")
   464  			return err
   465  		})
   466  	}
   467  
   468  	// Load the server timezone from an internal file
   469  	// Used to correct the time in SetModTime
   470  	const serverTimezone = "America/New_York"
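        	// tzdata here is assumed to be the embedded timezone
        	// http.FileSystem generated by ./update-timezone.sh (see the
        	// go:generate directive at the top of this file)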
   471  	timezone, err := tzdata.Open(serverTimezone)
   472  	if err != nil {
   473  		return nil, errors.Wrap(err, "failed to open timezone db")
   474  	}
   475  	tzdata, err := ioutil.ReadAll(timezone)
   476  	if err != nil {
   477  		return nil, errors.Wrap(err, "failed to read timezone")
   478  	}
   479  	_ = timezone.Close()
   480  	f.location, err = time.LoadLocationFromTZData(serverTimezone, tzdata)
   481  	if err != nil {
   482  		return nil, errors.Wrap(err, "failed to load location from timezone")
   483  	}
   484  
   485  	// Find ID of user's root folder
   486  	if opt.RootFolderID == "" {
   487  		item, err := f.readMetaDataForID(ctx, opt.RootFolderID, true, false)
   488  		if err != nil {
   489  			return nil, errors.Wrap(err, "couldn't find root ID")
   490  		}
   491  		f.rootID = item.ID
   492  	} else {
   493  		f.rootID = opt.RootFolderID
   494  	}
   495  
   496  	// Set up the directory cache rooted at rootID
   497  	f.dirCache = dircache.New(root, f.rootID, f)
   498  
   499  	// Find the current root
   500  	err = f.dirCache.FindRoot(ctx, false)
   501  	if err != nil {
   502  		// Assume it is a file
   503  		newRoot, remote := dircache.SplitPath(root)
   504  		tempF := *f
   505  		tempF.dirCache = dircache.New(newRoot, f.rootID, &tempF)
   506  		tempF.root = newRoot
   507  		// Make new Fs which is the parent
   508  		err = tempF.dirCache.FindRoot(ctx, false)
   509  		if err != nil {
   510  			// No root so return old f
   511  			return f, nil
   512  		}
   513  		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
   514  		if err != nil {
   515  			if err == fs.ErrorObjectNotFound {
   516  				// File doesn't exist so return old f
   517  				return f, nil
   518  			}
   519  			return nil, err
   520  		}
   521  		f.features.Fill(&tempF)
   522  		// XXX: update the old f here instead of returning tempF, since
   523  		// `features` were already filled with functions having *f as a receiver.
   524  		// See https://github.com/rclone/rclone/issues/2182
   525  		f.dirCache = tempF.dirCache
   526  		f.root = tempF.root
   527  		// return an error with an fs which points to the parent
   528  		return f, fs.ErrorIsFile
   529  	}
   530  	return f, nil
   531  }
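
        // Note that when root points at an existing file, NewFs above
        // returns an Fs rooted at its parent directory together with
        // fs.ErrorIsFile - this is how rclone handles remotes like
        // "remote:path/to/file.txt".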
   532  
   533  // Fill up (or reset) the buffer tokens
   534  func (f *Fs) fillBufferTokens() {
   535  	f.bufferTokens = make(chan []byte, fs.Config.Transfers)
   536  	for i := 0; i < fs.Config.Transfers; i++ {
   537  		f.bufferTokens <- nil
   538  	}
   539  }
   540  
   541  // getUploadBlock gets a block from the pool of size chunkSize
   542  func (f *Fs) getUploadBlock() []byte {
   543  	buf := <-f.bufferTokens
   544  	if buf == nil {
   545  		buf = make([]byte, f.opt.ChunkSize)
   546  	}
   547  	// fs.Debugf(f, "Getting upload block %p", buf)
   548  	return buf
   549  }
   550  
   551  // putUploadBlock returns a block to the pool of size chunkSize
   552  func (f *Fs) putUploadBlock(buf []byte) {
   553  	buf = buf[:cap(buf)]
   554  	if len(buf) != int(f.opt.ChunkSize) {
   555  		panic("bad blocksize returned to pool")
   556  	}
   557  	// fs.Debugf(f, "Returning upload block %p", buf)
   558  	f.bufferTokens <- buf
   559  }
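
        // Callers are expected to pair these two, along the lines of this
        // illustrative sketch:
        //
        //	buf := f.getUploadBlock()
        //	defer f.putUploadBlock(buf)
        //	// ... fill buf with up to chunk_size bytes and send it ...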
   560  
   561  // Return an Object from a path
   562  //
   563  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   564  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
   565  	o := &Object{
   566  		fs:     f,
   567  		remote: remote,
   568  	}
   569  	var err error
   570  	if info != nil {
   571  		// Set info
   572  		err = o.setMetaData(info)
   573  	} else {
   574  		err = o.readMetaData(ctx) // reads info and meta, returning an error
   575  	}
   576  	if err != nil {
   577  		return nil, err
   578  	}
   579  	return o, nil
   580  }
   581  
   582  // NewObject finds the Object at remote.  If it can't be found
   583  // it returns the error fs.ErrorObjectNotFound.
   584  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   585  	return f.newObjectWithInfo(ctx, remote, nil)
   586  }
   587  
   588  // FindLeaf finds a directory of name leaf in the folder with ID pathID
   589  func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
   590  	if pathID == "top" {
   591  		// Find the leaf in pathID
   592  		found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
   593  			if item.Name == leaf {
   594  				pathIDOut = item.ID
   595  				return true
   596  			}
   597  			return false
   598  		})
   599  		return pathIDOut, found, err
   600  	}
   601  	info, err := f.readMetaDataForIDPath(ctx, pathID, leaf, true, false)
   602  	if err == nil {
   603  		found = true
   604  		pathIDOut = info.ID
   605  	} else if err == fs.ErrorDirNotFound {
   606  		err = nil // don't return an error if not found
   607  	}
   608  	return pathIDOut, found, err
   609  }
   610  
   611  // CreateDir makes a directory with pathID as parent and name leaf
   612  func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
   613  	var resp *http.Response
   614  	leaf = f.opt.Enc.FromStandardName(leaf)
   615  	var req = api.Item{
   616  		Name:      leaf,
   617  		FileName:  leaf,
   618  		CreatedAt: time.Now(),
   619  	}
   620  	var info api.Item
   621  	opts := rest.Opts{
   622  		Method: "POST",
   623  		Path:   "/Items(" + pathID + ")/Folder",
   624  		Parameters: url.Values{
   625  			"$select":     {api.ListRequestSelect},
   626  			"overwrite":   {"false"},
   627  			"passthrough": {"false"},
   628  		},
   629  	}
   630  	err = f.pacer.Call(func() (bool, error) {
   631  		resp, err = f.srv.CallJSON(ctx, &opts, &req, &info)
   632  		return shouldRetry(resp, err)
   633  	})
   634  	if err != nil {
   635  		return "", errors.Wrap(err, "CreateDir")
   636  	}
   637  	return info.ID, nil
   638  }
   639  
   640  // listAllFn is the type of the user function supplied to listAll
   641  // to process each File item found
   642  //
   643  // It should return true to finish processing early
   644  //
   645  // (filtering with directoriesOnly/filesOnly is done by listAll itself)
   646  type listAllFn func(*api.Item) bool
   647  
   648  // Lists the directory required calling the user function on each item found
   649  //
   650  // If the user fn ever returns true then it early exits with found = true
   651  func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
   652  	opts := rest.Opts{
   653  		Method: "GET",
   654  		Path:   "/Items(" + dirID + ")/Children",
   655  		Parameters: url.Values{
   656  			"$select": {api.ListRequestSelect},
   657  		},
   658  	}
   659  
   660  	var result api.ListResponse
   661  	var resp *http.Response
   662  	err = f.pacer.Call(func() (bool, error) {
   663  		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
   664  		return shouldRetry(resp, err)
   665  	})
   666  	if err != nil {
   667  		return found, errors.Wrap(err, "couldn't list files")
   668  	}
   669  	for i := range result.Value {
   670  		item := &result.Value[i]
   671  		if item.Type == api.ItemTypeFolder {
   672  			if filesOnly {
   673  				continue
   674  			}
   675  		} else if item.Type == api.ItemTypeFile {
   676  			if directoriesOnly {
   677  				continue
   678  			}
   679  		} else {
   680  			fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
   681  			continue
   682  		}
   683  		item.Name = f.opt.Enc.ToStandardName(item.Name)
   684  		if fn(item) {
   685  			found = true
   686  			break
   687  		}
   688  	}
   689  
   690  	return
   691  }
   692  
   693  // List the objects and directories in dir into entries.  The
   694  // entries can be returned in any order but should be for a
   695  // complete directory.
   696  //
   697  // dir should be "" to list the root, and should not have
   698  // trailing slashes.
   699  //
   700  // This should return ErrDirNotFound if the directory isn't
   701  // found.
   702  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
   703  	err = f.dirCache.FindRoot(ctx, false)
   704  	if err != nil {
   705  		return nil, err
   706  	}
   707  	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
   708  	if err != nil {
   709  		return nil, err
   710  	}
   711  	var iErr error
   712  	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
   713  		remote := path.Join(dir, info.Name)
   714  		if info.Type == api.ItemTypeFolder {
   715  			// cache the directory ID for later lookups
   716  			f.dirCache.Put(remote, info.ID)
   717  			d := fs.NewDir(remote, info.CreatedAt).SetID(info.ID).SetSize(info.Size).SetItems(int64(info.FileCount))
   718  			entries = append(entries, d)
   719  		} else if info.Type == api.ItemTypeFile {
   720  			o, err := f.newObjectWithInfo(ctx, remote, info)
   721  			if err != nil {
   722  				iErr = err
   723  				return true
   724  			}
   725  			entries = append(entries, o)
   726  		}
   727  		return false
   728  	})
   729  	if err != nil {
   730  		return nil, err
   731  	}
   732  	if iErr != nil {
   733  		return nil, iErr
   734  	}
   735  	return entries, nil
   736  }
   737  
   738  // Creates, from the parameters passed in, a half-finished Object which
   739  // must have setMetaData called on it
   740  //
   741  // Returns the object, leaf, directoryID and error
   742  //
   743  // Used to create new objects
   744  func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
   745  	// Create the directory for the object if it doesn't exist
   746  	leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
   747  	if err != nil {
   748  		return
   749  	}
   750  	// Temporary Object under construction
   751  	o = &Object{
   752  		fs:     f,
   753  		remote: remote,
   754  	}
   755  	return o, leaf, directoryID, nil
   756  }
   757  
   758  // Put the object
   759  //
   760  // Copy the reader in to the new object which is returned
   761  //
   762  // The new object may have been created if an error is returned
   763  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   764  	existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
   765  	switch err {
   766  	case nil:
   767  		return existingObj, existingObj.Update(ctx, in, src, options...)
   768  	case fs.ErrorObjectNotFound:
   769  		// Not found so create it
   770  		return f.PutUnchecked(ctx, in, src)
   771  	default:
   772  		return nil, err
   773  	}
   774  }
   775  
   776  // PutStream uploads the object of indeterminate size to the remote path with the modTime given
   777  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   778  	return f.Put(ctx, in, src, options...)
   779  }
   780  
   781  // PutUnchecked the object into the container
   782  //
   783  // This will produce an error if the object already exists
   784  //
   785  // Copy the reader in to the new object which is returned
   786  //
   787  // The new object may have been created if an error is returned
   788  func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   789  	remote := src.Remote()
   790  	size := src.Size()
   791  	modTime := src.ModTime(ctx)
   792  
   793  	o, _, _, err := f.createObject(ctx, remote, modTime, size)
   794  	if err != nil {
   795  		return nil, err
   796  	}
   797  	return o, o.Update(ctx, in, src, options...)
   798  }
   799  
   800  // Mkdir creates the container if it doesn't exist
   801  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   802  	err := f.dirCache.FindRoot(ctx, true)
   803  	if err != nil {
   804  		return err
   805  	}
   806  	if dir != "" {
   807  		_, err = f.dirCache.FindDir(ctx, dir, true)
   808  	}
   809  	return err
   810  }
   811  
   812  // purgeCheck removes the directory; if check is set then it refuses
   813  // to do so if it has anything in it
   814  func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
   815  	root := path.Join(f.root, dir)
   816  	if root == "" {
   817  		return errors.New("can't purge root directory")
   818  	}
   819  	dc := f.dirCache
   820  	err := dc.FindRoot(ctx, false)
   821  	if err != nil {
   822  		return err
   823  	}
   824  	rootID, err := dc.FindDir(ctx, dir, false)
   825  	if err != nil {
   826  		return err
   827  	}
   828  
   829  	// need to check if empty as it will delete recursively by default
   830  	if check {
   831  		found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
   832  			return true
   833  		})
   834  		if err != nil {
   835  			return errors.Wrap(err, "purgeCheck")
   836  		}
   837  		if found {
   838  			return fs.ErrorDirectoryNotEmpty
   839  		}
   840  	}
   841  
   842  	err = f.remove(ctx, rootID)
   843  	f.dirCache.FlushDir(dir)
   844  	if err != nil {
   845  		return err
   846  	}
   847  	return nil
   848  }
   849  
   850  // Rmdir deletes the root folder
   851  //
   852  // Returns an error if it isn't empty
   853  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
   854  	return f.purgeCheck(ctx, dir, true)
   855  }
   856  
   857  // Precision return the precision of this Fs
   858  func (f *Fs) Precision() time.Duration {
   859  	// sharefile returns times accurate to the millisecond, but
   860  	// for some reason these seem to be only accurate to 2ms.
   861  	// updateItem seems to only set times accurate to 1 second though.
   862  	return time.Second // this doesn't appear to be documented anywhere
   863  }
   864  
   865  // Purge deletes all the files and the container
   866  //
   867  // Optional interface: Only implement this if you have a way of
   868  // deleting all the files quicker than just running Remove() on the
   869  // result of List()
   870  func (f *Fs) Purge(ctx context.Context) error {
   871  	return f.purgeCheck(ctx, "", false)
   872  }
   873  
   874  // updateItem patches a file or folder
   875  //
   876  // if leaf == "" or directoryID == "" or modTime == nil then that field
   877  // will be left alone
   878  //
   879  // Note that this seems to work by renaming first, then moving to a
   880  // new directory which means that it can overwrite existing objects
   881  // :-(
   882  func (f *Fs) updateItem(ctx context.Context, id, leaf, directoryID string, modTime *time.Time) (info *api.Item, err error) {
   883  	// Move the object
   884  	opts := rest.Opts{
   885  		Method: "PATCH",
   886  		Path:   "/Items(" + id + ")",
   887  		Parameters: url.Values{
   888  			"$select":   {api.ListRequestSelect},
   889  			"overwrite": {"false"},
   890  		},
   891  	}
   892  	leaf = f.opt.Enc.FromStandardName(leaf)
   893  	// FIXME this appears to be a bug in the API
   894  	//
   895  	// If you set the modified time via PATCH then the server
   896  	// appears to parse it as a local time for America/New_York
   897  	//
   898  	// However if you set it when uploading the file then it is fine...
   899  	//
   900  	// Also it only sets the time to 1 second resolution where it
   901  	// uses 1ms resolution elsewhere
   902  	if modTime != nil && f.location != nil {
   903  		newTime := modTime.In(f.location)
   904  		isoTime := newTime.Format(time.RFC3339Nano)
   905  		// Chop TZ -05:00 off the end and replace with Z
   906  		isoTime = isoTime[:len(isoTime)-6] + "Z"
   907  		// Parse it back into a time
   908  		newModTime, err := time.Parse(time.RFC3339Nano, isoTime)
   909  		if err != nil {
   910  			return nil, errors.Wrap(err, "updateItem: time parse")
   911  		}
   912  		modTime = &newModTime
   913  	}
   914  	update := api.UpdateItemRequest{
   915  		Name:       leaf,
   916  		FileName:   leaf,
   917  		ModifiedAt: modTime,
   918  	}
   919  	if directoryID != "" {
   920  		update.Parent = &api.Parent{
   921  			ID: directoryID,
   922  		}
   923  	}
   924  	var resp *http.Response
   925  	err = f.pacer.Call(func() (bool, error) {
   926  		resp, err = f.srv.CallJSON(ctx, &opts, &update, &info)
   927  		return shouldRetry(resp, err)
   928  	})
   929  	if err != nil {
   930  		return nil, err
   931  	}
   932  	return info, nil
   933  }
   934  
   935  // move a file or folder
   936  //
   937  // This is complicated by the fact that we can't use updateItem to move
   938  // to a different directory AND rename at the same time as it can
   939  // overwrite files in the source directory.
   940  func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDirectoryID, newDirectoryID string) (item *api.Item, err error) {
   941  	// To demonstrate bug
   942  	// item, err = f.updateItem(ctx, id, newLeaf, newDirectoryID, nil)
   943  	// if err != nil {
   944  	// 	return nil, errors.Wrap(err, "Move rename leaf")
   945  	// }
   946  	// return item, nil
   947  	doRenameLeaf := oldLeaf != newLeaf
   948  	doMove := oldDirectoryID != newDirectoryID
   949  
   950  	// Now rename the leaf to a temporary name if we are moving to
   951  	// another directory to make sure we don't overwrite something
   952  	// in the source directory by accident
   953  	if doRenameLeaf && doMove {
   954  		tmpLeaf := newLeaf + "." + random.String(8)
   955  		item, err = f.updateItem(ctx, id, tmpLeaf, "", nil)
   956  		if err != nil {
   957  			return nil, errors.Wrap(err, "Move rename leaf")
   958  		}
   959  	}
   960  
   961  	// Move the object to a new directory (with the existing name)
   962  	// if required
   963  	if doMove {
   964  		item, err = f.updateItem(ctx, id, "", newDirectoryID, nil)
   965  		if err != nil {
   966  			return nil, errors.Wrap(err, "Move directory")
   967  		}
   968  	}
   969  
   970  	// Rename the leaf to its final name if required
   971  	if doRenameLeaf {
   972  		item, err = f.updateItem(ctx, id, newLeaf, "", nil)
   973  		if err != nil {
   974  			return nil, errors.Wrap(err, "Move rename leaf")
   975  		}
   976  	}
   977  
   978  	return item, nil
   979  }
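
        // For example, moving dirA/a.txt to dirB/b.txt goes (sketch):
        //
        //	1. rename a.txt -> b.txt.XXXXXXXX (random suffix) within dirA
        //	2. move b.txt.XXXXXXXX into dirB
        //	3. rename b.txt.XXXXXXXX -> b.txt
        //
        // so any existing dirA/b.txt is never overwritten by the rename.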
   980  
   981  // Move src to this remote using server side move operations.
   982  //
   983  // This is stored with the remote path given
   984  //
   985  // It returns the destination Object and a possible error
   986  //
   987  // Will only be called if src.Fs().Name() == f.Name()
   988  //
   989  // If it isn't possible then return fs.ErrorCantMove
   990  func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
   991  	srcObj, ok := src.(*Object)
   992  	if !ok {
   993  		fs.Debugf(src, "Can't move - not same remote type")
   994  		return nil, fs.ErrorCantMove
   995  	}
   996  
   997  	// Find ID of src parent, not creating subdirs
   998  	srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
   999  	if err != nil {
  1000  		return nil, err
  1001  	}
  1002  
  1003  	// Create temporary object
  1004  	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
  1005  	if err != nil {
  1006  		return nil, err
  1007  	}
  1008  
  1009  	// Do the move
  1010  	info, err := f.move(ctx, true, srcObj.id, srcLeaf, leaf, srcParentID, directoryID)
  1011  	if err != nil {
  1012  		return nil, err
  1013  	}
  1014  
  1015  	err = dstObj.setMetaData(info)
  1016  	if err != nil {
  1017  		return nil, err
  1018  	}
  1019  	return dstObj, nil
  1020  }
  1021  
  1022  // DirMove moves src, srcRemote to this remote at dstRemote
  1023  // using server side move operations.
  1024  //
  1025  // Will only be called if src.Fs().Name() == f.Name()
  1026  //
  1027  // If it isn't possible then return fs.ErrorCantDirMove
  1028  //
  1029  // If destination exists then return fs.ErrorDirExists
  1030  func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
  1031  	srcFs, ok := src.(*Fs)
  1032  	if !ok {
  1033  		fs.Debugf(srcFs, "Can't move directory - not same remote type")
  1034  		return fs.ErrorCantDirMove
  1035  	}
  1036  	srcPath := path.Join(srcFs.root, srcRemote)
  1037  	dstPath := path.Join(f.root, dstRemote)
  1038  
  1039  	// Refuse to move to or from the root
  1040  	if srcPath == "" || dstPath == "" {
  1041  		fs.Debugf(src, "DirMove error: Can't move root")
  1042  		return errors.New("can't move root directory")
  1043  	}
  1044  
  1045  	// find the root src directory
  1046  	err := srcFs.dirCache.FindRoot(ctx, false)
  1047  	if err != nil {
  1048  		return err
  1049  	}
  1050  
  1051  	// find the root dst directory
  1052  	if dstRemote != "" {
  1053  		err = f.dirCache.FindRoot(ctx, true)
  1054  		if err != nil {
  1055  			return err
  1056  		}
  1057  	} else {
  1058  		if f.dirCache.FoundRoot() {
  1059  			return fs.ErrorDirExists
  1060  		}
  1061  	}
  1062  
  1063  	// Find ID of dst parent, creating subdirs if necessary
  1064  	var leaf, directoryID string
  1065  	findPath := dstRemote
  1066  	if dstRemote == "" {
  1067  		findPath = f.root
  1068  	}
  1069  	leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
  1070  	if err != nil {
  1071  		return err
  1072  	}
  1073  
  1074  	// Check destination does not exist
  1075  	if dstRemote != "" {
  1076  		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
  1077  		if err == fs.ErrorDirNotFound {
  1078  			// OK
  1079  		} else if err != nil {
  1080  			return err
  1081  		} else {
  1082  			return fs.ErrorDirExists
  1083  		}
  1084  	}
  1085  
  1086  	// Find ID of src
  1087  	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
  1088  	if err != nil {
  1089  		return err
  1090  	}
  1091  
  1092  	// Find ID of src parent, not creating subdirs
  1093  	var srcLeaf, srcDirectoryID string
  1094  	findPath = srcRemote
  1095  	if srcRemote == "" {
  1096  		findPath = srcFs.root
  1097  	}
  1098  	srcLeaf, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
  1099  	if err != nil {
  1100  		return err
  1101  	}
  1102  
  1103  	// Do the move
  1104  	_, err = f.move(ctx, false, srcID, srcLeaf, leaf, srcDirectoryID, directoryID)
  1105  	if err != nil {
  1106  		return err
  1107  	}
  1108  	srcFs.dirCache.FlushDir(srcRemote)
  1109  	return nil
  1110  }
  1111  
  1112  // Copy src to this remote using server side copy operations.
  1113  //
  1114  // This is stored with the remote path given
  1115  //
  1116  // It returns the destination Object and a possible error
  1117  //
  1118  // Will only be called if src.Fs().Name() == f.Name()
  1119  //
  1120  // If it isn't possible then return fs.ErrorCantCopy
  1121  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
  1122  	srcObj, ok := src.(*Object)
  1123  	if !ok {
  1124  		fs.Debugf(src, "Can't copy - not same remote type")
  1125  		return nil, fs.ErrorCantCopy
  1126  	}
  1127  
  1128  	err = srcObj.readMetaData(ctx)
  1129  	if err != nil {
  1130  		return nil, err
  1131  	}
  1132  
  1133  	// Find ID of src parent, not creating subdirs
  1134  	srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
  1135  	if err != nil {
  1136  		return nil, err
  1137  	}
  1138  	srcLeaf = f.opt.Enc.FromStandardName(srcLeaf)
  1139  	_ = srcParentID
  1140  
  1141  	// Create temporary object
  1142  	dstObj, dstLeaf, dstParentID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
  1143  	if err != nil {
  1144  		return nil, err
  1145  	}
  1146  	dstLeaf = f.opt.Enc.FromStandardName(dstLeaf)
  1147  
  1148  	sameName := strings.ToLower(srcLeaf) == strings.ToLower(dstLeaf)
  1149  	if sameName && srcParentID == dstParentID {
  1150  		return nil, errors.Errorf("copy: can't copy to a file in the same directory whose name only differs in case: %q vs %q", srcLeaf, dstLeaf)
  1151  	}
  1152  
  1153  	// Discover whether we can just copy directly or not
  1154  	directCopy := false
  1155  	if sameName {
  1156  		// if copying to same name can copy directly
  1157  		directCopy = true
  1158  	} else {
  1159  		// if (dstParentID, srcLeaf) does not exist then can
  1160  		// Copy then Rename without fear of overwriting
  1161  		// something
  1162  		_, err := f.readMetaDataForIDPath(ctx, dstParentID, srcLeaf, false, false)
  1163  		if err == fs.ErrorObjectNotFound || err == fs.ErrorDirNotFound {
  1164  			directCopy = true
  1165  		} else if err != nil {
  1166  			return nil, errors.Wrap(err, "copy: failed to examine destination dir")
  1167  		} else {
  1168  			// otherwise need to copy via a temporary directory
  1169  		}
  1170  	}
  1171  
  1172  	// Copy direct to destination unless !directCopy in which case
  1173  	// copy via a temporary directory
  1174  	copyTargetDirID := dstParentID
  1175  	if !directCopy {
  1176  		// Create a temporary directory to copy the object in to
  1177  		tmpDir := "rclone-temp-dir-" + random.String(16)
  1178  		err = f.Mkdir(ctx, tmpDir)
  1179  		if err != nil {
  1180  			return nil, errors.Wrap(err, "copy: failed to make temp dir")
  1181  		}
  1182  		defer func() {
  1183  			rmdirErr := f.Rmdir(ctx, tmpDir)
  1184  			if rmdirErr != nil && err == nil {
  1185  				err = errors.Wrap(rmdirErr, "copy: failed to remove temp dir")
  1186  			}
  1187  		}()
  1188  		tmpDirID, err := f.dirCache.FindDir(ctx, tmpDir, false)
  1189  		if err != nil {
  1190  			return nil, errors.Wrap(err, "copy: failed to find temp dir")
  1191  		}
  1192  		copyTargetDirID = tmpDirID
  1193  	}
  1194  
  1195  	// Copy the object
  1196  	opts := rest.Opts{
  1197  		Method: "POST",
  1198  		Path:   "/Items(" + srcObj.id + ")/Copy",
  1199  		Parameters: url.Values{
  1200  			"$select":   {api.ListRequestSelect},
  1201  			"overwrite": {"false"},
  1202  			"targetid":  {copyTargetDirID},
  1203  		},
  1204  	}
  1205  	var resp *http.Response
  1206  	var info *api.Item
  1207  	err = f.pacer.Call(func() (bool, error) {
  1208  		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
  1209  		return shouldRetry(resp, err)
  1210  	})
  1211  	if err != nil {
  1212  		return nil, err
  1213  	}
  1214  
  1215  	// Rename into the correct name and directory if required and
  1216  	// set the modtime since the copy doesn't preserve it
  1217  	var updateParentID, updateLeaf string // only set these if necessary
  1218  	if srcLeaf != dstLeaf {
  1219  		updateLeaf = dstLeaf
  1220  	}
  1221  	if !directCopy {
  1222  		updateParentID = dstParentID
  1223  	}
  1224  	// set new modtime regardless
  1225  	info, err = f.updateItem(ctx, info.ID, updateLeaf, updateParentID, &srcObj.modTime)
  1226  	if err != nil {
  1227  		return nil, err
  1228  	}
  1229  	err = dstObj.setMetaData(info)
  1230  	if err != nil {
  1231  		return nil, err
  1232  	}
  1233  	return dstObj, nil
  1234  }
  1235  
  1236  // DirCacheFlush resets the directory cache - used in testing as an
  1237  // optional interface
  1238  func (f *Fs) DirCacheFlush() {
  1239  	f.dirCache.ResetRoot()
  1240  }
  1241  
  1242  // Hashes returns the supported hash sets.
  1243  func (f *Fs) Hashes() hash.Set {
  1244  	return hash.Set(hash.MD5)
  1245  }
  1246  
  1247  // ------------------------------------------------------------
  1248  
  1249  // Fs returns the parent Fs
  1250  func (o *Object) Fs() fs.Info {
  1251  	return o.fs
  1252  }
  1253  
  1254  // Return a string version
  1255  func (o *Object) String() string {
  1256  	if o == nil {
  1257  		return "<nil>"
  1258  	}
  1259  	return o.remote
  1260  }
  1261  
  1262  // Remote returns the remote path
  1263  func (o *Object) Remote() string {
  1264  	return o.remote
  1265  }
  1266  
  1267  // Hash returns the MD5 of an object returning a lowercase hex string
  1268  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  1269  	if t != hash.MD5 {
  1270  		return "", hash.ErrUnsupported
  1271  	}
  1272  	err := o.readMetaData(ctx)
  1273  	if err != nil {
  1274  		return "", err
  1275  	}
  1276  	return o.md5, nil
  1277  }
  1278  
  1279  // Size returns the size of an object in bytes
  1280  func (o *Object) Size() int64 {
  1281  	err := o.readMetaData(context.TODO())
  1282  	if err != nil {
  1283  		fs.Logf(o, "Failed to read metadata: %v", err)
  1284  		return 0
  1285  	}
  1286  	return o.size
  1287  }
  1288  
  1289  // setMetaData sets the metadata from info
  1290  func (o *Object) setMetaData(info *api.Item) (err error) {
  1291  	if info.Type != api.ItemTypeFile {
  1292  		return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
  1293  	}
  1294  	o.hasMetaData = true
  1295  	o.size = info.Size
  1296  	if !info.ModifiedAt.IsZero() {
  1297  		o.modTime = info.ModifiedAt
  1298  	} else {
  1299  		o.modTime = info.CreatedAt
  1300  	}
  1301  	o.id = info.ID
  1302  	o.md5 = info.Hash
  1303  	return nil
  1304  }
  1305  
  1306  // readMetaData gets the metadata if it hasn't already been fetched
  1307  //
  1308  // it also sets the info
  1309  func (o *Object) readMetaData(ctx context.Context) (err error) {
  1310  	if o.hasMetaData {
  1311  		return nil
  1312  	}
  1313  	var info *api.Item
  1314  	if o.id != "" {
  1315  		info, err = o.fs.readMetaDataForID(ctx, o.id, false, true)
  1316  	} else {
  1317  		info, err = o.fs.readMetaDataForPath(ctx, o.remote, false, true)
  1318  	}
  1319  	if err != nil {
  1320  		return err
  1321  	}
  1322  	return o.setMetaData(info)
  1323  }
  1324  
  1325  // ModTime returns the modification time of the object
  1326  //
  1327  // It attempts to read the object's mtime and if that isn't
  1328  // present it falls back to the LastModified returned in the
  1329  // http headers
  1330  func (o *Object) ModTime(ctx context.Context) time.Time {
  1331  	err := o.readMetaData(ctx)
  1332  	if err != nil {
  1333  		fs.Logf(o, "Failed to read metadata: %v", err)
  1334  		return time.Now()
  1335  	}
  1336  	return o.modTime
  1337  }
  1338  
  1339  // SetModTime sets the modification time of the local fs object
  1340  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
  1341  	info, err := o.fs.updateItem(ctx, o.id, "", "", &modTime)
  1342  	if err != nil {
  1343  		return err
  1344  	}
  1345  	err = o.setMetaData(info)
  1346  	if err != nil {
  1347  		return err
  1348  	}
  1349  	return nil
  1350  }
  1351  
  1352  // Storable returns a boolean showing whether this object is storable
  1353  func (o *Object) Storable() bool {
  1354  	return true
  1355  }
  1356  
  1357  // Open an object for read
  1358  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  1359  	opts := rest.Opts{
  1360  		Method: "GET",
  1361  		Path:   "/Items(" + o.id + ")/Download",
  1362  		Parameters: url.Values{
  1363  			"redirect": {"false"},
  1364  		},
  1365  	}
  1366  	var resp *http.Response
  1367  	var dl api.DownloadSpecification
  1368  	err = o.fs.pacer.Call(func() (bool, error) {
  1369  		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &dl)
  1370  		return shouldRetry(resp, err)
  1371  	})
  1372  	if err != nil {
  1373  		return nil, errors.Wrap(err, "open: fetch download specification")
  1374  	}
  1375  
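        	// dl.URL is a direct link to the file contents which we now fetch,
        	// passing the open options through so that Range/seek requests are
        	// honoured.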
  1376  	fs.FixRangeOption(options, o.size)
  1377  	opts = rest.Opts{
  1378  		Path:    "",
  1379  		RootURL: dl.URL,
  1380  		Method:  "GET",
  1381  		Options: options,
  1382  	}
  1383  	err = o.fs.pacer.Call(func() (bool, error) {
  1384  		resp, err = o.fs.srv.Call(ctx, &opts)
  1385  		return shouldRetry(resp, err)
  1386  	})
  1387  	if err != nil {
  1388  		return nil, errors.Wrap(err, "open")
  1389  	}
  1390  	return resp.Body, err
  1391  }
  1392  
  1393  // Update the object with the contents of the io.Reader, modTime and size
  1394  //
  1395  // If existing is set then it updates the object rather than creating a new one
  1396  //
  1397  // The new object may have been created if an error is returned
  1398  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
  1399  	remote := o.Remote()
  1400  	size := src.Size()
  1401  	modTime := src.ModTime(ctx)
  1402  	isLargeFile := size < 0 || size > int64(o.fs.opt.UploadCutoff)
  1403  
  1404  	// Create the directory for the object if it doesn't exist
  1405  	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
  1406  	if err != nil {
  1407  		return err
  1408  	}
  1409  	leaf = o.fs.opt.Enc.FromStandardName(leaf)
  1410  	var req = api.UploadRequest{
  1411  		Method:       "standard",
  1412  		Raw:          true,
  1413  		Filename:     leaf,
  1414  		Overwrite:    true,
  1415  		CreatedDate:  modTime,
  1416  		ModifiedDate: modTime,
  1417  		Tool:         fs.Config.UserAgent,
  1418  	}
  1419  
  1420  	if isLargeFile {
  1421  		if size < 0 {
  1422  			// For files of indeterminate size, use streamed
  1423  			req.Method = "streamed"
  1424  		} else {
  1425  			// otherwise use threaded which is more efficient
  1426  			req.Method = "threaded"
  1427  			req.ThreadCount = &fs.Config.Transfers
  1428  			req.Filesize = &size
  1429  		}
  1430  	}
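
        	// So, for example, with the default 128M cutoff and default config:
        	// a 10M file goes up as a single "standard" POST, a 1G file uses
        	// "threaded" with fs.Config.Transfers upload threads, and a stream
        	// of unknown length uses "streamed".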
  1431  
  1432  	var resp *http.Response
  1433  	var info api.UploadSpecification
  1434  	opts := rest.Opts{
  1435  		Method:  "POST",
  1436  		Path:    "/Items(" + directoryID + ")/Upload2",
  1437  		Options: options,
  1438  	}
  1439  	err = o.fs.pacer.Call(func() (bool, error) {
  1440  		resp, err = o.fs.srv.CallJSON(ctx, &opts, &req, &info)
  1441  		return shouldRetry(resp, err)
  1442  	})
  1443  	if err != nil {
  1444  		return errors.Wrap(err, "upload get specification")
  1445  	}
  1446  
  1447  	// If file is large then upload in parts
  1448  	if isLargeFile {
  1449  		up, err := o.fs.newLargeUpload(ctx, o, in, src, &info)
  1450  		if err != nil {
  1451  			return err
  1452  		}
  1453  		return up.Upload(ctx)
  1454  	}
  1455  
  1456  	// Single part upload
  1457  	opts = rest.Opts{
  1458  		Method:        "POST",
  1459  		RootURL:       info.ChunkURI + "&fmt=json",
  1460  		Body:          in,
  1461  		ContentLength: &size,
  1462  	}
  1463  	var finish api.UploadFinishResponse
  1464  	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  1465  		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &finish)
  1466  		return shouldRetry(resp, err)
  1467  	})
  1468  	if err != nil {
  1469  		return errors.Wrap(err, "upload file")
  1470  	}
  1471  	return o.checkUploadResponse(ctx, &finish)
  1472  }
  1473  
  1474  // Check the upload response and update the metadata on the object
  1475  func (o *Object) checkUploadResponse(ctx context.Context, finish *api.UploadFinishResponse) (err error) {
  1476  	// Find returned ID
  1477  	id, err := finish.ID()
  1478  	if err != nil {
  1479  		return err
  1480  	}
  1481  
  1482  	// Read metadata
  1483  	o.id = id
  1484  	o.hasMetaData = false
  1485  	return o.readMetaData(ctx)
  1486  }
  1487  
  1488  // Remove an object by ID
  1489  func (f *Fs) remove(ctx context.Context, id string) (err error) {
  1490  	opts := rest.Opts{
  1491  		Method: "DELETE",
  1492  		Path:   "/Items(" + id + ")",
  1493  		Parameters: url.Values{
  1494  			"singleversion": {"false"},
  1495  			"forceSync":     {"true"},
  1496  		},
  1497  		NoResponse: true,
  1498  	}
  1499  	var resp *http.Response
  1500  	err = f.pacer.Call(func() (bool, error) {
  1501  		resp, err = f.srv.Call(ctx, &opts)
  1502  		return shouldRetry(resp, err)
  1503  	})
  1504  	if err != nil {
  1505  		return errors.Wrap(err, "remove")
  1506  	}
  1507  	return nil
  1508  }
  1509  
  1510  // Remove an object
  1511  func (o *Object) Remove(ctx context.Context) error {
  1512  	err := o.readMetaData(ctx)
  1513  	if err != nil {
  1514  		return errors.Wrap(err, "Remove: Failed to read metadata")
  1515  	}
  1516  	return o.fs.remove(ctx, o.id)
  1517  }
  1518  
  1519  // ID returns the ID of the Object if known, or "" if not
  1520  func (o *Object) ID() string {
  1521  	return o.id
  1522  }
  1523  
  1524  // Check the interfaces are satisfied
  1525  var (
  1526  	_ fs.Fs              = (*Fs)(nil)
  1527  	_ fs.Purger          = (*Fs)(nil)
  1528  	_ fs.Mover           = (*Fs)(nil)
  1529  	_ fs.DirMover        = (*Fs)(nil)
  1530  	_ fs.Copier          = (*Fs)(nil)
  1531  	_ fs.PutStreamer     = (*Fs)(nil)
  1532  	_ fs.DirCacheFlusher = (*Fs)(nil)
  1533  	_ fs.Object          = (*Object)(nil)
  1534  	_ fs.IDer            = (*Object)(nil)
  1535  )