github.com/10XDev/rclone@v1.52.3-0.20200626220027-16af9ab76b2a/backend/onedrive/onedrive.go (about)

     1  // Package onedrive provides an interface to the Microsoft OneDrive
     2  // object storage system.
     3  package onedrive
     4  
     5  import (
     6  	"context"
     7  	"encoding/base64"
     8  	"encoding/hex"
     9  	"encoding/json"
    10  	"fmt"
    11  	"io"
    12  	"log"
    13  	"net/http"
    14  	"path"
    15  	"strconv"
    16  	"strings"
    17  	"time"
    18  
    19  	"github.com/pkg/errors"
    20  	"github.com/rclone/rclone/backend/onedrive/api"
    21  	"github.com/rclone/rclone/backend/onedrive/quickxorhash"
    22  	"github.com/rclone/rclone/fs"
    23  	"github.com/rclone/rclone/fs/config"
    24  	"github.com/rclone/rclone/fs/config/configmap"
    25  	"github.com/rclone/rclone/fs/config/configstruct"
    26  	"github.com/rclone/rclone/fs/config/obscure"
    27  	"github.com/rclone/rclone/fs/fserrors"
    28  	"github.com/rclone/rclone/fs/hash"
    29  	"github.com/rclone/rclone/lib/atexit"
    30  	"github.com/rclone/rclone/lib/dircache"
    31  	"github.com/rclone/rclone/lib/encoder"
    32  	"github.com/rclone/rclone/lib/oauthutil"
    33  	"github.com/rclone/rclone/lib/pacer"
    34  	"github.com/rclone/rclone/lib/readers"
    35  	"github.com/rclone/rclone/lib/rest"
    36  	"golang.org/x/oauth2"
    37  )
    38  
const (
	// rcloneClientID is the OAuth2 client ID registered for rclone,
	// used as the default ClientID in oauthConfig below.
	rcloneClientID = "b15665d9-eda6-4092-8539-0eec376afd59"
	// rcloneEncryptedClientSecret is the matching client secret,
	// obscured; revealed with obscure.MustReveal in oauthConfig.
	rcloneEncryptedClientSecret = "_JUdzh3LnKNqSPcf4Wu5fgMFIQOI8glZu_akYgR8yf6egowNBg-R"
	// Pacer tuning for API calls (see NewFs)
	minSleep      = 10 * time.Millisecond
	maxSleep      = 2 * time.Second
	decayConstant = 2 // bigger for slower decay, exponential
	// graphURL is the base URL for the Microsoft Graph API
	graphURL = "https://graph.microsoft.com/v1.0"
	// Config file keys written by the Config function
	configDriveID   = "drive_id"
	configDriveType = "drive_type"
	// Drive types as reported by the Graph API in parentReference.driveType
	driveTypePersonal   = "personal"
	driveTypeBusiness   = "business"
	driveTypeSharepoint = "documentLibrary"
	// Upload chunking: chunks must be a multiple of chunkSizeMultiple
	// (enforced by checkUploadChunkSize)
	defaultChunkSize  = 10 * fs.MebiByte
	chunkSizeMultiple = 320 * fs.KibiByte
)
    54  
// Globals
var (
	// Description of how to auth for this app for a business account
	oauthConfig = &oauth2.Config{
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
			TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
		},
		// offline_access is requested so that a refresh token is issued
		Scopes:       []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"},
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectLocalhostURL,
	}

	// QuickXorHashType is the hash.Type for OneDrive,
	// registered with the hash registry in init()
	QuickXorHashType hash.Type
)
    72  
    73  // Register with Fs
    74  func init() {
    75  	QuickXorHashType = hash.RegisterHash("QuickXorHash", 40, quickxorhash.New)
    76  	fs.Register(&fs.RegInfo{
    77  		Name:        "onedrive",
    78  		Description: "Microsoft OneDrive",
    79  		NewFs:       NewFs,
    80  		Config: func(name string, m configmap.Mapper) {
    81  			ctx := context.TODO()
    82  			err := oauthutil.Config("onedrive", name, m, oauthConfig, nil)
    83  			if err != nil {
    84  				log.Fatalf("Failed to configure token: %v", err)
    85  				return
    86  			}
    87  
    88  			// Stop if we are running non-interactive config
    89  			if fs.Config.AutoConfirm {
    90  				return
    91  			}
    92  
    93  			type driveResource struct {
    94  				DriveID   string `json:"id"`
    95  				DriveName string `json:"name"`
    96  				DriveType string `json:"driveType"`
    97  			}
    98  			type drivesResponse struct {
    99  				Drives []driveResource `json:"value"`
   100  			}
   101  
   102  			type siteResource struct {
   103  				SiteID   string `json:"id"`
   104  				SiteName string `json:"displayName"`
   105  				SiteURL  string `json:"webUrl"`
   106  			}
   107  			type siteResponse struct {
   108  				Sites []siteResource `json:"value"`
   109  			}
   110  
   111  			oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
   112  			if err != nil {
   113  				log.Fatalf("Failed to configure OneDrive: %v", err)
   114  			}
   115  			srv := rest.NewClient(oAuthClient)
   116  
   117  			var opts rest.Opts
   118  			var finalDriveID string
   119  			var siteID string
   120  			switch config.Choose("Your choice",
   121  				[]string{"onedrive", "sharepoint", "driveid", "siteid", "search"},
   122  				[]string{"OneDrive Personal or Business", "Root Sharepoint site", "Type in driveID", "Type in SiteID", "Search a Sharepoint site"},
   123  				false) {
   124  
   125  			case "onedrive":
   126  				opts = rest.Opts{
   127  					Method:  "GET",
   128  					RootURL: graphURL,
   129  					Path:    "/me/drives",
   130  				}
   131  			case "sharepoint":
   132  				opts = rest.Opts{
   133  					Method:  "GET",
   134  					RootURL: graphURL,
   135  					Path:    "/sites/root/drives",
   136  				}
   137  			case "driveid":
   138  				fmt.Printf("Paste your Drive ID here> ")
   139  				finalDriveID = config.ReadLine()
   140  			case "siteid":
   141  				fmt.Printf("Paste your Site ID here> ")
   142  				siteID = config.ReadLine()
   143  			case "search":
   144  				fmt.Printf("What to search for> ")
   145  				searchTerm := config.ReadLine()
   146  				opts = rest.Opts{
   147  					Method:  "GET",
   148  					RootURL: graphURL,
   149  					Path:    "/sites?search=" + searchTerm,
   150  				}
   151  
   152  				sites := siteResponse{}
   153  				_, err := srv.CallJSON(ctx, &opts, nil, &sites)
   154  				if err != nil {
   155  					log.Fatalf("Failed to query available sites: %v", err)
   156  				}
   157  
   158  				if len(sites.Sites) == 0 {
   159  					log.Fatalf("Search for '%s' returned no results", searchTerm)
   160  				} else {
   161  					fmt.Printf("Found %d sites, please select the one you want to use:\n", len(sites.Sites))
   162  					for index, site := range sites.Sites {
   163  						fmt.Printf("%d: %s (%s) id=%s\n", index, site.SiteName, site.SiteURL, site.SiteID)
   164  					}
   165  					siteID = sites.Sites[config.ChooseNumber("Chose drive to use:", 0, len(sites.Sites)-1)].SiteID
   166  				}
   167  			}
   168  
   169  			// if we have a siteID we need to ask for the drives
   170  			if siteID != "" {
   171  				opts = rest.Opts{
   172  					Method:  "GET",
   173  					RootURL: graphURL,
   174  					Path:    "/sites/" + siteID + "/drives",
   175  				}
   176  			}
   177  
   178  			// We don't have the final ID yet?
   179  			// query Microsoft Graph
   180  			if finalDriveID == "" {
   181  				drives := drivesResponse{}
   182  				_, err := srv.CallJSON(ctx, &opts, nil, &drives)
   183  				if err != nil {
   184  					log.Fatalf("Failed to query available drives: %v", err)
   185  				}
   186  
   187  				// Also call /me/drive as sometimes /me/drives doesn't return it #4068
   188  				if opts.Path == "/me/drives" {
   189  					opts.Path = "/me/drive"
   190  					meDrive := driveResource{}
   191  					_, err := srv.CallJSON(ctx, &opts, nil, &meDrive)
   192  					if err != nil {
   193  						log.Fatalf("Failed to query available drives: %v", err)
   194  					}
   195  					found := false
   196  					for _, drive := range drives.Drives {
   197  						if drive.DriveID == meDrive.DriveID {
   198  							found = true
   199  							break
   200  						}
   201  					}
   202  					// add the me drive if not found already
   203  					if !found {
   204  						fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
   205  						drives.Drives = append(drives.Drives, meDrive)
   206  					}
   207  				}
   208  
   209  				if len(drives.Drives) == 0 {
   210  					log.Fatalf("No drives found")
   211  				} else {
   212  					fmt.Printf("Found %d drives, please select the one you want to use:\n", len(drives.Drives))
   213  					for index, drive := range drives.Drives {
   214  						fmt.Printf("%d: %s (%s) id=%s\n", index, drive.DriveName, drive.DriveType, drive.DriveID)
   215  					}
   216  					finalDriveID = drives.Drives[config.ChooseNumber("Chose drive to use:", 0, len(drives.Drives)-1)].DriveID
   217  				}
   218  			}
   219  
   220  			// Test the driveID and get drive type
   221  			opts = rest.Opts{
   222  				Method:  "GET",
   223  				RootURL: graphURL,
   224  				Path:    "/drives/" + finalDriveID + "/root"}
   225  			var rootItem api.Item
   226  			_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
   227  			if err != nil {
   228  				log.Fatalf("Failed to query root for drive %s: %v", finalDriveID, err)
   229  			}
   230  
   231  			fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
   232  			// This does not work, YET :)
   233  			if !config.ConfirmWithConfig(m, "config_drive_ok", true) {
   234  				log.Fatalf("Cancelled by user")
   235  			}
   236  
   237  			m.Set(configDriveID, finalDriveID)
   238  			m.Set(configDriveType, rootItem.ParentReference.DriveType)
   239  			config.SaveConfig()
   240  		},
   241  		Options: []fs.Option{{
   242  			Name: config.ConfigClientID,
   243  			Help: "Microsoft App Client Id\nLeave blank normally.",
   244  		}, {
   245  			Name: config.ConfigClientSecret,
   246  			Help: "Microsoft App Client Secret\nLeave blank normally.",
   247  		}, {
   248  			Name: "chunk_size",
   249  			Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
   250  
   251  Above this size files will be chunked - must be multiple of 320k (327,680 bytes) and
   252  should not exceed 250M (262,144,000 bytes) else you may encounter \"Microsoft.SharePoint.Client.InvalidClientQueryException: The request message is too big.\"
   253  Note that the chunks will be buffered into memory.`,
   254  			Default:  defaultChunkSize,
   255  			Advanced: true,
   256  		}, {
   257  			Name:     "drive_id",
   258  			Help:     "The ID of the drive to use",
   259  			Default:  "",
   260  			Advanced: true,
   261  		}, {
   262  			Name:     "drive_type",
   263  			Help:     "The type of the drive ( " + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + " )",
   264  			Default:  "",
   265  			Advanced: true,
   266  		}, {
   267  			Name: "expose_onenote_files",
   268  			Help: `Set to make OneNote files show up in directory listings.
   269  
   270  By default rclone will hide OneNote files in directory listings because
   271  operations like "Open" and "Update" won't work on them.  But this
   272  behaviour may also prevent you from deleting them.  If you want to
   273  delete OneNote files or otherwise want them to show up in directory
   274  listing, set this option.`,
   275  			Default:  false,
   276  			Advanced: true,
   277  		}, {
   278  			Name:    "server_side_across_configs",
   279  			Default: false,
   280  			Help: `Allow server side operations (eg copy) to work across different onedrive configs.
   281  
   282  This can be useful if you wish to do a server side copy between two
   283  different Onedrives.  Note that this isn't enabled by default
   284  because it isn't easy to tell if it will work between any two
   285  configurations.`,
   286  			Advanced: true,
   287  		}, {
   288  			Name:     config.ConfigEncoding,
   289  			Help:     config.ConfigEncodingHelp,
   290  			Advanced: true,
   291  			// List of replaced characters:
   292  			//   < (less than)     -> '<' // FULLWIDTH LESS-THAN SIGN
   293  			//   > (greater than)  -> '>' // FULLWIDTH GREATER-THAN SIGN
   294  			//   : (colon)         -> ':' // FULLWIDTH COLON
   295  			//   " (double quote)  -> '"' // FULLWIDTH QUOTATION MARK
   296  			//   \ (backslash)     -> '\' // FULLWIDTH REVERSE SOLIDUS
   297  			//   | (vertical line) -> '|' // FULLWIDTH VERTICAL LINE
   298  			//   ? (question mark) -> '?' // FULLWIDTH QUESTION MARK
   299  			//   * (asterisk)      -> '*' // FULLWIDTH ASTERISK
   300  			//   # (number sign)  -> '#'  // FULLWIDTH NUMBER SIGN
   301  			//   % (percent sign) -> '%'  // FULLWIDTH PERCENT SIGN
   302  			//
   303  			// Folder names cannot begin with a tilde ('~')
   304  			// List of replaced characters:
   305  			//   ~ (tilde)        -> '~'  // FULLWIDTH TILDE
   306  			//
   307  			// Additionally names can't begin with a space ( ) or end with a period (.) or space ( ).
   308  			// List of replaced characters:
   309  			//   . (period)        -> '.' // FULLWIDTH FULL STOP
   310  			//     (space)         -> '␠'  // SYMBOL FOR SPACE
   311  			//
   312  			// Also encode invalid UTF-8 bytes as json doesn't handle them.
   313  			//
   314  			// The OneDrive API documentation lists the set of reserved characters, but
   315  			// testing showed this list is incomplete. This are the differences:
   316  			//  - " (double quote) is rejected, but missing in the documentation
   317  			//  - space at the end of file and folder names is rejected, but missing in the documentation
   318  			//  - period at the end of file names is rejected, but missing in the documentation
   319  			//
   320  			// Adding these restrictions to the OneDrive API documentation yields exactly
   321  			// the same rules as the Windows naming conventions.
   322  			//
   323  			// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/addressing-driveitems?view=odsp-graph-online#path-encoding
   324  			Default: (encoder.Display |
   325  				encoder.EncodeBackSlash |
   326  				encoder.EncodeHashPercent |
   327  				encoder.EncodeLeftSpace |
   328  				encoder.EncodeLeftTilde |
   329  				encoder.EncodeRightPeriod |
   330  				encoder.EncodeRightSpace |
   331  				encoder.EncodeWin |
   332  				encoder.EncodeInvalidUtf8),
   333  		}},
   334  	})
   335  }
   336  
// Options defines the configuration for this backend
type Options struct {
	ChunkSize               fs.SizeSuffix        `config:"chunk_size"`                 // upload chunk size - must be a multiple of chunkSizeMultiple
	DriveID                 string               `config:"drive_id"`                   // ID of the drive to query via Microsoft Graph
	DriveType               string               `config:"drive_type"`                 // personal | business | documentLibrary
	ExposeOneNoteFiles      bool                 `config:"expose_onenote_files"`       // show OneNote files in listings
	ServerSideAcrossConfigs bool                 `config:"server_side_across_configs"` // allow server side ops between different configs
	Enc                     encoder.MultiEncoder `config:"encoding"`                   // filename encoding rules
}
   346  
// Fs represents a remote one drive
type Fs struct {
	name         string             // name of this remote
	root         string             // the path we are working on
	opt          Options            // parsed options
	features     *fs.Features       // optional features
	srv          *rest.Client       // the connection to the one drive server, rooted at graphURL/drives/<driveID>
	dirCache     *dircache.DirCache // Map of directory path to directory id
	pacer        *fs.Pacer          // pacer for API calls
	tokenRenewer *oauthutil.Renew   // renew the token on expiry
	driveID      string             // ID to use for querying Microsoft Graph
	driveType    string             // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
}
   360  
// Object describes a one drive object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs            *Fs       // what this object is part of
	remote        string    // The remote path
	hasMetaData   bool      // whether info below has been set
	isOneNoteFile bool      // Whether the object is a OneNote file
	size          int64     // size of the object
	modTime       time.Time // modification time of the object
	id            string    // ID of the object
	sha1          string    // SHA-1 of the object content (when provided by the server)
	quickxorhash  string    // QuickXorHash of the object content (when provided by the server)
	mimeType      string    // Content-Type of object from server (may not be as uploaded)
}
   376  
   377  // ------------------------------------------------------------
   378  
   379  // Name of the remote (as passed into NewFs)
   380  func (f *Fs) Name() string {
   381  	return f.name
   382  }
   383  
   384  // Root of the remote (as passed into NewFs)
   385  func (f *Fs) Root() string {
   386  	return f.root
   387  }
   388  
   389  // String converts this Fs to a string
   390  func (f *Fs) String() string {
   391  	return fmt.Sprintf("One drive root '%s'", f.root)
   392  }
   393  
   394  // Features returns the optional features of this Fs
   395  func (f *Fs) Features() *fs.Features {
   396  	return f.features
   397  }
   398  
   399  // parsePath parses a one drive 'url'
   400  func parsePath(path string) (root string) {
   401  	root = strings.Trim(path, "/")
   402  	return
   403  }
   404  
// retryErrorCodes is a slice of error codes that we will retry
// (checked by shouldRetry via fserrors.ShouldRetryHTTP)
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}
   414  
   415  // shouldRetry returns a boolean as to whether this resp and err
   416  // deserve to be retried.  It returns the err as a convenience
   417  func shouldRetry(resp *http.Response, err error) (bool, error) {
   418  	retry := false
   419  	if resp != nil {
   420  		switch resp.StatusCode {
   421  		case 401:
   422  			if len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
   423  				retry = true
   424  				fs.Debugf(nil, "Should retry: %v", err)
   425  			}
   426  		case 429: // Too Many Requests.
   427  			// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
   428  			if values := resp.Header["Retry-After"]; len(values) == 1 && values[0] != "" {
   429  				retryAfter, parseErr := strconv.Atoi(values[0])
   430  				if parseErr != nil {
   431  					fs.Debugf(nil, "Failed to parse Retry-After: %q: %v", values[0], parseErr)
   432  				} else {
   433  					duration := time.Second * time.Duration(retryAfter)
   434  					retry = true
   435  					err = pacer.RetryAfterError(err, duration)
   436  					fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", retryAfter)
   437  				}
   438  			}
   439  		case 507: // Insufficient Storage
   440  			return false, fserrors.FatalError(err)
   441  		}
   442  	}
   443  	return retry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
   444  }
   445  
   446  // readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
   447  // if `relPath` == "", it reads the metadata for the item with that ID.
   448  //
   449  // We address items using the pattern `drives/driveID/items/itemID:/relativePath`
   450  // instead of simply using `drives/driveID/root:/itemPath` because it works for
   451  // "shared with me" folders in OneDrive Personal (See #2536, #2778)
   452  // This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
   453  //
   454  // If `relPath` == '', do not append the slash (See #3664)
   455  func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
   456  	if relPath != "" {
   457  		relPath = "/" + withTrailingColon(rest.URLPathEscape(f.opt.Enc.FromStandardPath(relPath)))
   458  	}
   459  	opts := newOptsCall(normalizedID, "GET", ":"+relPath)
   460  	err = f.pacer.Call(func() (bool, error) {
   461  		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
   462  		return shouldRetry(resp, err)
   463  	})
   464  
   465  	return info, resp, err
   466  }
   467  
// readMetaDataForPath reads the metadata from the path (relative to the absolute root)
//
// For non-personal drives, or for paths with no "/", a single
// "/root:/path" query suffices.  For OneDrive Personal paths inside a
// folder the item is resolved relative to a folder's normalized ID so
// that "shared with me" folders (which live in another user's drive)
// can be addressed - see the long comment below.
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, resp *http.Response, err error) {
	firstSlashIndex := strings.IndexRune(path, '/')

	if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
		// Simple case: address the item directly relative to the drive root
		var opts rest.Opts
		if len(path) == 0 {
			opts = rest.Opts{
				Method: "GET",
				Path:   "/root",
			}
		} else {
			opts = rest.Opts{
				Method: "GET",
				Path:   "/root:/" + rest.URLPathEscape(f.opt.Enc.FromStandardPath(path)),
			}
		}
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
			return shouldRetry(resp, err)
		})
		return info, resp, err
	}

	// The following branch handles the case when we're using OneDrive Personal and the path is in a folder.
	// For OneDrive Personal, we need to consider the "shared with me" folders.
	// An item in such a folder can only be addressed by its ID relative to the sharer's driveID or
	// by its path relative to the folder's ID relative to the sharer's driveID.
	// Note: A "shared with me" folder can only be placed in the sharee's absolute root.
	// So we read metadata relative to a suitable folder's normalized ID.
	var dirCacheFoundRoot bool
	var rootNormalizedID string
	if f.dirCache != nil {
		var dirCacheRootIDExists bool
		rootNormalizedID, dirCacheRootIDExists = f.dirCache.Get("")
		if f.root == "" {
			// if f.root == "", it means f.root is the absolute root of the drive
			// and its ID should have been found in NewFs
			dirCacheFoundRoot = dirCacheRootIDExists
		} else if _, err := f.dirCache.RootParentID(); err == nil {
			// if root is in a folder, it must have a parent folder, and
			// if dirCache has found root in NewFs, the parent folder's ID
			// should be present.
			// This RootParentID() check is a fix for #3164 which describes
			// a possible case where the root is not found.
			dirCacheFoundRoot = dirCacheRootIDExists
		}
	}

	relPath, insideRoot := getRelativePathInsideBase(f.root, path)
	var firstDir, baseNormalizedID string
	if !insideRoot || !dirCacheFoundRoot {
		// We do not have the normalized ID in dirCache for our query to base on. Query it manually.
		// Note: the recursive call resolves the first path component to its ID.
		firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
		info, resp, err := f.readMetaDataForPath(ctx, firstDir)
		if err != nil {
			return info, resp, err
		}
		baseNormalizedID = info.GetID()
	} else {
		if f.root != "" {
			// Read metadata based on root
			baseNormalizedID = rootNormalizedID
		} else {
			// Read metadata based on firstDir
			firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
			baseNormalizedID, err = f.dirCache.FindDir(ctx, firstDir, false)
			if err != nil {
				return nil, nil, err
			}
		}
	}

	return f.readMetaDataForPathRelativeToID(ctx, baseNormalizedID, relPath)
}
   543  
   544  // errorHandler parses a non 2xx error response into an error
   545  func errorHandler(resp *http.Response) error {
   546  	// Decode error response
   547  	errResponse := new(api.Error)
   548  	err := rest.DecodeJSON(resp, &errResponse)
   549  	if err != nil {
   550  		fs.Debugf(nil, "Couldn't decode error response: %v", err)
   551  	}
   552  	if errResponse.ErrorInfo.Code == "" {
   553  		errResponse.ErrorInfo.Code = resp.Status
   554  	}
   555  	return errResponse
   556  }
   557  
   558  func checkUploadChunkSize(cs fs.SizeSuffix) error {
   559  	const minChunkSize = fs.Byte
   560  	if cs%chunkSizeMultiple != 0 {
   561  		return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
   562  	}
   563  	if cs < minChunkSize {
   564  		return errors.Errorf("%s is less than %s", cs, minChunkSize)
   565  	}
   566  	return nil
   567  }
   568  
   569  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   570  	err = checkUploadChunkSize(cs)
   571  	if err == nil {
   572  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
   573  	}
   574  	return
   575  }
   576  
   577  // NewFs constructs an Fs from the path, container:path
   578  func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
   579  	ctx := context.Background()
   580  	// Parse config into Options struct
   581  	opt := new(Options)
   582  	err := configstruct.Set(m, opt)
   583  	if err != nil {
   584  		return nil, err
   585  	}
   586  
   587  	err = checkUploadChunkSize(opt.ChunkSize)
   588  	if err != nil {
   589  		return nil, errors.Wrap(err, "onedrive: chunk size")
   590  	}
   591  
   592  	if opt.DriveID == "" || opt.DriveType == "" {
   593  		return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
   594  	}
   595  
   596  	root = parsePath(root)
   597  	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
   598  	if err != nil {
   599  		return nil, errors.Wrap(err, "failed to configure OneDrive")
   600  	}
   601  
   602  	f := &Fs{
   603  		name:      name,
   604  		root:      root,
   605  		opt:       *opt,
   606  		driveID:   opt.DriveID,
   607  		driveType: opt.DriveType,
   608  		srv:       rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
   609  		pacer:     fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
   610  	}
   611  	f.features = (&fs.Features{
   612  		CaseInsensitive:         true,
   613  		ReadMimeType:            true,
   614  		CanHaveEmptyDirectories: true,
   615  		ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
   616  	}).Fill(f)
   617  	f.srv.SetErrorHandler(errorHandler)
   618  
   619  	// Renew the token in the background
   620  	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
   621  		_, _, err := f.readMetaDataForPath(ctx, "")
   622  		return err
   623  	})
   624  
   625  	// Get rootID
   626  	rootInfo, _, err := f.readMetaDataForPath(ctx, "")
   627  	if err != nil || rootInfo.GetID() == "" {
   628  		return nil, errors.Wrap(err, "failed to get root")
   629  	}
   630  
   631  	f.dirCache = dircache.New(root, rootInfo.GetID(), f)
   632  
   633  	// Find the current root
   634  	err = f.dirCache.FindRoot(ctx, false)
   635  	if err != nil {
   636  		// Assume it is a file
   637  		newRoot, remote := dircache.SplitPath(root)
   638  		tempF := *f
   639  		tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF)
   640  		tempF.root = newRoot
   641  		// Make new Fs which is the parent
   642  		err = tempF.dirCache.FindRoot(ctx, false)
   643  		if err != nil {
   644  			// No root so return old f
   645  			return f, nil
   646  		}
   647  		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
   648  		if err != nil {
   649  			if err == fs.ErrorObjectNotFound {
   650  				// File doesn't exist so return old f
   651  				return f, nil
   652  			}
   653  			return nil, err
   654  		}
   655  		// XXX: update the old f here instead of returning tempF, since
   656  		// `features` were already filled with functions having *f as a receiver.
   657  		// See https://github.com/rclone/rclone/issues/2182
   658  		f.dirCache = tempF.dirCache
   659  		f.root = tempF.root
   660  		// return an error with an fs which points to the parent
   661  		return f, fs.ErrorIsFile
   662  	}
   663  	return f, nil
   664  }
   665  
   666  // rootSlash returns root with a slash on if it is empty, otherwise empty string
   667  func (f *Fs) rootSlash() string {
   668  	if f.root == "" {
   669  		return f.root
   670  	}
   671  	return f.root + "/"
   672  }
   673  
   674  // Return an Object from a path
   675  //
   676  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   677  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
   678  	o := &Object{
   679  		fs:     f,
   680  		remote: remote,
   681  	}
   682  	var err error
   683  	if info != nil {
   684  		// Set info
   685  		err = o.setMetaData(info)
   686  	} else {
   687  		err = o.readMetaData(ctx) // reads info and meta, returning an error
   688  	}
   689  	if err != nil {
   690  		return nil, err
   691  	}
   692  	return o, nil
   693  }
   694  
   695  // NewObject finds the Object at remote.  If it can't be found
   696  // it returns the error fs.ErrorObjectNotFound.
   697  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   698  	return f.newObjectWithInfo(ctx, remote, nil)
   699  }
   700  
   701  // FindLeaf finds a directory of name leaf in the folder with ID pathID
   702  func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
   703  	// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
   704  	_, ok := f.dirCache.GetInv(pathID)
   705  	if !ok {
   706  		return "", false, errors.New("couldn't find parent ID")
   707  	}
   708  	info, resp, err := f.readMetaDataForPathRelativeToID(ctx, pathID, leaf)
   709  	if err != nil {
   710  		if resp != nil && resp.StatusCode == http.StatusNotFound {
   711  			return "", false, nil
   712  		}
   713  		return "", false, err
   714  	}
   715  	if info.GetPackageType() == api.PackageTypeOneNote {
   716  		return "", false, errors.New("found OneNote file when looking for folder")
   717  	}
   718  	if info.GetFolder() == nil {
   719  		return "", false, errors.New("found file when looking for folder")
   720  	}
   721  	return info.GetID(), true, nil
   722  }
   723  
   724  // CreateDir makes a directory with pathID as parent and name leaf
   725  func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, err error) {
   726  	// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
   727  	var resp *http.Response
   728  	var info *api.Item
   729  	opts := newOptsCall(dirID, "POST", "/children")
   730  	mkdir := api.CreateItemRequest{
   731  		Name:             f.opt.Enc.FromStandardName(leaf),
   732  		ConflictBehavior: "fail",
   733  	}
   734  	err = f.pacer.Call(func() (bool, error) {
   735  		resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
   736  		return shouldRetry(resp, err)
   737  	})
   738  	if err != nil {
   739  		//fmt.Printf("...Error %v\n", err)
   740  		return "", err
   741  	}
   742  
   743  	//fmt.Printf("...Id %q\n", *info.Id)
   744  	return info.GetID(), nil
   745  }
   746  
// listAllFn is the type of the user function supplied to listAll to
// process each item found in a directory listing.
//
// It should return true to stop the listing early (listAll then
// returns found = true).
type listAllFn func(*api.Item) bool
   754  
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// If directoriesOnly is set only directories are sent to fn; if
// filesOnly is set only files are.  Deleted items are always skipped.
// Paging is followed via result.NextLink until exhausted.
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	// Top parameter asks for bigger pages of data
	// https://dev.onedrive.com/odata/optional-query-parameters.htm
	opts := newOptsCall(dirID, "GET", "/children?$top=1000")
OUTER:
	for {
		var result api.ListChildrenResponse
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
			return shouldRetry(resp, err)
		})
		if err != nil {
			return found, errors.Wrap(err, "couldn't list files")
		}
		if len(result.Value) == 0 {
			break
		}
		for i := range result.Value {
			item := &result.Value[i]
			isFolder := item.GetFolder() != nil
			// apply the directoriesOnly/filesOnly filters
			if isFolder {
				if filesOnly {
					continue
				}
			} else {
				if directoriesOnly {
					continue
				}
			}
			// skip tombstones of deleted items
			if item.Deleted != nil {
				continue
			}
			item.Name = f.opt.Enc.ToStandardName(item.GetName())
			if fn(item) {
				found = true
				break OUTER
			}
		}
		if result.NextLink == "" {
			break
		}
		// follow the absolute next-page URL; Path must be cleared so
		// RootURL is used verbatim
		opts.Path = ""
		opts.RootURL = result.NextLink
	}
	return
}
   805  
// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		return nil, err
	}
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}
	// iErr carries an error out of the listAll callback (which can
	// only signal "stop" by returning true)
	var iErr error
	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
		// OneNote files can't be downloaded so hide them unless asked
		if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
			fs.Debugf(info.Name, "OneNote file not shown in directory listing")
			return false
		}

		remote := path.Join(dir, info.GetName())
		folder := info.GetFolder()
		if folder != nil {
			// cache the directory ID for later lookups
			id := info.GetID()
			f.dirCache.Put(remote, id)
			d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
			d.SetItems(folder.ChildCount)
			entries = append(entries, d)
		} else {
			o, err := f.newObjectWithInfo(ctx, remote, info)
			if err != nil {
				iErr = err
				return true
			}
			entries = append(entries, o)
		}
		return false
	})
	if err != nil {
		return nil, err
	}
	if iErr != nil {
		return nil, iErr
	}
	return entries, nil
}
   858  
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
//
// Note: modTime and size are accepted for call-site symmetry but are
// not stored here - they are applied later (e.g. by Update).
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
	// Create the directory for the object if it doesn't exist
	leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
	if err != nil {
		return nil, leaf, directoryID, err
	}
	// Temporary Object under construction
	o = &Object{
		fs:     f,
		remote: remote,
	}
	return o, leaf, directoryID, nil
}
   878  
   879  // Put the object into the container
   880  //
   881  // Copy the reader in to the new object which is returned
   882  //
   883  // The new object may have been created if an error is returned
   884  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   885  	remote := src.Remote()
   886  	size := src.Size()
   887  	modTime := src.ModTime(ctx)
   888  
   889  	o, _, _, err := f.createObject(ctx, remote, modTime, size)
   890  	if err != nil {
   891  		return nil, err
   892  	}
   893  	return o, o.Update(ctx, in, src, options...)
   894  }
   895  
   896  // Mkdir creates the container if it doesn't exist
   897  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   898  	err := f.dirCache.FindRoot(ctx, true)
   899  	if err != nil {
   900  		return err
   901  	}
   902  	if dir != "" {
   903  		_, err = f.dirCache.FindDir(ctx, dir, true)
   904  	}
   905  	return err
   906  }
   907  
// deleteObject removes an object (file or directory) by ID.
//
// Note that OneDrive moves deleted items to the recycle bin rather
// than deleting them permanently.
func (f *Fs) deleteObject(ctx context.Context, id string) error {
	opts := newOptsCall(id, "DELETE", "")
	opts.NoResponse = true

	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
}
   918  
// purgeCheck removes the directory dir, if check is set then it
// refuses to do so if it has anything in
//
// Used by both Rmdir (check=true) and Purge (check=false).
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	root := path.Join(f.root, dir)
	if root == "" {
		return errors.New("can't purge root directory")
	}
	dc := f.dirCache
	err := dc.FindRoot(ctx, false)
	if err != nil {
		return err
	}
	rootID, err := dc.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}
	if check {
		// check to see if there are any items - stop at the first one
		found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
			return true
		})
		if err != nil {
			return err
		}
		if found {
			return fs.ErrorDirectoryNotEmpty
		}
	}
	err = f.deleteObject(ctx, rootID)
	if err != nil {
		return err
	}
	// Invalidate any cached IDs under the deleted directory
	f.dirCache.FlushDir(dir)
	return nil
}
   954  
// Rmdir deletes the specified directory
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, true)
}
   961  
// Precision returns the precision of mod times that this Fs supports
// (one second for OneDrive).
func (f *Fs) Precision() time.Duration {
	return time.Second
}
   966  
   967  // waitForJob waits for the job with status in url to complete
   968  func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
   969  	deadline := time.Now().Add(fs.Config.Timeout)
   970  	for time.Now().Before(deadline) {
   971  		var resp *http.Response
   972  		var err error
   973  		var body []byte
   974  		err = f.pacer.Call(func() (bool, error) {
   975  			resp, err = http.Get(location)
   976  			if err != nil {
   977  				return fserrors.ShouldRetry(err), err
   978  			}
   979  			body, err = rest.ReadBody(resp)
   980  			return fserrors.ShouldRetry(err), err
   981  		})
   982  		if err != nil {
   983  			return err
   984  		}
   985  		// Try to decode the body first as an api.AsyncOperationStatus
   986  		var status api.AsyncOperationStatus
   987  		err = json.Unmarshal(body, &status)
   988  		if err != nil {
   989  			return errors.Wrapf(err, "async status result not JSON: %q", body)
   990  		}
   991  
   992  		switch status.Status {
   993  		case "failed":
   994  		case "deleteFailed":
   995  			{
   996  				return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
   997  			}
   998  		case "completed":
   999  			err = o.readMetaData(ctx)
  1000  			return errors.Wrapf(err, "async operation completed but readMetaData failed")
  1001  		}
  1002  
  1003  		time.Sleep(1 * time.Second)
  1004  	}
  1005  	return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
  1006  }
  1007  
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	err := srcObj.readMetaData(ctx)
	if err != nil {
		return nil, err
	}

	// Check we aren't overwriting a file on the same remote
	// (compared lowercased - presumably because OneDrive treats
	// paths case-insensitively; TODO confirm)
	if srcObj.fs == f {
		srcPath := srcObj.rootPath()
		dstPath := f.rootPath(remote)
		if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
			return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
		}
	}

	// Create temporary object
	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
	if err != nil {
		return nil, err
	}

	// Copy the object asynchronously - ask the server for a monitor
	// URL instead of waiting for the copy inline
	opts := newOptsCall(srcObj.id, "POST", "/copy")
	opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
	opts.NoResponse = true

	id, dstDriveID, _ := parseNormalizedID(directoryID)

	replacedLeaf := f.opt.Enc.FromStandardName(leaf)
	copyReq := api.CopyItemRequest{
		Name: &replacedLeaf,
		ParentReference: api.ItemReference{
			DriveID: dstDriveID,
			ID:      id,
		},
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &copyReq, nil)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}

	// read location header - it points at the async job status URL
	location := resp.Header.Get("Location")
	if location == "" {
		return nil, errors.New("didn't receive location header in copy response")
	}

	// Wait for job to finish
	err = f.waitForJob(ctx, location, dstObj)
	if err != nil {
		return nil, err
	}

	// Copy does NOT copy the modTime from the source and there seems to
	// be no way to set date before
	// This will create TWO versions on OneDrive
	err = dstObj.SetModTime(ctx, srcObj.ModTime(ctx))
	if err != nil {
		return nil, err
	}

	return dstObj, nil
}
  1089  
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
	return f.purgeCheck(ctx, "", false)
}
  1098  
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Create temporary object
	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
	if err != nil {
		return nil, err
	}

	id, dstDriveID, _ := parseNormalizedID(directoryID)
	_, srcObjDriveID, _ := parseNormalizedID(srcObj.id)

	if dstDriveID != srcObjDriveID {
		// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
		// "Items cannot be moved between Drives using this request."
		return nil, fs.ErrorCantMove
	}

	// Move the object by PATCHing its parent reference and name
	opts := newOptsCall(srcObj.id, "PATCH", "")

	move := api.MoveItemRequest{
		Name: f.opt.Enc.FromStandardName(leaf),
		ParentReference: &api.ItemReference{
			DriveID: dstDriveID,
			ID:      id,
		},
		// We set the mod time too as it gets reset otherwise
		FileSystemInfo: &api.FileSystemInfoFacet{
			CreatedDateTime:      api.Timestamp(srcObj.modTime),
			LastModifiedDateTime: api.Timestamp(srcObj.modTime),
		},
	}
	var resp *http.Response
	var info api.Item
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}

	// Update the destination object from the returned metadata
	err = dstObj.setMetaData(&info)
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}
  1161  
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := path.Join(srcFs.root, srcRemote)
	dstPath := path.Join(f.root, dstRemote)

	// Refuse to move to or from the root
	if srcPath == "" || dstPath == "" {
		fs.Debugf(src, "DirMove error: Can't move root")
		return errors.New("can't move root directory")
	}

	// find the root src directory
	err := srcFs.dirCache.FindRoot(ctx, false)
	if err != nil {
		return err
	}

	// find the root dst directory
	if dstRemote != "" {
		err = f.dirCache.FindRoot(ctx, true)
		if err != nil {
			return err
		}
	} else {
		// moving to f.root itself - it must not already exist
		if f.dirCache.FoundRoot() {
			return fs.ErrorDirExists
		}
	}

	// Find ID of dst parent, creating subdirs if necessary
	var leaf, dstDirectoryID string
	findPath := dstRemote
	if dstRemote == "" {
		findPath = f.root
	}
	leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
	if err != nil {
		return err
	}
	parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)

	// Find ID of src
	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
	if err != nil {
		return err
	}
	_, srcDriveID, _ := parseNormalizedID(srcID)

	if dstDriveID != srcDriveID {
		// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
		// "Items cannot be moved between Drives using this request."
		return fs.ErrorCantDirMove
	}

	// Check destination does not exist
	if dstRemote != "" {
		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
		if err == fs.ErrorDirNotFound {
			// OK
		} else if err != nil {
			return err
		} else {
			return fs.ErrorDirExists
		}
	}

	// Get timestamps of src so they can be preserved
	srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(ctx, srcID, "")
	if err != nil {
		return err
	}

	// Do the move by PATCHing the source's parent reference
	opts := newOptsCall(srcID, "PATCH", "")
	move := api.MoveItemRequest{
		Name: f.opt.Enc.FromStandardName(leaf),
		ParentReference: &api.ItemReference{
			DriveID: dstDriveID,
			ID:      parsedDstDirID,
		},
		// We set the mod time too as it gets reset otherwise
		FileSystemInfo: &api.FileSystemInfoFacet{
			CreatedDateTime:      srcInfo.CreatedDateTime,
			LastModifiedDateTime: srcInfo.LastModifiedDateTime,
		},
	}
	var resp *http.Response
	var info api.Item
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return err
	}

	// Invalidate the moved directory's cached IDs
	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}
  1273  
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	f.dirCache.ResetRoot()
}
  1279  
  1280  // About gets quota information
  1281  func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
  1282  	var drive api.Drive
  1283  	opts := rest.Opts{
  1284  		Method: "GET",
  1285  		Path:   "",
  1286  	}
  1287  	var resp *http.Response
  1288  	err = f.pacer.Call(func() (bool, error) {
  1289  		resp, err = f.srv.CallJSON(ctx, &opts, nil, &drive)
  1290  		return shouldRetry(resp, err)
  1291  	})
  1292  	if err != nil {
  1293  		return nil, errors.Wrap(err, "about failed")
  1294  	}
  1295  	q := drive.Quota
  1296  	usage = &fs.Usage{
  1297  		Total:   fs.NewUsageValue(q.Total),     // quota of bytes that can be used
  1298  		Used:    fs.NewUsageValue(q.Used),      // bytes in use
  1299  		Trashed: fs.NewUsageValue(q.Deleted),   // bytes in trash
  1300  		Free:    fs.NewUsageValue(q.Remaining), // bytes which can be uploaded before reaching the quota
  1301  	}
  1302  	return usage, nil
  1303  }
  1304  
  1305  // Hashes returns the supported hash sets.
  1306  func (f *Fs) Hashes() hash.Set {
  1307  	if f.driveType == driveTypePersonal {
  1308  		return hash.Set(hash.SHA1)
  1309  	}
  1310  	return hash.Set(QuickXorHashType)
  1311  }
  1312  
  1313  // PublicLink returns a link for downloading without account.
  1314  func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
  1315  	info, _, err := f.readMetaDataForPath(ctx, f.rootPath(remote))
  1316  	if err != nil {
  1317  		return "", err
  1318  	}
  1319  	opts := newOptsCall(info.GetID(), "POST", "/createLink")
  1320  
  1321  	share := api.CreateShareLinkRequest{
  1322  		Type:  "view",
  1323  		Scope: "anonymous",
  1324  	}
  1325  
  1326  	var resp *http.Response
  1327  	var result api.CreateShareLinkResponse
  1328  	err = f.pacer.Call(func() (bool, error) {
  1329  		resp, err = f.srv.CallJSON(ctx, &opts, &share, &result)
  1330  		return shouldRetry(resp, err)
  1331  	})
  1332  	if err != nil {
  1333  		fmt.Println(err)
  1334  		return "", err
  1335  	}
  1336  	return result.Link.WebURL, nil
  1337  }
  1338  
  1339  // ------------------------------------------------------------
  1340  
// Fs returns the parent Fs of this Object.
func (o *Object) Fs() fs.Info {
	return o.fs
}
  1345  
  1346  // Return a string version
  1347  func (o *Object) String() string {
  1348  	if o == nil {
  1349  		return "<nil>"
  1350  	}
  1351  	return o.remote
  1352  }
  1353  
// Remote returns the remote path of the Object.
func (o *Object) Remote() string {
	return o.remote
}
  1358  
// rootPath returns the full path (root + remote) for use on the server
func (f *Fs) rootPath(remote string) string {
	return f.rootSlash() + remote
}
  1363  
// rootPath returns the object's full path for use in local functions
func (o *Object) rootPath() string {
	return o.fs.rootPath(o.remote)
}
  1368  
// srvPath returns the full path, encoded for the server, given a remote
func (f *Fs) srvPath(remote string) string {
	return f.opt.Enc.FromStandardPath(f.rootSlash() + remote)
}
  1373  
// srvPath returns the object's full path, encoded for the server
func (o *Object) srvPath() string {
	return o.fs.srvPath(o.remote)
}
  1378  
  1379  // Hash returns the SHA-1 of an object returning a lowercase hex string
  1380  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  1381  	if o.fs.driveType == driveTypePersonal {
  1382  		if t == hash.SHA1 {
  1383  			return o.sha1, nil
  1384  		}
  1385  	} else {
  1386  		if t == QuickXorHashType {
  1387  			return o.quickxorhash, nil
  1388  		}
  1389  	}
  1390  	return "", hash.ErrUnsupported
  1391  }
  1392  
// Size returns the size of an object in bytes, fetching the metadata
// if necessary. Returns 0 (after logging) if the metadata can't be read.
func (o *Object) Size() int64 {
	err := o.readMetaData(context.TODO())
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return 0
	}
	return o.size
}
  1402  
// setMetaData sets the metadata on the Object from info.
//
// Returns fs.ErrorNotAFile (wrapped) if info describes a folder.
func (o *Object) setMetaData(info *api.Item) (err error) {
	if info.GetFolder() != nil {
		return errors.Wrapf(fs.ErrorNotAFile, "%q", o.remote)
	}
	o.hasMetaData = true
	o.size = info.GetSize()

	o.isOneNoteFile = info.GetPackageType() == api.PackageTypeOneNote

	// Docs: https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/hashes
	//
	// We use SHA1 for onedrive personal and QuickXorHash for onedrive for business
	file := info.GetFile()
	if file != nil {
		o.mimeType = file.MimeType
		if file.Hashes.Sha1Hash != "" {
			o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
		}
		if file.Hashes.QuickXorHash != "" {
			// The server returns QuickXorHash base64-encoded; rclone
			// stores it as lowercase hex. A decode failure is logged
			// but not fatal.
			h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
			if err != nil {
				fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
			} else {
				o.quickxorhash = hex.EncodeToString(h)
			}
		}
	}
	// Prefer the client-set FileSystemInfo time, falling back to the
	// server's lastModifiedDateTime
	fileSystemInfo := info.GetFileSystemInfo()
	if fileSystemInfo != nil {
		o.modTime = time.Time(fileSystemInfo.LastModifiedDateTime)
	} else {
		o.modTime = time.Time(info.GetLastModifiedDateTime())
	}
	o.id = info.GetID()
	return nil
}
  1440  
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
//
// Translates the server's "itemNotFound" error into
// fs.ErrorObjectNotFound.
func (o *Object) readMetaData(ctx context.Context) (err error) {
	if o.hasMetaData {
		return nil
	}
	info, _, err := o.fs.readMetaDataForPath(ctx, o.rootPath())
	if err != nil {
		if apiErr, ok := err.(*api.Error); ok {
			if apiErr.ErrorInfo.Code == "itemNotFound" {
				return fs.ErrorObjectNotFound
			}
		}
		return err
	}
	return o.setMetaData(info)
}
  1459  
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
//
// Returns time.Now() (after logging) if the metadata can't be read.
func (o *Object) ModTime(ctx context.Context) time.Time {
	err := o.readMetaData(ctx)
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()
	}
	return o.modTime
}
  1473  
// setModTime sets the modification time of the remote object by
// PATCHing its FileSystemInfo facet, returning the updated item.
func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
	var opts rest.Opts
	// NOTE(review): the FindPath error is deliberately discarded - on
	// failure directoryID is empty and we fall back to the path-based
	// address below; confirm this best-effort behavior is intended.
	leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
	trueDirID, drive, rootURL := parseNormalizedID(directoryID)
	if drive != "" {
		// Address the item by drive + parent ID + leaf
		opts = rest.Opts{
			Method:  "PATCH",
			RootURL: rootURL,
			Path:    "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
		}
	} else {
		// Address the item by its path relative to the root
		opts = rest.Opts{
			Method: "PATCH",
			Path:   "/root:/" + withTrailingColon(rest.URLPathEscape(o.srvPath())),
		}
	}
	update := api.SetFileSystemInfo{
		FileSystemInfo: api.FileSystemInfoFacet{
			CreatedDateTime:      api.Timestamp(modTime),
			LastModifiedDateTime: api.Timestamp(modTime),
		},
	}
	var info *api.Item
	err := o.fs.pacer.Call(func() (bool, error) {
		resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
		return shouldRetry(resp, err)
	})
	return info, err
}
  1504  
  1505  // SetModTime sets the modification time of the local fs object
  1506  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
  1507  	info, err := o.setModTime(ctx, modTime)
  1508  	if err != nil {
  1509  		return err
  1510  	}
  1511  	return o.setMetaData(info)
  1512  }
  1513  
// Storable returns a boolean showing whether this object is storable
// (always true for OneDrive).
func (o *Object) Storable() bool {
	return true
}
  1518  
// Open an object for read
//
// Returns an error for objects with no ID or for OneNote files
// (which can't be downloaded).
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	if o.id == "" {
		return nil, errors.New("can't download - no id")
	}
	if o.isOneNoteFile {
		return nil, errors.New("can't open a OneNote file")
	}

	// Clip any range options to the object size
	fs.FixRangeOption(options, o.size)
	var resp *http.Response
	opts := newOptsCall(o.id, "GET", "/content")
	opts.Options = options

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}

	if resp.StatusCode == http.StatusOK && resp.ContentLength > 0 && resp.Header.Get("Content-Range") == "" {
		//Overwrite size with actual size since size readings from Onedrive is unreliable.
		o.size = resp.ContentLength
	}
	// Caller is responsible for closing the body
	return resp.Body, err
}
  1547  
// createUploadSession creates a resumable upload session for the
// object, setting the desired timestamps on the item up front.
func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (response *api.CreateUploadResponse, err error) {
	// NOTE(review): the FindPath error is deliberately discarded - on
	// failure directoryID is empty and we fall back to the path-based
	// address below; confirm this best-effort behavior is intended.
	leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
	id, drive, rootURL := parseNormalizedID(directoryID)
	var opts rest.Opts
	if drive != "" {
		// Address the item by drive + parent ID + leaf
		opts = rest.Opts{
			Method:  "POST",
			RootURL: rootURL,
			Path: fmt.Sprintf("/%s/items/%s:/%s:/createUploadSession",
				drive, id, rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
		}
	} else {
		// Address the item by its path relative to the root
		opts = rest.Opts{
			Method: "POST",
			Path:   "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/createUploadSession",
		}
	}
	createRequest := api.CreateUploadRequest{}
	createRequest.Item.FileSystemInfo.CreatedDateTime = api.Timestamp(modTime)
	createRequest.Item.FileSystemInfo.LastModifiedDateTime = api.Timestamp(modTime)
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, &createRequest, &response)
		if apiErr, ok := err.(*api.Error); ok {
			if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
				// Make the error more user-friendly
				err = errors.New(err.Error() + " (is it a OneNote file?)")
			}
		}
		return shouldRetry(resp, err)
	})
	return response, err
}
  1582  
  1583  // getPosition gets the current position in a multipart upload
  1584  func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err error) {
  1585  	opts := rest.Opts{
  1586  		Method:  "GET",
  1587  		RootURL: url,
  1588  	}
  1589  	var info api.UploadFragmentResponse
  1590  	var resp *http.Response
  1591  	err = o.fs.pacer.Call(func() (bool, error) {
  1592  		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
  1593  		return shouldRetry(resp, err)
  1594  	})
  1595  	if err != nil {
  1596  		return 0, err
  1597  	}
  1598  	if len(info.NextExpectedRanges) != 1 {
  1599  		return 0, errors.Errorf("bad number of ranges in upload position: %v", info.NextExpectedRanges)
  1600  	}
  1601  	position := info.NextExpectedRanges[0]
  1602  	i := strings.IndexByte(position, '-')
  1603  	if i < 0 {
  1604  		return 0, errors.Errorf("no '-' in next expected range: %q", position)
  1605  	}
  1606  	position = position[:i]
  1607  	pos, err = strconv.ParseInt(position, 10, 64)
  1608  	if err != nil {
  1609  		return 0, errors.Wrapf(err, "bad expected range: %q", position)
  1610  	}
  1611  	return pos, nil
  1612  }
  1613  
// uploadFragment uploads one chunk of a multipart upload session.
//
// If the server replies 416 (range not satisfiable) it asks the
// server for its current position and resumes from there, skipping
// any bytes it has already received.
//
// Returns a non-nil info only when the final chunk completes the
// upload (server responds 200/201 with the finished item).
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64, options ...fs.OpenOption) (info *api.Item, err error) {
	//	var response api.UploadFragmentResponse
	var resp *http.Response
	var body []byte
	// skip is how many bytes of this chunk the server already has -
	// it persists across pacer retries
	var skip = int64(0)
	err = o.fs.pacer.Call(func() (bool, error) {
		toSend := chunkSize - skip
		opts := rest.Opts{
			Method:        "PUT",
			RootURL:       url,
			ContentLength: &toSend,
			ContentRange:  fmt.Sprintf("bytes %d-%d/%d", start+skip, start+chunkSize-1, totalSize),
			Body:          chunk,
			Options:       options,
		}
		// Position the chunk reader past any already-sent bytes
		_, _ = chunk.Seek(skip, io.SeekStart)
		resp, err = o.fs.srv.Call(ctx, &opts)
		if err != nil && resp != nil && resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
			fs.Debugf(o, "Received 416 error - reading current position from server: %v", err)
			pos, posErr := o.getPosition(ctx, url)
			if posErr != nil {
				fs.Debugf(o, "Failed to read position: %v", posErr)
				return false, posErr
			}
			skip = pos - start
			fs.Debugf(o, "Read position %d, chunk is %d..%d, bytes to skip = %d", pos, start, start+chunkSize, skip)
			switch {
			case skip < 0:
				return false, errors.Wrapf(err, "sent block already (skip %d < 0), can't rewind", skip)
			case skip > chunkSize:
				return false, errors.Wrapf(err, "position is in the future (skip %d > chunkSize %d), can't skip forward", skip, chunkSize)
			case skip == chunkSize:
				fs.Debugf(o, "Skipping chunk as already sent (skip %d == chunkSize %d)", skip, chunkSize)
				return false, nil
			}
			return true, errors.Wrapf(err, "retry this chunk skipping %d bytes", skip)
		}
		if err != nil {
			return shouldRetry(resp, err)
		}
		body, err = rest.ReadBody(resp)
		if err != nil {
			return shouldRetry(resp, err)
		}
		if resp.StatusCode == 200 || resp.StatusCode == 201 {
			// we are done :)
			// read the item
			info = &api.Item{}
			return false, json.Unmarshal(body, info)
		}
		return false, nil
	})
	return info, err
}
  1669  
  1670  // cancelUploadSession cancels an upload session
  1671  func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error) {
  1672  	opts := rest.Opts{
  1673  		Method:     "DELETE",
  1674  		RootURL:    url,
  1675  		NoResponse: true,
  1676  	}
  1677  	var resp *http.Response
  1678  	err = o.fs.pacer.Call(func() (bool, error) {
  1679  		resp, err = o.fs.srv.Call(ctx, &opts)
  1680  		return shouldRetry(resp, err)
  1681  	})
  1682  	return
  1683  }
  1684  
// uploadMultipart uploads a file using multipart upload
//
// The data is streamed to an upload session in opt.ChunkSize pieces.
// An atexit handler is registered first so an interrupted run can
// cancel the session server-side; the buffered uploadURLChan hands the
// session URL to that handler once known (or is closed to signal that
// no session was created and there is nothing to cancel).
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
	if size <= 0 {
		return nil, errors.New("unknown-sized upload not supported")
	}

	uploadURLChan := make(chan string, 1)
	gracefulCancel := func() {
		uploadURL, ok := <-uploadURLChan
		// Reading from uploadURLChan blocks the atexit process until
		// we are able to use uploadURL to cancel the upload
		if !ok { // createUploadSession failed - no need to cancel upload
			return
		}

		fs.Debugf(o, "Cancelling multipart upload")
		cancelErr := o.cancelUploadSession(ctx, uploadURL)
		if cancelErr != nil {
			fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
		}
	}
	cancelFuncHandle := atexit.Register(gracefulCancel)

	// Create upload session
	fs.Debugf(o, "Starting multipart upload")
	session, err := o.createUploadSession(ctx, modTime)
	if err != nil {
		close(uploadURLChan)
		atexit.Unregister(cancelFuncHandle)
		return nil, err
	}
	uploadURL := session.UploadURL
	uploadURLChan <- uploadURL

	// On any chunk failure cancel the session (discarding partial
	// data); always unregister the atexit handler on the way out.
	defer func() {
		if err != nil {
			fs.Debugf(o, "Error encountered during upload: %v", err)
			gracefulCancel()
		}
		atexit.Unregister(cancelFuncHandle)
	}()

	// Upload the chunks
	remaining := size
	position := int64(0)
	for remaining > 0 {
		n := int64(o.fs.opt.ChunkSize)
		if remaining < n {
			n = remaining
		}
		// Repeatable reader lets uploadFragment re-send the same bytes
		// if a fragment has to be retried.
		seg := readers.NewRepeatableReader(io.LimitReader(in, n))
		fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
		info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n, options...)
		if err != nil {
			return nil, err
		}
		remaining -= n
		position += n
	}

	return info, nil
}
  1747  
// Update the content of a remote file within 4MB size in one single request
// This function will set modtime after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
	// Guard: this code path only handles payloads of at most 4 MiB.
	if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
		return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
	}

	fs.Debugf(o, "Starting singlepart upload")
	var resp *http.Response
	var opts rest.Opts
	// Error deliberately discarded: on failure leaf/directoryID are
	// empty, drive stays "" and we fall through to the path-based
	// endpoint below. NOTE(review): confirm this fallback is intended.
	leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
	trueDirID, drive, rootURL := parseNormalizedID(directoryID)
	if drive != "" {
		// The parent carries a drive prefix: address the item via
		// drive ID + parent item ID + encoded leaf name.
		opts = rest.Opts{
			Method:        "PUT",
			RootURL:       rootURL,
			Path:          "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf)) + ":/content",
			ContentLength: &size,
			Body:          in,
			Options:       options,
		}
	} else {
		// Default drive: address the item by path relative to the root.
		opts = rest.Opts{
			Method:        "PUT",
			Path:          "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
			ContentLength: &size,
			Body:          in,
			Options:       options,
		}
	}

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
		if apiErr, ok := err.(*api.Error); ok {
			if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
				// Make the error more user-friendly
				err = errors.New(err.Error() + " (is it a OneNote file?)")
			}
		}
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}

	err = o.setMetaData(info)
	if err != nil {
		return nil, err
	}
	// Set the mod time now and read metadata
	return o.setModTime(ctx, modTime)
}
  1800  
  1801  // Update the object with the contents of the io.Reader, modTime and size
  1802  //
  1803  // The new object may have been created if an error is returned
  1804  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
  1805  	if o.hasMetaData && o.isOneNoteFile {
  1806  		return errors.New("can't upload content to a OneNote file")
  1807  	}
  1808  
  1809  	o.fs.tokenRenewer.Start()
  1810  	defer o.fs.tokenRenewer.Stop()
  1811  
  1812  	size := src.Size()
  1813  	modTime := src.ModTime(ctx)
  1814  
  1815  	var info *api.Item
  1816  	if size > 0 {
  1817  		info, err = o.uploadMultipart(ctx, in, size, modTime, options...)
  1818  	} else if size == 0 {
  1819  		info, err = o.uploadSinglepart(ctx, in, size, modTime, options...)
  1820  	} else {
  1821  		return errors.New("unknown-sized upload not supported")
  1822  	}
  1823  	if err != nil {
  1824  		return err
  1825  	}
  1826  
  1827  	return o.setMetaData(info)
  1828  }
  1829  
// Remove an object by deleting the remote item identified by o.id
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.deleteObject(ctx, o.id)
}
  1834  
// MimeType of an Object if known, "" otherwise
//
// ctx is unused; the value comes from cached metadata.
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}
  1839  
// ID returns the ID of the Object if known, or "" if not
//
// Implements the fs.IDer interface.
func (o *Object) ID() string {
	return o.id
}
  1844  
  1845  func newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
  1846  	id, drive, rootURL := parseNormalizedID(normalizedID)
  1847  
  1848  	if drive != "" {
  1849  		return rest.Opts{
  1850  			Method:  method,
  1851  			RootURL: rootURL,
  1852  			Path:    "/" + drive + "/items/" + id + route,
  1853  		}
  1854  	}
  1855  	return rest.Opts{
  1856  		Method: method,
  1857  		Path:   "/items/" + id + route,
  1858  	}
  1859  }
  1860  
  1861  // parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
  1862  // and returns itemID, driveID, rootURL.
  1863  // Such a normalized ID can come from (*Item).GetID()
  1864  func parseNormalizedID(ID string) (string, string, string) {
  1865  	if strings.Index(ID, "#") >= 0 {
  1866  		s := strings.Split(ID, "#")
  1867  		return s[1], s[0], graphURL + "/drives"
  1868  	}
  1869  	return ID, "", ""
  1870  }
  1871  
  1872  // getRelativePathInsideBase checks if `target` is inside `base`. If so, it
  1873  // returns a relative path for `target` based on `base` and a boolean `true`.
  1874  // Otherwise returns "", false.
  1875  func getRelativePathInsideBase(base, target string) (string, bool) {
  1876  	if base == "" {
  1877  		return target, true
  1878  	}
  1879  
  1880  	baseSlash := base + "/"
  1881  	if strings.HasPrefix(target+"/", baseSlash) {
  1882  		return target[len(baseSlash):], true
  1883  	}
  1884  	return "", false
  1885  }
  1886  
  1887  // Adds a ":" at the end of `remotePath` in a proper manner.
  1888  // If `remotePath` already ends with "/", change it to ":/"
  1889  // If `remotePath` is "", return "".
  1890  // A workaround for #2720 and #3039
  1891  func withTrailingColon(remotePath string) string {
  1892  	if remotePath == "" {
  1893  		return ""
  1894  	}
  1895  
  1896  	if strings.HasSuffix(remotePath, "/") {
  1897  		return remotePath[:len(remotePath)-1] + ":/"
  1898  	}
  1899  	return remotePath + ":"
  1900  }
  1901  
  1902  // Check the interfaces are satisfied
  1903  var (
  1904  	_ fs.Fs              = (*Fs)(nil)
  1905  	_ fs.Purger          = (*Fs)(nil)
  1906  	_ fs.Copier          = (*Fs)(nil)
  1907  	_ fs.Mover           = (*Fs)(nil)
  1908  	_ fs.DirMover        = (*Fs)(nil)
  1909  	_ fs.DirCacheFlusher = (*Fs)(nil)
  1910  	_ fs.Abouter         = (*Fs)(nil)
  1911  	_ fs.PublicLinker    = (*Fs)(nil)
  1912  	_ fs.Object          = (*Object)(nil)
  1913  	_ fs.MimeTyper       = &Object{}
  1914  	_ fs.IDer            = &Object{}
  1915  )