github.com/ncw/rclone@v1.48.1-0.20190724201158-a35aa1360e3e/backend/onedrive/onedrive.go

     1  // Package onedrive provides an interface to the Microsoft OneDrive
     2  // object storage system.
     3  package onedrive
     4  
     5  import (
     6  	"context"
     7  	"encoding/base64"
     8  	"encoding/hex"
     9  	"encoding/json"
    10  	"fmt"
    11  	"io"
    12  	"log"
    13  	"net/http"
    14  	"path"
    15  	"strings"
    16  	"time"
    17  
    18  	"github.com/ncw/rclone/lib/atexit"
    19  
    20  	"github.com/ncw/rclone/backend/onedrive/api"
    21  	"github.com/ncw/rclone/fs"
    22  	"github.com/ncw/rclone/fs/config"
    23  	"github.com/ncw/rclone/fs/config/configmap"
    24  	"github.com/ncw/rclone/fs/config/configstruct"
    25  	"github.com/ncw/rclone/fs/config/obscure"
    26  	"github.com/ncw/rclone/fs/fserrors"
    27  	"github.com/ncw/rclone/fs/hash"
    28  	"github.com/ncw/rclone/lib/dircache"
    29  	"github.com/ncw/rclone/lib/oauthutil"
    30  	"github.com/ncw/rclone/lib/pacer"
    31  	"github.com/ncw/rclone/lib/readers"
    32  	"github.com/ncw/rclone/lib/rest"
    33  	"github.com/pkg/errors"
    34  	"golang.org/x/oauth2"
    35  )
    36  
    37  const (
    38  	rcloneClientID              = "b15665d9-eda6-4092-8539-0eec376afd59"
    39  	rcloneEncryptedClientSecret = "_JUdzh3LnKNqSPcf4Wu5fgMFIQOI8glZu_akYgR8yf6egowNBg-R"
    40  	minSleep                    = 10 * time.Millisecond
    41  	maxSleep                    = 2 * time.Second
    42  	decayConstant               = 2 // bigger for slower decay, exponential
    43  	graphURL                    = "https://graph.microsoft.com/v1.0"
    44  	configDriveID               = "drive_id"
    45  	configDriveType             = "drive_type"
    46  	driveTypePersonal           = "personal"
    47  	driveTypeBusiness           = "business"
    48  	driveTypeSharepoint         = "documentLibrary"
    49  	defaultChunkSize            = 10 * fs.MebiByte
    50  	chunkSizeMultiple           = 320 * fs.KibiByte
    51  )
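        // Note: the Microsoft Graph upload API documents that upload fragments
        // should be a multiple of 320 KiB, hence chunkSizeMultiple above;
        // defaultChunkSize (10 MiB) is 32 * 320 KiB.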
    52  
    53  // Globals
    54  var (
    55  	// Description of how to auth for this app
    56  	oauthConfig = &oauth2.Config{
    57  		Endpoint: oauth2.Endpoint{
    58  			AuthURL:  "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
    59  			TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
    60  		},
    61  		Scopes:       []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"},
    62  		ClientID:     rcloneClientID,
    63  		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
    64  		RedirectURL:  oauthutil.RedirectLocalhostURL,
    65  	}
    66  )
    67  
    68  // Register with Fs
    69  func init() {
    70  	fs.Register(&fs.RegInfo{
    71  		Name:        "onedrive",
    72  		Description: "Microsoft OneDrive",
    73  		NewFs:       NewFs,
    74  		Config: func(name string, m configmap.Mapper) {
    75  			err := oauthutil.Config("onedrive", name, m, oauthConfig)
    76  			if err != nil {
    77  				log.Fatalf("Failed to configure token: %v", err)
    78  				return
    79  			}
    80  
    81  			// Stop if we are running non-interactive config
    82  			if fs.Config.AutoConfirm {
    83  				return
    84  			}
    85  
    86  			type driveResource struct {
    87  				DriveID   string `json:"id"`
    88  				DriveName string `json:"name"`
    89  				DriveType string `json:"driveType"`
    90  			}
    91  			type drivesResponse struct {
    92  				Drives []driveResource `json:"value"`
    93  			}
    94  
    95  			type siteResource struct {
    96  				SiteID   string `json:"id"`
    97  				SiteName string `json:"displayName"`
    98  				SiteURL  string `json:"webUrl"`
    99  			}
   100  			type siteResponse struct {
   101  				Sites []siteResource `json:"value"`
   102  			}
   103  
   104  			oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
   105  			if err != nil {
   106  				log.Fatalf("Failed to configure OneDrive: %v", err)
   107  			}
   108  			srv := rest.NewClient(oAuthClient)
   109  
   110  			var opts rest.Opts
   111  			var finalDriveID string
   112  			var siteID string
   113  			switch config.Choose("Your choice",
   114  				[]string{"onedrive", "sharepoint", "driveid", "siteid", "search"},
   115  				[]string{"OneDrive Personal or Business", "Root Sharepoint site", "Type in driveID", "Type in SiteID", "Search a Sharepoint site"},
   116  				false) {
   117  
   118  			case "onedrive":
   119  				opts = rest.Opts{
   120  					Method:  "GET",
   121  					RootURL: graphURL,
   122  					Path:    "/me/drives",
   123  				}
   124  			case "sharepoint":
   125  				opts = rest.Opts{
   126  					Method:  "GET",
   127  					RootURL: graphURL,
   128  					Path:    "/sites/root/drives",
   129  				}
   130  			case "driveid":
   131  				fmt.Printf("Paste your Drive ID here> ")
   132  				finalDriveID = config.ReadLine()
   133  			case "siteid":
   134  				fmt.Printf("Paste your Site ID here> ")
   135  				siteID = config.ReadLine()
   136  			case "search":
   137  				fmt.Printf("What to search for> ")
   138  				searchTerm := config.ReadLine()
   139  				opts = rest.Opts{
   140  					Method:  "GET",
   141  					RootURL: graphURL,
   142  					Path:    "/sites?search=" + searchTerm,
   143  				}
   144  
   145  				sites := siteResponse{}
   146  				_, err := srv.CallJSON(&opts, nil, &sites)
   147  				if err != nil {
   148  					log.Fatalf("Failed to query available sites: %v", err)
   149  				}
   150  
   151  				if len(sites.Sites) == 0 {
   152  					log.Fatalf("Search for '%s' returned no results", searchTerm)
   153  				} else {
   154  					fmt.Printf("Found %d sites, please select the one you want to use:\n", len(sites.Sites))
   155  					for index, site := range sites.Sites {
   156  						fmt.Printf("%d: %s (%s) id=%s\n", index, site.SiteName, site.SiteURL, site.SiteID)
   157  					}
   158  					siteID = sites.Sites[config.ChooseNumber("Choose site to use:", 0, len(sites.Sites)-1)].SiteID
   159  				}
   160  			}
   161  
   162  			// if we have a siteID we need to ask for the drives
   163  			if siteID != "" {
   164  				opts = rest.Opts{
   165  					Method:  "GET",
   166  					RootURL: graphURL,
   167  					Path:    "/sites/" + siteID + "/drives",
   168  				}
   169  			}
   170  
   171  			// If we don't have the final drive ID yet, query Microsoft
   172  			// Graph for the available drives
   173  			if finalDriveID == "" {
   174  				drives := drivesResponse{}
   175  				_, err := srv.CallJSON(&opts, nil, &drives)
   176  				if err != nil {
   177  					log.Fatalf("Failed to query available drives: %v", err)
   178  				}
   179  
   180  				if len(drives.Drives) == 0 {
   181  					log.Fatalf("No drives found")
   182  				} else {
   183  					fmt.Printf("Found %d drives, please select the one you want to use:\n", len(drives.Drives))
   184  					for index, drive := range drives.Drives {
   185  						fmt.Printf("%d: %s (%s) id=%s\n", index, drive.DriveName, drive.DriveType, drive.DriveID)
   186  					}
   187  					finalDriveID = drives.Drives[config.ChooseNumber("Choose drive to use:", 0, len(drives.Drives)-1)].DriveID
   188  				}
   189  			}
   190  
   191  			// Test the driveID and get drive type
   192  			opts = rest.Opts{
   193  				Method:  "GET",
   194  				RootURL: graphURL,
   195  				Path:    "/drives/" + finalDriveID + "/root"}
   196  			var rootItem api.Item
   197  			_, err = srv.CallJSON(&opts, nil, &rootItem)
   198  			if err != nil {
   199  				log.Fatalf("Failed to query root for drive %s: %v", finalDriveID, err)
   200  			}
   201  
   202  			fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
   203  			// This does not work, YET :)
   204  			if !config.ConfirmWithConfig(m, "config_drive_ok", true) {
   205  				log.Fatalf("Cancelled by user")
   206  			}
   207  
   208  			m.Set(configDriveID, finalDriveID)
   209  			m.Set(configDriveType, rootItem.ParentReference.DriveType)
   210  			config.SaveConfig()
   211  		},
   212  		Options: []fs.Option{{
   213  			Name: config.ConfigClientID,
   214  			Help: "Microsoft App Client Id\nLeave blank normally.",
   215  		}, {
   216  			Name: config.ConfigClientSecret,
   217  			Help: "Microsoft App Client Secret\nLeave blank normally.",
   218  		}, {
   219  			Name: "chunk_size",
   220  			Help: `Chunk size to upload files with - must be a multiple of 320k.
   221  
   222  Above this size files will be uploaded in chunks of this size. Note
   223  that the chunks will be buffered into memory.`,
   224  			Default:  defaultChunkSize,
   225  			Advanced: true,
   226  		}, {
   227  			Name:     "drive_id",
   228  			Help:     "The ID of the drive to use",
   229  			Default:  "",
   230  			Advanced: true,
   231  		}, {
   232  			Name:     "drive_type",
   233  			Help:     "The type of the drive ( " + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + " )",
   234  			Default:  "",
   235  			Advanced: true,
   236  		}, {
   237  			Name: "expose_onenote_files",
   238  			Help: `Set to make OneNote files show up in directory listings.
   239  
   240  By default rclone will hide OneNote files in directory listings because
   241  operations like "Open" and "Update" won't work on them.  But this
   242  behaviour may also prevent you from deleting them.  If you want to
   243  delete OneNote files or otherwise want them to show up in directory
   244  listings, set this option.`,
   245  			Default:  false,
   246  			Advanced: true,
   247  		}},
   248  	})
   249  }
   250  
   251  // Options defines the configuration for this backend
   252  type Options struct {
   253  	ChunkSize          fs.SizeSuffix `config:"chunk_size"`
   254  	DriveID            string        `config:"drive_id"`
   255  	DriveType          string        `config:"drive_type"`
   256  	ExposeOneNoteFiles bool          `config:"expose_onenote_files"`
   257  }
   258  
   259  // Fs represents a remote one drive
   260  type Fs struct {
   261  	name         string             // name of this remote
   262  	root         string             // the path we are working on
   263  	opt          Options            // parsed options
   264  	features     *fs.Features       // optional features
   265  	srv          *rest.Client       // the connection to the one drive server
   266  	dirCache     *dircache.DirCache // Map of directory path to directory id
   267  	pacer        *fs.Pacer          // pacer for API calls
   268  	tokenRenewer *oauthutil.Renew   // renew the token on expiry
   269  	driveID      string             // ID to use for querying Microsoft Graph
   270  	driveType    string             // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
   271  }
   272  
   273  // Object describes a one drive object
   274  //
   275  // Will definitely have info but maybe not meta
   276  type Object struct {
   277  	fs            *Fs       // what this object is part of
   278  	remote        string    // The remote path
   279  	hasMetaData   bool      // whether info below has been set
   280  	isOneNoteFile bool      // Whether the object is a OneNote file
   281  	size          int64     // size of the object
   282  	modTime       time.Time // modification time of the object
   283  	id            string    // ID of the object
   284  	sha1          string    // SHA-1 of the object content
   285  	quickxorhash  string    // QuickXorHash of the object content
   286  	mimeType      string    // Content-Type of object from server (may not be as uploaded)
   287  }
   288  
   289  // ------------------------------------------------------------
   290  
   291  // Name of the remote (as passed into NewFs)
   292  func (f *Fs) Name() string {
   293  	return f.name
   294  }
   295  
   296  // Root of the remote (as passed into NewFs)
   297  func (f *Fs) Root() string {
   298  	return f.root
   299  }
   300  
   301  // String converts this Fs to a string
   302  func (f *Fs) String() string {
   303  	return fmt.Sprintf("One drive root '%s'", f.root)
   304  }
   305  
   306  // Features returns the optional features of this Fs
   307  func (f *Fs) Features() *fs.Features {
   308  	return f.features
   309  }
   310  
   311  // parsePath parses a one drive 'url'
   312  func parsePath(path string) (root string) {
   313  	root = strings.Trim(path, "/")
   314  	return
   315  }
   316  
   317  // retryErrorCodes is a slice of error codes that we will retry
   318  var retryErrorCodes = []int{
   319  	429, // Too Many Requests.
   320  	500, // Internal Server Error
   321  	502, // Bad Gateway
   322  	503, // Service Unavailable
   323  	504, // Gateway Timeout
   324  	509, // Bandwidth Limit Exceeded
   325  }
   326  
   327  // shouldRetry returns a boolean as to whether this resp and err
   328  // deserve to be retried.  It returns the err as a convenience
   329  func shouldRetry(resp *http.Response, err error) (bool, error) {
   330  	authRetry := false
   331  
   332  	if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Contains(resp.Header["Www-Authenticate"][0], "expired_token") {
   333  		authRetry = true
   334  		fs.Debugf(nil, "Should retry: %v", err)
   335  	}
   336  	return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
   337  }
   338  
   339  // readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
   340  // if `relPath` == "", it reads the metadata for the item with that ID.
   341  //
   342  // We address items using the pattern `drives/driveID/items/itemID:/relativePath`
   343  // instead of simply using `drives/driveID/root:/itemPath` because it works for
   344  // "shared with me" folders in OneDrive Personal (See #2536, #2778)
   345  // This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
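        //
        // For example, with relPath "docs/report.txt" the item is addressed
        // (per the pattern above) as
        // "drives/<driveID>/items/<itemID>:/docs/report.txt:", where the
        // trailing colon is added by withTrailingColon.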
   346  func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
   347  	opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(replaceReservedChars(relPath))))
   348  	err = f.pacer.Call(func() (bool, error) {
   349  		resp, err = f.srv.CallJSON(&opts, nil, &info)
   350  		return shouldRetry(resp, err)
   351  	})
   352  
   353  	return info, resp, err
   354  }
   355  
   356  // readMetaDataForPath reads the metadata from the path (relative to the absolute root)
   357  func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, resp *http.Response, err error) {
   358  	firstSlashIndex := strings.IndexRune(path, '/')
   359  
   360  	if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
   361  		var opts rest.Opts
   362  		if len(path) == 0 {
   363  			opts = rest.Opts{
   364  				Method: "GET",
   365  				Path:   "/root",
   366  			}
   367  		} else {
   368  			opts = rest.Opts{
   369  				Method: "GET",
   370  				Path:   "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
   371  			}
   372  		}
   373  		err = f.pacer.Call(func() (bool, error) {
   374  			resp, err = f.srv.CallJSON(&opts, nil, &info)
   375  			return shouldRetry(resp, err)
   376  		})
   377  		return info, resp, err
   378  	}
   379  
   380  	// The following branch handles the case when we're using OneDrive Personal and the path is in a folder.
   381  	// For OneDrive Personal, we need to consider the "shared with me" folders.
   382  	// An item in such a folder can only be addressed by its ID relative to the sharer's driveID or
   383  	// by its path relative to the folder's ID relative to the sharer's driveID.
   384  	// Note: A "shared with me" folder can only be placed in the sharee's absolute root.
   385  	// So we read metadata relative to a suitable folder's normalized ID.
   386  	var dirCacheFoundRoot bool
   387  	var rootNormalizedID string
   388  	if f.dirCache != nil {
   389  		var dirCacheRootIDExists bool
   390  		rootNormalizedID, dirCacheRootIDExists = f.dirCache.Get("")
   391  		if f.root == "" {
   392  			// if f.root == "", it means f.root is the absolute root of the drive
   393  			// and its ID should have been found in NewFs
   394  			dirCacheFoundRoot = dirCacheRootIDExists
   395  		} else if _, err := f.dirCache.RootParentID(); err == nil {
   396  			// if root is in a folder, it must have a parent folder, and
   397  			// if dirCache has found root in NewFs, the parent folder's ID
   398  			// should be present.
   399  			// This RootParentID() check is a fix for #3164 which describes
   400  			// a possible case where the root is not found.
   401  			dirCacheFoundRoot = dirCacheRootIDExists
   402  		}
   403  	}
   404  
   405  	relPath, insideRoot := getRelativePathInsideBase(f.root, path)
   406  	var firstDir, baseNormalizedID string
   407  	if !insideRoot || !dirCacheFoundRoot {
   408  		// We do not have the normalized ID in dirCache for our query to base on. Query it manually.
   409  		firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
   410  		info, resp, err := f.readMetaDataForPath(ctx, firstDir)
   411  		if err != nil {
   412  			return info, resp, err
   413  		}
   414  		baseNormalizedID = info.GetID()
   415  	} else {
   416  		if f.root != "" {
   417  			// Read metadata based on root
   418  			baseNormalizedID = rootNormalizedID
   419  		} else {
   420  			// Read metadata based on firstDir
   421  			firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
   422  			baseNormalizedID, err = f.dirCache.FindDir(ctx, firstDir, false)
   423  			if err != nil {
   424  				return nil, nil, err
   425  			}
   426  		}
   427  	}
   428  
   429  	return f.readMetaDataForPathRelativeToID(baseNormalizedID, relPath)
   430  }
   431  
   432  // errorHandler parses a non 2xx error response into an error
   433  func errorHandler(resp *http.Response) error {
   434  	// Decode error response
   435  	errResponse := new(api.Error)
   436  	err := rest.DecodeJSON(resp, &errResponse)
   437  	if err != nil {
   438  		fs.Debugf(nil, "Couldn't decode error response: %v", err)
   439  	}
   440  	if errResponse.ErrorInfo.Code == "" {
   441  		errResponse.ErrorInfo.Code = resp.Status
   442  	}
   443  	return errResponse
   444  }
   445  
   446  func checkUploadChunkSize(cs fs.SizeSuffix) error {
   447  	const minChunkSize = fs.Byte
   448  	if cs%chunkSizeMultiple != 0 {
   449  		return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
   450  	}
   451  	if cs < minChunkSize {
   452  		return errors.Errorf("%s is less than %s", cs, minChunkSize)
   453  	}
   454  	return nil
   455  }
   456  
   457  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   458  	err = checkUploadChunkSize(cs)
   459  	if err == nil {
   460  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
   461  	}
   462  	return
   463  }
   464  
   465  // NewFs constructs an Fs from the path, container:path
   466  func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
   467  	ctx := context.Background()
   468  	// Parse config into Options struct
   469  	opt := new(Options)
   470  	err := configstruct.Set(m, opt)
   471  	if err != nil {
   472  		return nil, err
   473  	}
   474  
   475  	err = checkUploadChunkSize(opt.ChunkSize)
   476  	if err != nil {
   477  		return nil, errors.Wrap(err, "onedrive: chunk size")
   478  	}
   479  
   480  	if opt.DriveID == "" || opt.DriveType == "" {
   481  		return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
   482  	}
   483  
   484  	root = parsePath(root)
   485  	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
   486  	if err != nil {
   487  		return nil, errors.Wrap(err, "failed to configure OneDrive")
   488  	}
   489  
   490  	f := &Fs{
   491  		name:      name,
   492  		root:      root,
   493  		opt:       *opt,
   494  		driveID:   opt.DriveID,
   495  		driveType: opt.DriveType,
   496  		srv:       rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
   497  		pacer:     fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
   498  	}
   499  	f.features = (&fs.Features{
   500  		CaseInsensitive:         true,
   501  		ReadMimeType:            true,
   502  		CanHaveEmptyDirectories: true,
   503  	}).Fill(f)
   504  	f.srv.SetErrorHandler(errorHandler)
   505  
   506  	// Renew the token in the background
   507  	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
   508  		_, _, err := f.readMetaDataForPath(ctx, "")
   509  		return err
   510  	})
   511  
   512  	// Get rootID
   513  	rootInfo, _, err := f.readMetaDataForPath(ctx, "")
   514  	if err != nil || rootInfo.GetID() == "" {
   515  		return nil, errors.Wrap(err, "failed to get root")
   516  	}
   517  
   518  	f.dirCache = dircache.New(root, rootInfo.GetID(), f)
   519  
   520  	// Find the current root
   521  	err = f.dirCache.FindRoot(ctx, false)
   522  	if err != nil {
   523  		// Assume it is a file
   524  		newRoot, remote := dircache.SplitPath(root)
   525  		tempF := *f
   526  		tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF)
   527  		tempF.root = newRoot
   528  		// Make new Fs which is the parent
   529  		err = tempF.dirCache.FindRoot(ctx, false)
   530  		if err != nil {
   531  			// No root so return old f
   532  			return f, nil
   533  		}
   534  		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
   535  		if err != nil {
   536  			if err == fs.ErrorObjectNotFound {
   537  				// File doesn't exist so return old f
   538  				return f, nil
   539  			}
   540  			return nil, err
   541  		}
   542  		// XXX: update the old f here instead of returning tempF, since
   543  		// `features` were already filled with functions having *f as a receiver.
   544  		// See https://github.com/ncw/rclone/issues/2182
   545  		f.dirCache = tempF.dirCache
   546  		f.root = tempF.root
   547  		// return an error with an fs which points to the parent
   548  		return f, fs.ErrorIsFile
   549  	}
   550  	return f, nil
   551  }
   552  
   553  // rootSlash returns root with a trailing slash if it is non-empty, otherwise the empty string
   554  func (f *Fs) rootSlash() string {
   555  	if f.root == "" {
   556  		return f.root
   557  	}
   558  	return f.root + "/"
   559  }
   560  
   561  // Return an Object from a path
   562  //
   563  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   564  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
   565  	o := &Object{
   566  		fs:     f,
   567  		remote: remote,
   568  	}
   569  	var err error
   570  	if info != nil {
   571  		// Set info
   572  		err = o.setMetaData(info)
   573  	} else {
   574  		err = o.readMetaData(ctx) // reads info and meta, returning an error
   575  	}
   576  	if err != nil {
   577  		return nil, err
   578  	}
   579  	return o, nil
   580  }
   581  
   582  // NewObject finds the Object at remote.  If it can't be found
   583  // it returns the error fs.ErrorObjectNotFound.
   584  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   585  	return f.newObjectWithInfo(ctx, remote, nil)
   586  }
   587  
   588  // FindLeaf finds a directory of name leaf in the folder with ID pathID
   589  func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
   590  	// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
   591  	_, ok := f.dirCache.GetInv(pathID)
   592  	if !ok {
   593  		return "", false, errors.New("couldn't find parent ID")
   594  	}
   595  	info, resp, err := f.readMetaDataForPathRelativeToID(pathID, leaf)
   596  	if err != nil {
   597  		if resp != nil && resp.StatusCode == http.StatusNotFound {
   598  			return "", false, nil
   599  		}
   600  		return "", false, err
   601  	}
   602  	if info.GetPackageType() == api.PackageTypeOneNote {
   603  		return "", false, errors.New("found OneNote file when looking for folder")
   604  	}
   605  	if info.GetFolder() == nil {
   606  		return "", false, errors.New("found file when looking for folder")
   607  	}
   608  	return info.GetID(), true, nil
   609  }
   610  
   611  // CreateDir makes a directory with pathID as parent and name leaf
   612  func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, err error) {
   613  	// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
   614  	var resp *http.Response
   615  	var info *api.Item
   616  	opts := newOptsCall(dirID, "POST", "/children")
   617  	mkdir := api.CreateItemRequest{
   618  		Name:             replaceReservedChars(leaf),
   619  		ConflictBehavior: "fail",
   620  	}
   621  	err = f.pacer.Call(func() (bool, error) {
   622  		resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
   623  		return shouldRetry(resp, err)
   624  	})
   625  	if err != nil {
   626  		//fmt.Printf("...Error %v\n", err)
   627  		return "", err
   628  	}
   629  
   630  	//fmt.Printf("...Id %q\n", *info.Id)
   631  	return info.GetID(), nil
   632  }
   633  
   634  // listAllFn is the user function supplied to listAll to process each
   635  // item found in a directory listing.
   636  //
   637  // It is called once per matching item.
   638  //
   639  // Should return true to finish processing early.
   640  type listAllFn func(*api.Item) bool
   641  
   642  // listAll lists the directory with ID dirID, calling fn on each item found.
   643  // If directoriesOnly or filesOnly is set only that kind of item is sent.
   644  // If fn ever returns true then listAll exits early with found = true.
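        //
        // Results arrive in pages of up to 1000 items (the $top parameter
        // below); listAll follows result.NextLink until no further pages
        // remain.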
   645  func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
   646  	// Top parameter asks for bigger pages of data
   647  	// https://dev.onedrive.com/odata/optional-query-parameters.htm
   648  	opts := newOptsCall(dirID, "GET", "/children?$top=1000")
   649  OUTER:
   650  	for {
   651  		var result api.ListChildrenResponse
   652  		var resp *http.Response
   653  		err = f.pacer.Call(func() (bool, error) {
   654  			resp, err = f.srv.CallJSON(&opts, nil, &result)
   655  			return shouldRetry(resp, err)
   656  		})
   657  		if err != nil {
   658  			return found, errors.Wrap(err, "couldn't list files")
   659  		}
   660  		if len(result.Value) == 0 {
   661  			break
   662  		}
   663  		for i := range result.Value {
   664  			item := &result.Value[i]
   665  			isFolder := item.GetFolder() != nil
   666  			if isFolder {
   667  				if filesOnly {
   668  					continue
   669  				}
   670  			} else {
   671  				if directoriesOnly {
   672  					continue
   673  				}
   674  			}
   675  			if item.Deleted != nil {
   676  				continue
   677  			}
   678  			item.Name = restoreReservedChars(item.GetName())
   679  			if fn(item) {
   680  				found = true
   681  				break OUTER
   682  			}
   683  		}
   684  		if result.NextLink == "" {
   685  			break
   686  		}
   687  		opts.Path = ""
   688  		opts.RootURL = result.NextLink
   689  	}
   690  	return
   691  }
   692  
   693  // List the objects and directories in dir into entries.  The
   694  // entries can be returned in any order but should be for a
   695  // complete directory.
   696  //
   697  // dir should be "" to list the root, and should not have
   698  // trailing slashes.
   699  //
   700  // This should return ErrDirNotFound if the directory isn't
   701  // found.
   702  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
   703  	err = f.dirCache.FindRoot(ctx, false)
   704  	if err != nil {
   705  		return nil, err
   706  	}
   707  	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
   708  	if err != nil {
   709  		return nil, err
   710  	}
   711  	var iErr error
   712  	_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
   713  		if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
   714  			fs.Debugf(info.Name, "OneNote file not shown in directory listing")
   715  			return false
   716  		}
   717  
   718  		remote := path.Join(dir, info.GetName())
   719  		folder := info.GetFolder()
   720  		if folder != nil {
   721  			// cache the directory ID for later lookups
   722  			id := info.GetID()
   723  			f.dirCache.Put(remote, id)
   724  			d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
   725  			d.SetItems(folder.ChildCount)
   726  			entries = append(entries, d)
   727  		} else {
   728  			o, err := f.newObjectWithInfo(ctx, remote, info)
   729  			if err != nil {
   730  				iErr = err
   731  				return true
   732  			}
   733  			entries = append(entries, o)
   734  		}
   735  		return false
   736  	})
   737  	if err != nil {
   738  		return nil, err
   739  	}
   740  	if iErr != nil {
   741  		return nil, iErr
   742  	}
   743  	return entries, nil
   744  }
   745  
   746  // createObject creates a half finished Object from the parameters
   747  // passed in - setMetaData must be called on it before use.
   748  //
   749  // Returns the object, leaf, directoryID and error.
   750  //
   751  // Used to create new objects.
   752  func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
   753  	// Create the directory for the object if it doesn't exist
   754  	leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
   755  	if err != nil {
   756  		return nil, leaf, directoryID, err
   757  	}
   758  	// Temporary Object under construction
   759  	o = &Object{
   760  		fs:     f,
   761  		remote: remote,
   762  	}
   763  	return o, leaf, directoryID, nil
   764  }
   765  
   766  // Put the object into the container
   767  //
   768  // Copy the reader in to the new object which is returned
   769  //
   770  // The new object may have been created if an error is returned
   771  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   772  	remote := src.Remote()
   773  	size := src.Size()
   774  	modTime := src.ModTime(ctx)
   775  
   776  	o, _, _, err := f.createObject(ctx, remote, modTime, size)
   777  	if err != nil {
   778  		return nil, err
   779  	}
   780  	return o, o.Update(ctx, in, src, options...)
   781  }
   782  
   783  // Mkdir creates the container if it doesn't exist
   784  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   785  	err := f.dirCache.FindRoot(ctx, true)
   786  	if err != nil {
   787  		return err
   788  	}
   789  	if dir != "" {
   790  		_, err = f.dirCache.FindDir(ctx, dir, true)
   791  	}
   792  	return err
   793  }
   794  
   795  // deleteObject removes an object by ID
   796  func (f *Fs) deleteObject(id string) error {
   797  	opts := newOptsCall(id, "DELETE", "")
   798  	opts.NoResponse = true
   799  
   800  	return f.pacer.Call(func() (bool, error) {
   801  		resp, err := f.srv.Call(&opts)
   802  		return shouldRetry(resp, err)
   803  	})
   804  }
   805  
   806  // purgeCheck removes the directory dir. If check is set it refuses
   807  // to do so if the directory has anything in it.
   808  func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
   809  	root := path.Join(f.root, dir)
   810  	if root == "" {
   811  		return errors.New("can't purge root directory")
   812  	}
   813  	dc := f.dirCache
   814  	err := dc.FindRoot(ctx, false)
   815  	if err != nil {
   816  		return err
   817  	}
   818  	rootID, err := dc.FindDir(ctx, dir, false)
   819  	if err != nil {
   820  		return err
   821  	}
   822  	if check {
   823  		// check to see if there are any items
   824  		found, err := f.listAll(rootID, false, false, func(item *api.Item) bool {
   825  			return true
   826  		})
   827  		if err != nil {
   828  			return err
   829  		}
   830  		if found {
   831  			return fs.ErrorDirectoryNotEmpty
   832  		}
   833  	}
   834  	err = f.deleteObject(rootID)
   835  	if err != nil {
   836  		return err
   837  	}
   838  	f.dirCache.FlushDir(dir)
   839  	return nil
   840  }
   841  
   842  // Rmdir deletes the specified directory
   843  //
   844  // Returns an error if it isn't empty
   845  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
   846  	return f.purgeCheck(ctx, dir, true)
   847  }
   848  
   849  // Precision return the precision of this Fs
   850  func (f *Fs) Precision() time.Duration {
   851  	return time.Second
   852  }
   853  
   854  // waitForJob waits for the asynchronous job whose status is reported at location to complete
   855  func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
   856  	deadline := time.Now().Add(fs.Config.Timeout)
   857  	for time.Now().Before(deadline) {
   858  		var resp *http.Response
   859  		var err error
   860  		var body []byte
   861  		err = f.pacer.Call(func() (bool, error) {
   862  			resp, err = http.Get(location)
   863  			if err != nil {
   864  				return fserrors.ShouldRetry(err), err
   865  			}
   866  			body, err = rest.ReadBody(resp)
   867  			return fserrors.ShouldRetry(err), err
   868  		})
   869  		if err != nil {
   870  			return err
   871  		}
   872  		// Try to decode the body first as an api.AsyncOperationStatus
   873  		var status api.AsyncOperationStatus
   874  		err = json.Unmarshal(body, &status)
   875  		if err != nil {
   876  			return errors.Wrapf(err, "async status result not JSON: %q", body)
   877  		}
   878  
   879  		switch status.Status {
   880  		case "failed", "deleteFailed":
   881  			// Note: an empty `case "failed":` would not fall through to
   882  			// the next case in Go, so both failure states are handled
   883  			// together here.
   884  			return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
   885  		case "completed":
   886  			err = o.readMetaData(ctx)
   887  			return errors.Wrapf(err, "async operation completed but readMetaData failed")
   888  		}
   889  
   890  		time.Sleep(1 * time.Second)
   891  	}
   892  	return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
   893  }
   894  
   895  // Copy src to this remote using server side copy operations.
   896  //
   897  // This is stored with the remote path given
   898  //
   899  // It returns the destination Object and a possible error
   900  //
   901  // Will only be called if src.Fs().Name() == f.Name()
   902  //
   903  // If it isn't possible then return fs.ErrorCantCopy
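        //
        // Server side copy on OneDrive is asynchronous: the request is sent
        // with a "Prefer: respond-async" header and returns a Location header,
        // which waitForJob then polls until the copy has completed.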
   904  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
   905  	srcObj, ok := src.(*Object)
   906  	if !ok {
   907  		fs.Debugf(src, "Can't copy - not same remote type")
   908  		return nil, fs.ErrorCantCopy
   909  	}
   910  	err := srcObj.readMetaData(ctx)
   911  	if err != nil {
   912  		return nil, err
   913  	}
   914  
   915  	srcPath := srcObj.fs.rootSlash() + srcObj.remote
   916  	dstPath := f.rootSlash() + remote
   917  	if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
   918  		return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
   919  	}
   920  
   921  	// Create temporary object
   922  	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
   923  	if err != nil {
   924  		return nil, err
   925  	}
   926  
   927  	// Copy the object
   928  	opts := newOptsCall(srcObj.id, "POST", "/copy")
   929  	opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
   930  	opts.NoResponse = true
   931  
   932  	id, dstDriveID, _ := parseNormalizedID(directoryID)
   933  
   934  	replacedLeaf := replaceReservedChars(leaf)
   935  	copyReq := api.CopyItemRequest{
   936  		Name: &replacedLeaf,
   937  		ParentReference: api.ItemReference{
   938  			DriveID: dstDriveID,
   939  			ID:      id,
   940  		},
   941  	}
   942  	var resp *http.Response
   943  	err = f.pacer.Call(func() (bool, error) {
   944  		resp, err = f.srv.CallJSON(&opts, &copyReq, nil)
   945  		return shouldRetry(resp, err)
   946  	})
   947  	if err != nil {
   948  		return nil, err
   949  	}
   950  
   951  	// read location header
   952  	location := resp.Header.Get("Location")
   953  	if location == "" {
   954  		return nil, errors.New("didn't receive location header in copy response")
   955  	}
   956  
   957  	// Wait for job to finish
   958  	err = f.waitForJob(ctx, location, dstObj)
   959  	if err != nil {
   960  		return nil, err
   961  	}
   962  
   963  	// Copy does NOT copy the modTime from the source and there seems to
   964  	// be no way to set the date before copying, so set it afterwards.
   965  	// Note that this will create TWO versions on OneDrive.
   966  	err = dstObj.SetModTime(ctx, srcObj.ModTime(ctx))
   967  	if err != nil {
   968  		return nil, err
   969  	}
   970  
   971  	return dstObj, nil
   972  }
   973  
   974  // Purge deletes all the files and the container
   975  //
   976  // Optional interface: Only implement this if you have a way of
   977  // deleting all the files quicker than just running Remove() on the
   978  // result of List()
   979  func (f *Fs) Purge(ctx context.Context) error {
   980  	return f.purgeCheck(ctx, "", false)
   981  }
   982  
   983  // Move src to this remote using server side move operations.
   984  //
   985  // This is stored with the remote path given
   986  //
   987  // It returns the destination Object and a possible error
   988  //
   989  // Will only be called if src.Fs().Name() == f.Name()
   990  //
   991  // If it isn't possible then return fs.ErrorCantMove
   992  func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
   993  	srcObj, ok := src.(*Object)
   994  	if !ok {
   995  		fs.Debugf(src, "Can't move - not same remote type")
   996  		return nil, fs.ErrorCantMove
   997  	}
   998  
   999  	// Create temporary object
  1000  	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
  1001  	if err != nil {
  1002  		return nil, err
  1003  	}
  1004  
  1005  	id, dstDriveID, _ := parseNormalizedID(directoryID)
  1006  	_, srcObjDriveID, _ := parseNormalizedID(srcObj.id)
  1007  
  1008  	if dstDriveID != srcObjDriveID {
  1009  		// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
  1010  		// "Items cannot be moved between Drives using this request."
  1011  		return nil, fs.ErrorCantMove
  1012  	}
  1013  
  1014  	// Move the object
  1015  	opts := newOptsCall(srcObj.id, "PATCH", "")
  1016  
  1017  	move := api.MoveItemRequest{
  1018  		Name: replaceReservedChars(leaf),
  1019  		ParentReference: &api.ItemReference{
  1020  			DriveID: dstDriveID,
  1021  			ID:      id,
  1022  		},
  1023  		// We set the mod time too as it gets reset otherwise
  1024  		FileSystemInfo: &api.FileSystemInfoFacet{
  1025  			CreatedDateTime:      api.Timestamp(srcObj.modTime),
  1026  			LastModifiedDateTime: api.Timestamp(srcObj.modTime),
  1027  		},
  1028  	}
  1029  	var resp *http.Response
  1030  	var info api.Item
  1031  	err = f.pacer.Call(func() (bool, error) {
  1032  		resp, err = f.srv.CallJSON(&opts, &move, &info)
  1033  		return shouldRetry(resp, err)
  1034  	})
  1035  	if err != nil {
  1036  		return nil, err
  1037  	}
  1038  
  1039  	err = dstObj.setMetaData(&info)
  1040  	if err != nil {
  1041  		return nil, err
  1042  	}
  1043  	return dstObj, nil
  1044  }
  1045  
  1046  // DirMove moves src, srcRemote to this remote at dstRemote
  1047  // using server side move operations.
  1048  //
  1049  // Will only be called if src.Fs().Name() == f.Name()
  1050  //
  1051  // If it isn't possible then return fs.ErrorCantDirMove
  1052  //
  1053  // If destination exists then return fs.ErrorDirExists
  1054  func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
  1055  	srcFs, ok := src.(*Fs)
  1056  	if !ok {
  1057  		fs.Debugf(srcFs, "Can't move directory - not same remote type")
  1058  		return fs.ErrorCantDirMove
  1059  	}
  1060  	srcPath := path.Join(srcFs.root, srcRemote)
  1061  	dstPath := path.Join(f.root, dstRemote)
  1062  
  1063  	// Refuse to move to or from the root
  1064  	if srcPath == "" || dstPath == "" {
  1065  		fs.Debugf(src, "DirMove error: Can't move root")
  1066  		return errors.New("can't move root directory")
  1067  	}
  1068  
  1069  	// find the root src directory
  1070  	err := srcFs.dirCache.FindRoot(ctx, false)
  1071  	if err != nil {
  1072  		return err
  1073  	}
  1074  
  1075  	// find the root dst directory
  1076  	if dstRemote != "" {
  1077  		err = f.dirCache.FindRoot(ctx, true)
  1078  		if err != nil {
  1079  			return err
  1080  		}
  1081  	} else {
  1082  		if f.dirCache.FoundRoot() {
  1083  			return fs.ErrorDirExists
  1084  		}
  1085  	}
  1086  
  1087  	// Find ID of dst parent, creating subdirs if necessary
  1088  	var leaf, dstDirectoryID string
  1089  	findPath := dstRemote
  1090  	if dstRemote == "" {
  1091  		findPath = f.root
  1092  	}
  1093  	leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
  1094  	if err != nil {
  1095  		return err
  1096  	}
  1097  	parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)
  1098  
  1099  	// Find ID of src
  1100  	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
  1101  	if err != nil {
  1102  		return err
  1103  	}
  1104  	_, srcDriveID, _ := parseNormalizedID(srcID)
  1105  
  1106  	if dstDriveID != srcDriveID {
  1107  		// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
  1108  		// "Items cannot be moved between Drives using this request."
  1109  		return fs.ErrorCantDirMove
  1110  	}
  1111  
  1112  	// Check destination does not exist
  1113  	if dstRemote != "" {
  1114  		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
  1115  		if err == fs.ErrorDirNotFound {
  1116  			// OK
  1117  		} else if err != nil {
  1118  			return err
  1119  		} else {
  1120  			return fs.ErrorDirExists
  1121  		}
  1122  	}
  1123  
  1124  	// Get timestamps of src so they can be preserved
  1125  	srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(srcID, "")
  1126  	if err != nil {
  1127  		return err
  1128  	}
  1129  
  1130  	// Do the move
  1131  	opts := newOptsCall(srcID, "PATCH", "")
  1132  	move := api.MoveItemRequest{
  1133  		Name: replaceReservedChars(leaf),
  1134  		ParentReference: &api.ItemReference{
  1135  			DriveID: dstDriveID,
  1136  			ID:      parsedDstDirID,
  1137  		},
  1138  		// We set the mod time too as it gets reset otherwise
  1139  		FileSystemInfo: &api.FileSystemInfoFacet{
  1140  			CreatedDateTime:      srcInfo.CreatedDateTime,
  1141  			LastModifiedDateTime: srcInfo.LastModifiedDateTime,
  1142  		},
  1143  	}
  1144  	var resp *http.Response
  1145  	var info api.Item
  1146  	err = f.pacer.Call(func() (bool, error) {
  1147  		resp, err = f.srv.CallJSON(&opts, &move, &info)
  1148  		return shouldRetry(resp, err)
  1149  	})
  1150  	if err != nil {
  1151  		return err
  1152  	}
  1153  
  1154  	srcFs.dirCache.FlushDir(srcRemote)
  1155  	return nil
  1156  }
  1157  
  1158  // DirCacheFlush resets the directory cache - used in testing as an
  1159  // optional interface
  1160  func (f *Fs) DirCacheFlush() {
  1161  	f.dirCache.ResetRoot()
  1162  }
  1163  
  1164  // About gets quota information
  1165  func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
  1166  	var drive api.Drive
  1167  	opts := rest.Opts{
  1168  		Method: "GET",
  1169  		Path:   "",
  1170  	}
  1171  	var resp *http.Response
  1172  	err = f.pacer.Call(func() (bool, error) {
  1173  		resp, err = f.srv.CallJSON(&opts, nil, &drive)
  1174  		return shouldRetry(resp, err)
  1175  	})
  1176  	if err != nil {
  1177  		return nil, errors.Wrap(err, "about failed")
  1178  	}
  1179  	q := drive.Quota
  1180  	usage = &fs.Usage{
  1181  		Total:   fs.NewUsageValue(q.Total),     // quota of bytes that can be used
  1182  		Used:    fs.NewUsageValue(q.Used),      // bytes in use
  1183  		Trashed: fs.NewUsageValue(q.Deleted),   // bytes in trash
  1184  		Free:    fs.NewUsageValue(q.Remaining), // bytes which can be uploaded before reaching the quota
  1185  	}
  1186  	return usage, nil
  1187  }
  1188  
  1189  // Hashes returns the supported hash sets.
  1190  func (f *Fs) Hashes() hash.Set {
  1191  	if f.driveType == driveTypePersonal {
  1192  		return hash.Set(hash.SHA1)
  1193  	}
  1194  	return hash.Set(hash.QuickXorHash)
  1195  }
  1196  
  1197  // PublicLink returns a link for downloading without an account.
  1198  func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
  1199  	info, _, err := f.readMetaDataForPath(ctx, f.srvPath(remote))
  1200  	if err != nil {
  1201  		return "", err
  1202  	}
  1203  	opts := newOptsCall(info.GetID(), "POST", "/createLink")
  1204  
  1205  	share := api.CreateShareLinkRequest{
  1206  		Type:  "view",
  1207  		Scope: "anonymous",
  1208  	}
  1209  
  1210  	var resp *http.Response
  1211  	var result api.CreateShareLinkResponse
  1212  	err = f.pacer.Call(func() (bool, error) {
  1213  		resp, err = f.srv.CallJSON(&opts, &share, &result)
  1214  		return shouldRetry(resp, err)
  1215  	})
  1216  	if err != nil {
  1217  		fs.Debugf(f, "Failed to create share link: %v", err)
  1218  		return "", err
  1219  	}
  1220  	return result.Link.WebURL, nil
  1221  }
  1222  
  1223  // ------------------------------------------------------------
  1224  
  1225  // Fs returns the parent Fs
  1226  func (o *Object) Fs() fs.Info {
  1227  	return o.fs
  1228  }
  1229  
  1230  // Return a string version
  1231  func (o *Object) String() string {
  1232  	if o == nil {
  1233  		return "<nil>"
  1234  	}
  1235  	return o.remote
  1236  }
  1237  
  1238  // Remote returns the remote path
  1239  func (o *Object) Remote() string {
  1240  	return o.remote
  1241  }
  1242  
  1243  // srvPath returns a path for use on the server given a remote
  1244  func (f *Fs) srvPath(remote string) string {
  1245  	return replaceReservedChars(f.rootSlash() + remote)
  1246  }
  1247  
  1248  // srvPath returns a path for use on the server
  1249  func (o *Object) srvPath() string {
  1250  	return o.fs.srvPath(o.remote)
  1251  }
  1252  
  1253  // Hash returns the SHA-1 (OneDrive Personal) or QuickXorHash (OneDrive for Business) of an object as a lowercase hex string
  1254  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  1255  	if o.fs.driveType == driveTypePersonal {
  1256  		if t == hash.SHA1 {
  1257  			return o.sha1, nil
  1258  		}
  1259  	} else {
  1260  		if t == hash.QuickXorHash {
  1261  			return o.quickxorhash, nil
  1262  		}
  1263  	}
  1264  	return "", hash.ErrUnsupported
  1265  }
  1266  
  1267  // Size returns the size of an object in bytes
  1268  func (o *Object) Size() int64 {
  1269  	err := o.readMetaData(context.TODO())
  1270  	if err != nil {
  1271  		fs.Logf(o, "Failed to read metadata: %v", err)
  1272  		return 0
  1273  	}
  1274  	return o.size
  1275  }
  1276  
  1277  // setMetaData sets the metadata from info
  1278  func (o *Object) setMetaData(info *api.Item) (err error) {
  1279  	if info.GetFolder() != nil {
  1280  		return errors.Wrapf(fs.ErrorNotAFile, "%q", o.remote)
  1281  	}
  1282  	o.hasMetaData = true
  1283  	o.size = info.GetSize()
  1284  
  1285  	o.isOneNoteFile = info.GetPackageType() == api.PackageTypeOneNote
  1286  
  1287  	// Docs: https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/hashes
  1288  	//
  1289  	// We use SHA1 for onedrive personal and QuickXorHash for onedrive for business
  1290  	file := info.GetFile()
  1291  	if file != nil {
  1292  		o.mimeType = file.MimeType
  1293  		if file.Hashes.Sha1Hash != "" {
  1294  			o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
  1295  		}
  1296  		if file.Hashes.QuickXorHash != "" {
  1297  			h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
  1298  			if err != nil {
  1299  				fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
  1300  			} else {
  1301  				o.quickxorhash = hex.EncodeToString(h)
  1302  			}
  1303  		}
  1304  	}
  1305  	fileSystemInfo := info.GetFileSystemInfo()
  1306  	if fileSystemInfo != nil {
  1307  		o.modTime = time.Time(fileSystemInfo.LastModifiedDateTime)
  1308  	} else {
  1309  		o.modTime = time.Time(info.GetLastModifiedDateTime())
  1310  	}
  1311  	o.id = info.GetID()
  1312  	return nil
  1313  }
  1314  
  1315  // readMetaData gets the metadata if it hasn't already been fetched
  1316  //
  1317  // it also sets the info
  1318  func (o *Object) readMetaData(ctx context.Context) (err error) {
  1319  	if o.hasMetaData {
  1320  		return nil
  1321  	}
  1322  	info, _, err := o.fs.readMetaDataForPath(ctx, o.srvPath())
  1323  	if err != nil {
  1324  		if apiErr, ok := err.(*api.Error); ok {
  1325  			if apiErr.ErrorInfo.Code == "itemNotFound" {
  1326  				return fs.ErrorObjectNotFound
  1327  			}
  1328  		}
  1329  		return err
  1330  	}
  1331  	return o.setMetaData(info)
  1332  }
  1333  
  1334  // ModTime returns the modification time of the object
  1335  //
  1336  // It attempts to read the object's mtime and, if that isn't present,
  1337  // the LastModified returned in the HTTP headers. If the metadata can't
  1338  // be read it logs the error and returns the current time.
  1339  func (o *Object) ModTime(ctx context.Context) time.Time {
  1340  	err := o.readMetaData(ctx)
  1341  	if err != nil {
  1342  		fs.Logf(o, "Failed to read metadata: %v", err)
  1343  		return time.Now()
  1344  	}
  1345  	return o.modTime
  1346  }
  1347  
  1348  // setModTime sets the modification time of the local fs object
  1349  func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
  1350  	var opts rest.Opts
  1351  	leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
  1352  	trueDirID, drive, rootURL := parseNormalizedID(directoryID)
  1353  	if drive != "" {
  1354  		opts = rest.Opts{
  1355  			Method:  "PATCH",
  1356  			RootURL: rootURL,
  1357  			Path:    "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(leaf)),
  1358  		}
  1359  	} else {
  1360  		opts = rest.Opts{
  1361  			Method: "PATCH",
  1362  			Path:   "/root:/" + withTrailingColon(rest.URLPathEscape(o.srvPath())),
  1363  		}
  1364  	}
  1365  	update := api.SetFileSystemInfo{
  1366  		FileSystemInfo: api.FileSystemInfoFacet{
  1367  			CreatedDateTime:      api.Timestamp(modTime),
  1368  			LastModifiedDateTime: api.Timestamp(modTime),
  1369  		},
  1370  	}
  1371  	var info *api.Item
  1372  	err := o.fs.pacer.Call(func() (bool, error) {
  1373  		resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
  1374  		return shouldRetry(resp, err)
  1375  	})
  1376  	return info, err
  1377  }
  1378  
  1379  // SetModTime sets the modification time of the local fs object
  1380  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
  1381  	info, err := o.setModTime(ctx, modTime)
  1382  	if err != nil {
  1383  		return err
  1384  	}
  1385  	return o.setMetaData(info)
  1386  }
  1387  
  1388  // Storable returns a boolean showing whether this object is storable
  1389  func (o *Object) Storable() bool {
  1390  	return true
  1391  }
  1392  
  1393  // Open an object for read
  1394  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  1395  	if o.id == "" {
  1396  		return nil, errors.New("can't download - no id")
  1397  	}
  1398  	if o.isOneNoteFile {
  1399  		return nil, errors.New("can't open a OneNote file")
  1400  	}
  1401  
  1402  	fs.FixRangeOption(options, o.size)
  1403  	var resp *http.Response
  1404  	opts := newOptsCall(o.id, "GET", "/content")
  1405  	opts.Options = options
  1406  
  1407  	err = o.fs.pacer.Call(func() (bool, error) {
  1408  		resp, err = o.fs.srv.Call(&opts)
  1409  		return shouldRetry(resp, err)
  1410  	})
  1411  	if err != nil {
  1412  		return nil, err
  1413  	}
  1414  
  1415  	if resp.StatusCode == http.StatusOK && resp.ContentLength > 0 && resp.Header.Get("Content-Range") == "" {
  1416  		// Overwrite size with the actual size since size readings from OneDrive are unreliable.
  1417  		o.size = resp.ContentLength
  1418  	}
  1419  	return resp.Body, err
  1420  }
  1421  
  1422  // createUploadSession creates an upload session for the object
  1423  func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (response *api.CreateUploadResponse, err error) {
  1424  	leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
  1425  	id, drive, rootURL := parseNormalizedID(directoryID)
  1426  	var opts rest.Opts
  1427  	if drive != "" {
  1428  		opts = rest.Opts{
  1429  			Method:  "POST",
  1430  			RootURL: rootURL,
  1431  			Path:    "/" + drive + "/items/" + id + ":/" + rest.URLPathEscape(replaceReservedChars(leaf)) + ":/createUploadSession",
  1432  		}
  1433  	} else {
  1434  		opts = rest.Opts{
  1435  			Method: "POST",
  1436  			Path:   "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/createUploadSession",
  1437  		}
  1438  	}
  1439  	createRequest := api.CreateUploadRequest{}
  1440  	createRequest.Item.FileSystemInfo.CreatedDateTime = api.Timestamp(modTime)
  1441  	createRequest.Item.FileSystemInfo.LastModifiedDateTime = api.Timestamp(modTime)
  1442  	var resp *http.Response
  1443  	err = o.fs.pacer.Call(func() (bool, error) {
  1444  		resp, err = o.fs.srv.CallJSON(&opts, &createRequest, &response)
  1445  		if apiErr, ok := err.(*api.Error); ok {
  1446  			if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
  1447  				// Make the error more user-friendly
  1448  				err = errors.New(err.Error() + " (is it a OneNote file?)")
  1449  			}
  1450  		}
  1451  		return shouldRetry(resp, err)
  1452  	})
  1453  	return response, err
  1454  }
  1455  
  1456  // uploadFragment uploads a part
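        //
        // Each fragment is sent with a Content-Range header giving its position,
        // e.g. "bytes 0-10485759/26214400" for the first 10 MiB fragment of a
        // 25 MiB file.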
  1457  func (o *Object) uploadFragment(url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
  1458  	opts := rest.Opts{
  1459  		Method:        "PUT",
  1460  		RootURL:       url,
  1461  		ContentLength: &chunkSize,
  1462  		ContentRange:  fmt.Sprintf("bytes %d-%d/%d", start, start+chunkSize-1, totalSize),
  1463  		Body:          chunk,
  1464  	}
  1465  	//	var response api.UploadFragmentResponse
  1466  	var resp *http.Response
  1467  	err = o.fs.pacer.Call(func() (bool, error) {
  1468  		_, _ = chunk.Seek(0, io.SeekStart)
  1469  		resp, err = o.fs.srv.Call(&opts)
  1470  		if resp != nil {
  1471  			defer fs.CheckClose(resp.Body, &err)
  1472  		}
  1473  		retry, err := shouldRetry(resp, err)
  1474  		if !retry && resp != nil {
  1475  			if resp.StatusCode == 200 || resp.StatusCode == 201 {
  1476  				// we are done :)
  1477  				// read the item
  1478  				info = &api.Item{}
  1479  				return false, json.NewDecoder(resp.Body).Decode(info)
  1480  			}
  1481  		}
  1482  		return retry, err
  1483  	})
  1484  	return info, err
  1485  }
  1486  
  1487  // cancelUploadSession cancels an upload session
  1488  func (o *Object) cancelUploadSession(url string) (err error) {
  1489  	opts := rest.Opts{
  1490  		Method:     "DELETE",
  1491  		RootURL:    url,
  1492  		NoResponse: true,
  1493  	}
  1494  	var resp *http.Response
  1495  	err = o.fs.pacer.Call(func() (bool, error) {
  1496  		resp, err = o.fs.srv.Call(&opts)
  1497  		return shouldRetry(resp, err)
  1498  	})
  1499  	return
  1500  }
  1501  
  1502  // uploadMultipart uploads a file using multipart upload
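        //
        // It creates an upload session, then uploads the file in chunks of
        // opt.ChunkSize using uploadFragment. The session is cancelled if the
        // upload fails or rclone is interrupted (see gracefulCancel below).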
  1503  func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
  1504  	if size <= 0 {
  1505  		return nil, errors.New("unknown-sized upload not supported")
  1506  	}
  1507  
  1508  	uploadURLChan := make(chan string, 1)
  1509  	gracefulCancel := func() {
  1510  		uploadURL, ok := <-uploadURLChan
  1511  		// Reading from uploadURLChan blocks the atexit process until
  1512  		// we are able to use uploadURL to cancel the upload
  1513  		if !ok { // createUploadSession failed - no need to cancel upload
  1514  			return
  1515  		}
  1516  
  1517  		fs.Debugf(o, "Cancelling multipart upload")
  1518  		cancelErr := o.cancelUploadSession(uploadURL)
  1519  		if cancelErr != nil {
  1520  			fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
  1521  		}
  1522  	}
  1523  	cancelFuncHandle := atexit.Register(gracefulCancel)
  1524  
  1525  	// Create upload session
  1526  	fs.Debugf(o, "Starting multipart upload")
  1527  	session, err := o.createUploadSession(ctx, modTime)
  1528  	if err != nil {
  1529  		close(uploadURLChan)
  1530  		atexit.Unregister(cancelFuncHandle)
  1531  		return nil, err
  1532  	}
  1533  	uploadURL := session.UploadURL
  1534  	uploadURLChan <- uploadURL
  1535  
  1536  	defer func() {
  1537  		if err != nil {
  1538  			fs.Debugf(o, "Error encountered during upload: %v", err)
  1539  			gracefulCancel()
  1540  		}
  1541  		atexit.Unregister(cancelFuncHandle)
  1542  	}()
  1543  
  1544  	// Upload the chunks
  1545  	remaining := size
  1546  	position := int64(0)
  1547  	for remaining > 0 {
  1548  		n := int64(o.fs.opt.ChunkSize)
  1549  		if remaining < n {
  1550  			n = remaining
  1551  		}
  1552  		seg := readers.NewRepeatableReader(io.LimitReader(in, n))
  1553  		fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
  1554  		info, err = o.uploadFragment(uploadURL, position, size, seg, n)
  1555  		if err != nil {
  1556  			return nil, err
  1557  		}
  1558  		remaining -= n
  1559  		position += n
  1560  	}
  1561  
  1562  	return info, nil
  1563  }
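
        // Minimal sketch, not upstream code, of the cancel-handoff pattern used by
        // uploadMultipart above: the atexit handler blocks on a buffered channel, so an
        // interrupt either receives the session URL (and cancels the upload) or finds
        // the channel closed because session creation failed (nothing to cancel).
        // createSession and cancel are hypothetical stand-ins for createUploadSession
        // and cancelUploadSession.
        func exampleCancelHandoff(createSession func() (string, error), cancel func(string)) error {
        	urlChan := make(chan string, 1)
        	handle := atexit.Register(func() {
        		if url, ok := <-urlChan; ok {
        			cancel(url)
        		}
        	})
        	url, err := createSession()
        	if err != nil {
        		// No session exists - unblock the handler and drop it
        		close(urlChan)
        		atexit.Unregister(handle)
        		return err
        	}
        	urlChan <- url
        	// ... upload the fragments here, calling cancel(url) if one of them fails ...
        	atexit.Unregister(handle)
        	return nil
        }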
  1564  
  1565  // uploadSinglepart updates the content of a remote file of up to 4 MiB in a single request.
  1566  // It sets the modification time after uploading, which creates a new version of the remote file.
  1567  func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
  1568  	if size < 0 || size > 4*1024*1024 {
  1569  		return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
  1570  	}
  1571  
  1572  	fs.Debugf(o, "Starting singlepart upload")
  1573  	var resp *http.Response
  1574  	var opts rest.Opts
  1575  	leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, o.remote, false)
        	if err != nil {
        		return nil, err
        	}
  1576  	trueDirID, drive, rootURL := parseNormalizedID(directoryID)
  1577  	if drive != "" {
  1578  		opts = rest.Opts{
  1579  			Method:        "PUT",
  1580  			RootURL:       rootURL,
  1581  			Path:          "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf) + ":/content",
  1582  			ContentLength: &size,
  1583  			Body:          in,
  1584  		}
  1585  	} else {
  1586  		opts = rest.Opts{
  1587  			Method:        "PUT",
  1588  			Path:          "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
  1589  			ContentLength: &size,
  1590  			Body:          in,
  1591  		}
  1592  	}
  1593  
  1594  	err = o.fs.pacer.Call(func() (bool, error) {
  1595  		resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
  1596  		if apiErr, ok := err.(*api.Error); ok {
  1597  			if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
  1598  				// Make the error more user-friendly
  1599  				err = errors.New(err.Error() + " (is it a OneNote file?)")
  1600  			}
  1601  		}
  1602  		return shouldRetry(resp, err)
  1603  	})
  1604  	if err != nil {
  1605  		return nil, err
  1606  	}
  1607  
  1608  	err = o.setMetaData(info)
  1609  	if err != nil {
  1610  		return nil, err
  1611  	}
  1612  	// Set the mod time now and read metadata
  1613  	return o.setModTime(ctx, modTime)
  1614  }
  1615  
  1616  // Update the object with the contents of the io.Reader, modTime and size
  1617  //
  1618  // The new object may have been created if an error is returned
  1619  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
  1620  	if o.hasMetaData && o.isOneNoteFile {
  1621  		return errors.New("can't upload content to a OneNote file")
  1622  	}
  1623  
  1624  	o.fs.tokenRenewer.Start()
  1625  	defer o.fs.tokenRenewer.Stop()
  1626  
  1627  	size := src.Size()
  1628  	modTime := src.ModTime(ctx)
  1629  
  1630  	var info *api.Item
  1631  	if size > 0 {
  1632  		info, err = o.uploadMultipart(ctx, in, size, modTime)
  1633  	} else if size == 0 {
  1634  		info, err = o.uploadSinglepart(ctx, in, size, modTime)
  1635  	} else {
  1636  		return errors.New("unknown-sized upload not supported")
  1637  	}
  1638  	if err != nil {
  1639  		return err
  1640  	}
  1641  
  1642  	return o.setMetaData(info)
  1643  }
  1644  
  1645  // Remove an object
  1646  func (o *Object) Remove(ctx context.Context) error {
  1647  	return o.fs.deleteObject(o.id)
  1648  }
  1649  
  1650  // MimeType of an Object if known, "" otherwise
  1651  func (o *Object) MimeType(ctx context.Context) string {
  1652  	return o.mimeType
  1653  }
  1654  
  1655  // ID returns the ID of the Object if known, or "" if not
  1656  func (o *Object) ID() string {
  1657  	return o.id
  1658  }
  1659  
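        // newOptsCall builds the rest.Opts for a request on the item with the given
        // normalized ID, appending route to the item path. If the ID carries a drive ID
        // the request is routed via the /drives root URL, otherwise the path is
        // relative to the default drive.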
  1660  func newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
  1661  	id, drive, rootURL := parseNormalizedID(normalizedID)
  1662  
  1663  	if drive != "" {
  1664  		return rest.Opts{
  1665  			Method:  method,
  1666  			RootURL: rootURL,
  1667  			Path:    "/" + drive + "/items/" + id + route,
  1668  		}
  1669  	}
  1670  	return rest.Opts{
  1671  		Method: method,
  1672  		Path:   "/items/" + id + route,
  1673  	}
  1674  }
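
        // Illustrative sketch, not part of the upstream code: the two shapes of request
        // newOptsCall produces. The drive ID, item ID and "/copy" route are made up.
        func exampleNewOptsCall() {
        	// Normalized "driveID#itemID" form - routed via the /drives root URL:
        	// RootURL "https://graph.microsoft.com/v1.0/drives",
        	// Path "/b!someDriveID/items/01SOMEITEMID/copy"
        	opts := newOptsCall("b!someDriveID#01SOMEITEMID", "POST", "/copy")
        	fmt.Println(opts.Method, opts.RootURL+opts.Path)

        	// Plain item ID - RootURL is empty, so the default root URL is used:
        	// Path "/items/01SOMEITEMID/copy"
        	opts = newOptsCall("01SOMEITEMID", "POST", "/copy")
        	fmt.Println(opts.Method, opts.Path)
        }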
  1675  
  1676  // parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
  1677  // and returns itemID, driveID, rootURL.
  1678  // Such a normalized ID can come from (*Item).GetID()
  1679  func parseNormalizedID(ID string) (string, string, string) {
  1680  	if strings.Contains(ID, "#") {
  1681  		s := strings.SplitN(ID, "#", 2)
  1682  		return s[1], s[0], graphURL + "/drives"
  1683  	}
  1684  	return ID, "", ""
  1685  }
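
        // Illustrative sketch, not part of the upstream code: the two ID forms accepted
        // by parseNormalizedID. The IDs are made up.
        func exampleParseNormalizedID() {
        	// Normalized "driveID#itemID" form - the item lives on an explicit drive:
        	id, drive, rootURL := parseNormalizedID("b!someDriveID#01SOMEITEMID")
        	fmt.Println(id, drive, rootURL) // 01SOMEITEMID b!someDriveID https://graph.microsoft.com/v1.0/drives

        	// Plain item ID - drive and rootURL come back empty and the caller falls
        	// back to the default drive:
        	id, drive, rootURL = parseNormalizedID("01SOMEITEMID")
        	fmt.Println(id, drive, rootURL)
        }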
  1686  
  1687  // getRelativePathInsideBase checks if `target` is inside `base`. If so, it
  1688  // returns a relative path for `target` based on `base` and a boolean `true`.
  1689  // If `target` equals `base`, it returns "", true. Otherwise it returns "", false.
  1690  func getRelativePathInsideBase(base, target string) (string, bool) {
  1691  	if base == "" {
  1692  		return target, true
  1693  	}
  1694  
        	if target == base {
        		// target is base itself, so the relative path is empty
        		return "", true
        	}
  1695  	baseSlash := base + "/"
  1696  	if strings.HasPrefix(target, baseSlash) {
  1697  		return target[len(baseSlash):], true
  1698  	}
  1699  	return "", false
  1700  }
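
        // Illustrative sketch, not part of the upstream code, of getRelativePathInsideBase.
        // The paths are made up; note that "a/bc" is not inside "a/b".
        func exampleGetRelativePathInsideBase() {
        	fmt.Println(getRelativePathInsideBase("a/b", "a/b/c/d")) // -> "c/d", true
        	fmt.Println(getRelativePathInsideBase("a/b", "a/bc/d"))  // -> "", false
        	fmt.Println(getRelativePathInsideBase("", "x/y"))        // -> "x/y", true
        }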
  1701  
  1702  // withTrailingColon adds a ":" to the end of `remotePath`.
  1703  // If `remotePath` already ends with "/", it is changed to ":/".
  1704  // If `remotePath` is "", "" is returned.
  1705  // A workaround for #2720 and #3039.
  1706  func withTrailingColon(remotePath string) string {
  1707  	if remotePath == "" {
  1708  		return ""
  1709  	}
  1710  
  1711  	if strings.HasSuffix(remotePath, "/") {
  1712  		return remotePath[:len(remotePath)-1] + ":/"
  1713  	}
  1714  	return remotePath + ":"
  1715  }
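
        // Illustrative sketch, not part of the upstream code, of the three cases handled
        // by withTrailingColon. The paths are made up.
        func exampleWithTrailingColon() {
        	fmt.Println(withTrailingColon("Documents/report.txt")) // -> "Documents/report.txt:"
        	fmt.Println(withTrailingColon("Documents/"))           // -> "Documents:/"
        	fmt.Println(withTrailingColon(""))                     // -> ""
        }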
  1716  
  1717  // Check the interfaces are satisfied
  1718  var (
  1719  	_ fs.Fs              = (*Fs)(nil)
  1720  	_ fs.Purger          = (*Fs)(nil)
  1721  	_ fs.Copier          = (*Fs)(nil)
  1722  	_ fs.Mover           = (*Fs)(nil)
  1723  	_ fs.DirMover        = (*Fs)(nil)
  1724  	_ fs.DirCacheFlusher = (*Fs)(nil)
  1725  	_ fs.Abouter         = (*Fs)(nil)
  1726  	_ fs.PublicLinker    = (*Fs)(nil)
  1727  	_ fs.Object          = (*Object)(nil)
  1728  	_ fs.MimeTyper       = (*Object)(nil)
  1729  	_ fs.IDer            = (*Object)(nil)
  1730  )