github.com/rclone/rclone@v1.66.1-0.20240517100346-7b89735ae726/backend/onedrive/onedrive.go (about)

     1  // Package onedrive provides an interface to the Microsoft OneDrive
     2  // object storage system.
     3  package onedrive
     4  
     5  import (
     6  	"context"
     7  	_ "embed"
     8  	"encoding/base64"
     9  	"encoding/hex"
    10  	"encoding/json"
    11  	"errors"
    12  	"fmt"
    13  	"io"
    14  	"net/http"
    15  	"net/url"
    16  	"path"
    17  	"regexp"
    18  	"strconv"
    19  	"strings"
    20  	"sync"
    21  	"time"
    22  
    23  	"github.com/rclone/rclone/backend/onedrive/api"
    24  	"github.com/rclone/rclone/backend/onedrive/quickxorhash"
    25  	"github.com/rclone/rclone/fs"
    26  	"github.com/rclone/rclone/fs/config"
    27  	"github.com/rclone/rclone/fs/config/configmap"
    28  	"github.com/rclone/rclone/fs/config/configstruct"
    29  	"github.com/rclone/rclone/fs/config/obscure"
    30  	"github.com/rclone/rclone/fs/fserrors"
    31  	"github.com/rclone/rclone/fs/fshttp"
    32  	"github.com/rclone/rclone/fs/hash"
    33  	"github.com/rclone/rclone/fs/log"
    34  	"github.com/rclone/rclone/fs/operations"
    35  	"github.com/rclone/rclone/fs/walk"
    36  	"github.com/rclone/rclone/lib/atexit"
    37  	"github.com/rclone/rclone/lib/dircache"
    38  	"github.com/rclone/rclone/lib/encoder"
    39  	"github.com/rclone/rclone/lib/oauthutil"
    40  	"github.com/rclone/rclone/lib/pacer"
    41  	"github.com/rclone/rclone/lib/readers"
    42  	"github.com/rclone/rclone/lib/rest"
    43  	"golang.org/x/oauth2"
    44  )
    45  
const (
	// OAuth client credentials for rclone's registered Azure application.
	// The client secret is stored obscured and revealed at startup.
	rcloneClientID              = "b15665d9-eda6-4092-8539-0eec376afd59"
	rcloneEncryptedClientSecret = "_JUdzh3LnKNqSPcf4Wu5fgMFIQOI8glZu_akYgR8yf6egowNBg-R"
	minSleep                    = 10 * time.Millisecond // minimum pacer sleep between API calls
	maxSleep                    = 2 * time.Second       // maximum pacer sleep when backing off
	decayConstant               = 2                     // bigger for slower decay, exponential
	configDriveID               = "drive_id"            // config key the drive ID is stored under
	configDriveType             = "drive_type"          // config key the drive type is stored under
	driveTypePersonal           = "personal"
	driveTypeBusiness           = "business"
	driveTypeSharepoint         = "documentLibrary"
	defaultChunkSize            = 10 * fs.Mebi  // default upload chunk size
	chunkSizeMultiple           = 320 * fs.Kibi // upload chunk sizes must be a multiple of this

	// National cloud region identifiers - used as keys into
	// graphAPIEndpoint and authEndpoint below.
	regionGlobal = "global"
	regionUS     = "us"
	regionDE     = "de"
	regionCN     = "cn"
)
    65  
// Globals
var (
	// OAuth2 authorize and token paths, appended to the region's auth
	// endpoint to form the full URLs.
	authPath  = "/common/oauth2/v2.0/authorize"
	tokenPath = "/common/oauth2/v2.0/token"

	// The full set of scopes requested by default, and the reduced set
	// (without Sites.Read.All) used when disable_site_permission is set.
	scopeAccess             = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "Sites.Read.All", "offline_access"}
	scopeAccessWithoutSites = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}

	// Description of how to auth for this app for a business account
	oauthConfig = &oauth2.Config{
		Scopes:       scopeAccess,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectLocalhostURL,
	}

	// Microsoft Graph API base URL for each national cloud region.
	graphAPIEndpoint = map[string]string{
		"global": "https://graph.microsoft.com",
		"us":     "https://graph.microsoft.us",
		"de":     "https://graph.microsoft.de",
		"cn":     "https://microsoftgraph.chinacloudapi.cn",
	}

	// OAuth login base URL for each national cloud region.
	authEndpoint = map[string]string{
		"global": "https://login.microsoftonline.com",
		"us":     "https://login.microsoftonline.us",
		"de":     "https://login.microsoftonline.de",
		"cn":     "https://login.chinacloudapi.cn",
	}

	// QuickXorHashType is the hash.Type for OneDrive
	QuickXorHashType hash.Type

	// metadataHelp is the backend metadata help text, embedded from
	// metadata.md at build time.
	//go:embed metadata.md
	metadataHelp string
)
   102  
// Register with Fs
func init() {
	// Register the OneDrive QuickXorHash with rclone's hash registry so
	// it can be used like any other hash type.
	QuickXorHashType = hash.RegisterHash("quickxor", "QuickXorHash", 40, quickxorhash.New)
	fs.Register(&fs.RegInfo{
		Name:        "onedrive",
		Description: "Microsoft OneDrive",
		NewFs:       NewFs,
		Config:      Config,
		MetadataInfo: &fs.MetadataInfo{
			System: systemMetadataInfo,
			Help:   metadataHelp,
		},
		// Backend specific options in addition to the OAuth options
		// shared by all oauthutil based backends.
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name:    "region",
			Help:    "Choose national cloud region for OneDrive.",
			Default: "global",
			Examples: []fs.OptionExample{
				{
					Value: regionGlobal,
					Help:  "Microsoft Cloud Global",
				}, {
					Value: regionUS,
					Help:  "Microsoft Cloud for US Government",
				}, {
					Value: regionDE,
					Help:  "Microsoft Cloud Germany",
				}, {
					Value: regionCN,
					Help:  "Azure and Office 365 operated by Vnet Group in China",
				},
			},
		}, {
			Name: "chunk_size",
			Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).

Above this size files will be chunked - must be multiple of 320k (327,680 bytes) and
should not exceed 250M (262,144,000 bytes) else you may encounter \"Microsoft.SharePoint.Client.InvalidClientQueryException: The request message is too big.\"
Note that the chunks will be buffered into memory.`,
			Default:  defaultChunkSize,
			Advanced: true,
		}, {
			Name:      "drive_id",
			Help:      "The ID of the drive to use.",
			Default:   "",
			Advanced:  true,
			Sensitive: true,
		}, {
			Name:     "drive_type",
			Help:     "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
			Default:  "",
			Advanced: true,
		}, {
			Name: "root_folder_id",
			Help: `ID of the root folder.

This isn't normally needed, but in special circumstances you might
know the folder ID that you wish to access but not be able to get
there through a path traversal.
`,
			Advanced:  true,
			Sensitive: true,
		}, {
			Name: "access_scopes",
			Help: `Set scopes to be requested by rclone.

Choose or manually enter a custom space separated list with all scopes, that rclone should request.
`,
			Default:  scopeAccess,
			Advanced: true,
			Examples: []fs.OptionExample{
				{
					Value: "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access",
					Help:  "Read and write access to all resources",
				},
				{
					Value: "Files.Read Files.Read.All Sites.Read.All offline_access",
					Help:  "Read only access to all resources",
				},
				{
					Value: "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All offline_access",
					Help:  "Read and write access to all resources, without the ability to browse SharePoint sites. \nSame as if disable_site_permission was set to true",
				},
			},
		}, {
			// Deprecated in favour of access_scopes, hence hidden.
			Name: "disable_site_permission",
			Help: `Disable the request for Sites.Read.All permission.

If set to true, you will no longer be able to search for a SharePoint site when
configuring drive ID, because rclone will not request Sites.Read.All permission.
Set it to true if your organization didn't assign Sites.Read.All permission to the
application, and your organization disallows users to consent app permission
request on their own.`,
			Default:  false,
			Advanced: true,
			Hide:     fs.OptionHideBoth,
		}, {
			Name: "expose_onenote_files",
			Help: `Set to make OneNote files show up in directory listings.

By default, rclone will hide OneNote files in directory listings because
operations like "Open" and "Update" won't work on them.  But this
behaviour may also prevent you from deleting them.  If you want to
delete OneNote files or otherwise want them to show up in directory
listing, set this option.`,
			Default:  false,
			Advanced: true,
		}, {
			Name:    "server_side_across_configs",
			Default: false,
			Help: `Deprecated: use --server-side-across-configs instead.

Allow server-side operations (e.g. copy) to work across different onedrive configs.

This will work if you are copying between two OneDrive *Personal* drives AND the files to
copy are already shared between them. Additionally, it should also function for a user who
has access permissions both between Onedrive for *business* and *SharePoint* under the *same
tenant*, and between *SharePoint* and another *SharePoint* under the *same tenant*. In other
cases, rclone will fall back to normal copy (which will be slightly slower).`,
			Advanced: true,
		}, {
			Name:     "list_chunk",
			Help:     "Size of listing chunk.",
			Default:  1000,
			Advanced: true,
		}, {
			Name:    "no_versions",
			Default: false,
			Help: `Remove all versions on modifying operations.

Onedrive for business creates versions when rclone uploads new files
overwriting an existing one and when it sets the modification time.

These versions take up space out of the quota.

This flag checks for versions after file upload and setting
modification time and removes all but the last version.

**NB** Onedrive personal can't currently delete versions so don't use
this flag there.
`,
			Advanced: true,
		}, {
			Name:     "link_scope",
			Default:  "anonymous",
			Help:     `Set the scope of the links created by the link command.`,
			Advanced: true,
			Examples: []fs.OptionExample{{
				Value: "anonymous",
				Help:  "Anyone with the link has access, without needing to sign in.\nThis may include people outside of your organization.\nAnonymous link support may be disabled by an administrator.",
			}, {
				Value: "organization",
				Help:  "Anyone signed into your organization (tenant) can use the link to get access.\nOnly available in OneDrive for Business and SharePoint.",
			}},
		}, {
			Name:     "link_type",
			Default:  "view",
			Help:     `Set the type of the links created by the link command.`,
			Advanced: true,
			Examples: []fs.OptionExample{{
				Value: "view",
				Help:  "Creates a read-only link to the item.",
			}, {
				Value: "edit",
				Help:  "Creates a read-write link to the item.",
			}, {
				Value: "embed",
				Help:  "Creates an embeddable link to the item.",
			}},
		}, {
			Name:    "link_password",
			Default: "",
			Help: `Set the password for links created by the link command.

At the time of writing this only works with OneDrive personal paid accounts.
`,
			Advanced:  true,
			Sensitive: true,
		}, {
			Name:    "hash_type",
			Default: "auto",
			Help: `Specify the hash in use for the backend.

This specifies the hash type in use. If set to "auto" it will use the
default hash which is QuickXorHash.

Before rclone 1.62 an SHA1 hash was used by default for Onedrive
Personal. For 1.62 and later the default is to use a QuickXorHash for
all onedrive types. If an SHA1 hash is desired then set this option
accordingly.

From July 2023 QuickXorHash will be the only available hash for
both OneDrive for Business and OneDrive Personal.

This can be set to "none" to not use any hashes.

If the hash requested does not exist on the object, it will be
returned as an empty string which is treated as a missing hash by
rclone.
`,
			Examples: []fs.OptionExample{{
				Value: "auto",
				Help:  "Rclone chooses the best hash",
			}, {
				Value: "quickxor",
				Help:  "QuickXor",
			}, {
				Value: "sha1",
				Help:  "SHA1",
			}, {
				Value: "sha256",
				Help:  "SHA256",
			}, {
				Value: "crc32",
				Help:  "CRC32",
			}, {
				Value: "none",
				Help:  "None - don't use any hashes",
			}},
			Advanced: true,
		}, {
			Name:    "av_override",
			Default: false,
			Help: `Allows download of files the server thinks has a virus.

The onedrive/sharepoint server may check files uploaded with an Anti
Virus checker. If it detects any potential viruses or malware it will
block download of the file.

In this case you will see a message like this

    server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden: 

If you are 100% sure you want to download this file anyway then use
the --onedrive-av-override flag, or av_override = true in the config
file.
`,
			Advanced: true,
		}, {
			Name:    "delta",
			Default: false,
			// The "|" characters in the raw string are turned into
			// backquotes so the help can contain markdown code markup.
			Help: strings.ReplaceAll(`If set rclone will use delta listing to implement recursive listings.

If this flag is set the onedrive backend will advertise |ListR|
support for recursive listings.

Setting this flag speeds up these things greatly:

    rclone lsf -R onedrive:
    rclone size onedrive:
    rclone rc vfs/refresh recursive=true

**However** the delta listing API **only** works at the root of the
drive. If you use it not at the root then it recurses from the root
and discards all the data that is not under the directory you asked
for. So it will be correct but may not be very efficient.

This is why this flag is not set as the default.

As a rule of thumb if nearly all of your data is under rclone's root
directory (the |root/directory| in |onedrive:root/directory|) then
using this flag will be be a big performance win. If your data is
mostly not under the root then using this flag will be a big
performance loss.

It is recommended if you are mounting your onedrive at the root
(or near the root when using crypt) and using rclone |rc vfs/refresh|.
`, "|", "`"),
			Advanced: true,
		}, {
			Name: "metadata_permissions",
			Help: `Control whether permissions should be read or written in metadata.

Reading permissions metadata from files can be done quickly, but it
isn't always desirable to set the permissions from the metadata.
`,
			Advanced: true,
			Default:  rwOff,
			Examples: rwExamples,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// List of replaced characters:
			//   < (less than)     -> '<' // FULLWIDTH LESS-THAN SIGN
			//   > (greater than)  -> '>' // FULLWIDTH GREATER-THAN SIGN
			//   : (colon)         -> ':' // FULLWIDTH COLON
			//   " (double quote)  -> '"' // FULLWIDTH QUOTATION MARK
			//   \ (backslash)     -> '\' // FULLWIDTH REVERSE SOLIDUS
			//   | (vertical line) -> '|' // FULLWIDTH VERTICAL LINE
			//   ? (question mark) -> '?' // FULLWIDTH QUESTION MARK
			//   * (asterisk)      -> '*' // FULLWIDTH ASTERISK
			//
			// Folder names cannot begin with a tilde ('~')
			// List of replaced characters:
			//   ~ (tilde)        -> '~'  // FULLWIDTH TILDE
			//
			// Additionally names can't begin with a space ( ) or end with a period (.) or space ( ).
			// List of replaced characters:
			//   . (period)        -> '.' // FULLWIDTH FULL STOP
			//     (space)         -> '␠'  // SYMBOL FOR SPACE
			//
			// Also encode invalid UTF-8 bytes as json doesn't handle them.
			//
			// The OneDrive API documentation lists the set of reserved characters, but
			// testing showed this list is incomplete. This are the differences:
			//  - " (double quote) is rejected, but missing in the documentation
			//  - space at the end of file and folder names is rejected, but missing in the documentation
			//  - period at the end of file names is rejected, but missing in the documentation
			//
			// Adding these restrictions to the OneDrive API documentation yields exactly
			// the same rules as the Windows naming conventions.
			//
			// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/addressing-driveitems?view=odsp-graph-online#path-encoding
			Default: (encoder.Display |
				encoder.EncodeBackSlash |
				encoder.EncodeLeftSpace |
				encoder.EncodeLeftTilde |
				encoder.EncodeRightPeriod |
				encoder.EncodeRightSpace |
				encoder.EncodeWin |
				encoder.EncodeInvalidUtf8),
		}}...),
	})
}
   427  
   428  // Get the region and graphURL from the config
   429  func getRegionURL(m configmap.Mapper) (region, graphURL string) {
   430  	region, _ = m.Get("region")
   431  	graphURL = graphAPIEndpoint[region] + "/v1.0"
   432  	return region, graphURL
   433  }
   434  
// Config for chooseDrive
type chooseDriveOpt struct {
	opts         rest.Opts // request used to list the candidate drives
	finalDriveID string    // if set, skip discovery and offer just this drive ID
	siteID       string    // if set, list the drives of this SharePoint site
	relativePath string    // if set, resolve this server-relative path to a site first
}
   442  
   443  // chooseDrive returns a query to choose which drive the user is interested in
   444  func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest.Client, opt chooseDriveOpt) (*fs.ConfigOut, error) {
   445  	_, graphURL := getRegionURL(m)
   446  
   447  	// if we use server-relative URL for finding the drive
   448  	if opt.relativePath != "" {
   449  		opt.opts = rest.Opts{
   450  			Method:  "GET",
   451  			RootURL: graphURL,
   452  			Path:    "/sites/root:" + opt.relativePath,
   453  		}
   454  		site := api.SiteResource{}
   455  		_, err := srv.CallJSON(ctx, &opt.opts, nil, &site)
   456  		if err != nil {
   457  			return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available site by relative path: %v", err))
   458  		}
   459  		opt.siteID = site.SiteID
   460  	}
   461  
   462  	// if we have a siteID we need to ask for the drives
   463  	if opt.siteID != "" {
   464  		opt.opts = rest.Opts{
   465  			Method:  "GET",
   466  			RootURL: graphURL,
   467  			Path:    "/sites/" + opt.siteID + "/drives",
   468  		}
   469  	}
   470  
   471  	drives := api.DrivesResponse{}
   472  
   473  	// We don't have the final ID yet?
   474  	// query Microsoft Graph
   475  	if opt.finalDriveID == "" {
   476  		_, err := srv.CallJSON(ctx, &opt.opts, nil, &drives)
   477  		if err != nil {
   478  			return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err))
   479  		}
   480  
   481  		// Also call /me/drive as sometimes /me/drives doesn't return it #4068
   482  		if opt.opts.Path == "/me/drives" {
   483  			opt.opts.Path = "/me/drive"
   484  			meDrive := api.DriveResource{}
   485  			_, err := srv.CallJSON(ctx, &opt.opts, nil, &meDrive)
   486  			if err != nil {
   487  				return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err))
   488  			}
   489  			found := false
   490  			for _, drive := range drives.Drives {
   491  				if drive.DriveID == meDrive.DriveID {
   492  					found = true
   493  					break
   494  				}
   495  			}
   496  			// add the me drive if not found already
   497  			if !found {
   498  				fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
   499  				drives.Drives = append(drives.Drives, meDrive)
   500  			}
   501  		}
   502  	} else {
   503  		drives.Drives = append(drives.Drives, api.DriveResource{
   504  			DriveID:   opt.finalDriveID,
   505  			DriveName: "Chosen Drive ID",
   506  			DriveType: "drive",
   507  		})
   508  	}
   509  	if len(drives.Drives) == 0 {
   510  		return fs.ConfigError("choose_type", "No drives found")
   511  	}
   512  	return fs.ConfigChoose("driveid_final", "config_driveid", "Select drive you want to use", len(drives.Drives), func(i int) (string, string) {
   513  		drive := drives.Drives[i]
   514  		return drive.DriveID, fmt.Sprintf("%s (%s)", drive.DriveName, drive.DriveType)
   515  	})
   516  }
   517  
// Config the backend
//
// This is a state machine driven by config.State. It starts with the
// OAuth flow, then asks what kind of connection is wanted (OneDrive,
// SharePoint, URL, search, explicit IDs or path), resolves the answer
// to a drive ID via chooseDrive, verifies that drive, and finally
// saves the drive ID and drive type into the config.
//
// Note that the config parameter shadows the imported config package
// within this function.
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
	region, graphURL := getRegionURL(m)

	// Empty state: start of the config. Work out which scopes to
	// request and which regional endpoints to use, then run the OAuth
	// flow, resuming at state "choose_type" when it completes.
	if config.State == "" {
		var accessScopes fs.SpaceSepList
		accessScopesString, _ := m.Get("access_scopes")
		err := accessScopes.Set(accessScopesString)
		if err != nil {
			return nil, fmt.Errorf("failed to parse access_scopes: %w", err)
		}
		oauthConfig.Scopes = []string(accessScopes)
		// Deprecated option: drop Sites.Read.All if requested
		disableSitePermission, _ := m.Get("disable_site_permission")
		if disableSitePermission == "true" {
			oauthConfig.Scopes = scopeAccessWithoutSites
		}
		oauthConfig.Endpoint = oauth2.Endpoint{
			AuthURL:  authEndpoint[region] + authPath,
			TokenURL: authEndpoint[region] + tokenPath,
		}
		return oauthutil.ConfigOut("choose_type", &oauthutil.Options{
			OAuth2Config: oauthConfig,
		})
	}

	// All the remaining states need an authenticated client
	oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
	}
	srv := rest.NewClient(oAuthClient)

	switch config.State {
	case "choose_type":
		// Ask which kind of connection the user wants
		return fs.ConfigChooseExclusiveFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
			Value: "onedrive",
			Help:  "OneDrive Personal or Business",
		}, {
			Value: "sharepoint",
			Help:  "Root Sharepoint site",
		}, {
			Value: "url",
			Help:  "Sharepoint site name or URL\nE.g. mysite or https://contoso.sharepoint.com/sites/mysite",
		}, {
			Value: "search",
			Help:  "Search for a Sharepoint site",
		}, {
			Value: "driveid",
			Help:  "Type in driveID (advanced)",
		}, {
			Value: "siteid",
			Help:  "Type in SiteID (advanced)",
		}, {
			Value: "path",
			Help:  "Sharepoint server-relative path (advanced)\nE.g. /teams/hr",
		}})
	case "choose_type_done":
		// Jump to next state according to config chosen
		return fs.ConfigGoto(config.Result)
	case "onedrive":
		// List the user's own drives
		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
			opts: rest.Opts{
				Method:  "GET",
				RootURL: graphURL,
				Path:    "/me/drives",
			},
		})
	case "sharepoint":
		// List the drives of the root SharePoint site
		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
			opts: rest.Opts{
				Method:  "GET",
				RootURL: graphURL,
				Path:    "/sites/root/drives",
			},
		})
	case "driveid":
		// Ask for a drive ID typed in directly
		return fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
	case "driveid_end":
		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
			finalDriveID: config.Result,
		})
	case "siteid":
		// Ask for a site ID typed in directly
		return fs.ConfigInput("siteid_end", "config_siteid", "Site ID")
	case "siteid_end":
		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
			siteID: config.Result,
		})
	case "url":
		return fs.ConfigInput("url_end", "config_site_url", `Site URL

Examples:
- "mysite"
- "https://XXX.sharepoint.com/sites/mysite"
- "https://XXX.sharepoint.com/teams/ID"
`)
	case "url_end":
		// Accept either a full sharepoint.com URL (use its path part)
		// or a bare site name (prefix with /sites/)
		siteURL := config.Result
		re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
		match := re.FindStringSubmatch(siteURL)
		if len(match) == 2 {
			return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
				relativePath: match[1],
			})
		}
		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
			relativePath: "/sites/" + siteURL,
		})
	case "path":
		return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`)
	case "path_end":
		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
			relativePath: config.Result,
		})
	case "search":
		return fs.ConfigInput("search_end", "config_search_term", `Search term`)
	case "search_end":
		// Search Graph for matching sites and offer them as a choice
		searchTerm := config.Result
		opts := rest.Opts{
			Method:  "GET",
			RootURL: graphURL,
			Path:    "/sites?search=" + searchTerm,
		}

		sites := api.SiteResponse{}
		_, err := srv.CallJSON(ctx, &opts, nil, &sites)
		if err != nil {
			return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available sites: %v", err))
		}

		if len(sites.Sites) == 0 {
			return fs.ConfigError("choose_type", fmt.Sprintf("search for %q returned no results", searchTerm))
		}
		return fs.ConfigChoose("search_sites", "config_site", `Select the Site you want to use`, len(sites.Sites), func(i int) (string, string) {
			site := sites.Sites[i]
			return site.SiteID, fmt.Sprintf("%s (%s)", site.SiteName, site.SiteURL)
		})
	case "search_sites":
		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
			siteID: config.Result,
		})
	case "driveid_final":
		finalDriveID := config.Result

		// Test the driveID and get drive type
		opts := rest.Opts{
			Method:  "GET",
			RootURL: graphURL,
			Path:    "/drives/" + finalDriveID + "/root",
		}
		var rootItem api.Item
		_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
		if err != nil {
			return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query root for drive %q: %v", finalDriveID, err))
		}

		// Save the verified drive ID and type into the config
		m.Set(configDriveID, finalDriveID)
		m.Set(configDriveType, rootItem.ParentReference.DriveType)

		return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL))
	case "driveid_final_end":
		// Finished if the user confirmed, otherwise start over
		if config.Result == "true" {
			return nil, nil
		}
		return fs.ConfigGoto("choose_type")
	}
	return nil, fmt.Errorf("unknown state %q", config.State)
}
   684  
// Options defines the configuration for this backend
//
// Each field maps via its config tag to the option of the same name
// registered in init().
type Options struct {
	Region                  string               `config:"region"`                     // national cloud region (global, us, de, cn)
	ChunkSize               fs.SizeSuffix        `config:"chunk_size"`                 // upload chunk size, multiple of 320k
	DriveID                 string               `config:"drive_id"`                   // ID of the drive to use
	DriveType               string               `config:"drive_type"`                 // personal | business | documentLibrary
	RootFolderID            string               `config:"root_folder_id"`             // optional explicit root folder ID
	DisableSitePermission   bool                 `config:"disable_site_permission"`    // deprecated: drop Sites.Read.All scope
	AccessScopes            fs.SpaceSepList      `config:"access_scopes"`              // OAuth scopes to request
	ExposeOneNoteFiles      bool                 `config:"expose_onenote_files"`       // show OneNote files in listings
	ServerSideAcrossConfigs bool                 `config:"server_side_across_configs"` // deprecated: use the global flag
	ListChunk               int64                `config:"list_chunk"`                 // size of listing chunk
	NoVersions              bool                 `config:"no_versions"`                // remove versions after modifying operations
	LinkScope               string               `config:"link_scope"`                 // scope for links made by the link command
	LinkType                string               `config:"link_type"`                  // type of links made by the link command
	LinkPassword            string               `config:"link_password"`              // password for links made by the link command
	HashType                string               `config:"hash_type"`                  // hash to use: auto, quickxor, sha1, ...
	AVOverride              bool                 `config:"av_override"`                // download files the server flags as infected
	Delta                   bool                 `config:"delta"`                      // use delta listing for recursive listings
	Enc                     encoder.MultiEncoder `config:"encoding"`                   // filename encoding rules
	MetadataPermissions     rwChoice             `config:"metadata_permissions"`       // read/write permissions in metadata
}
   707  
// Fs represents a remote OneDrive
//
// It carries the parsed options, the authenticated and unauthenticated
// REST clients, and the directory cache used to map paths to item IDs.
type Fs struct {
	name         string             // name of this remote
	root         string             // the path we are working on
	opt          Options            // parsed options
	ci           *fs.ConfigInfo     // global config
	features     *fs.Features       // optional features
	srv          *rest.Client       // the connection to the OneDrive server
	unAuth       *rest.Client       // no authentication connection to the OneDrive server
	dirCache     *dircache.DirCache // Map of directory path to directory id
	pacer        *fs.Pacer          // pacer for API calls
	tokenRenewer *oauthutil.Renew   // renew the token on expiry
	driveID      string             // ID to use for querying Microsoft Graph
	driveType    string             // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
	hashType     hash.Type          // type of the hash we are using
}
   724  
// Object describes a OneDrive object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs            *Fs       // what this object is part of
	remote        string    // The remote path
	hasMetaData   bool      // whether info below has been set
	isOneNoteFile bool      // Whether the object is a OneNote file
	size          int64     // size of the object
	modTime       time.Time // modification time of the object
	id            string    // ID of the object
	hash          string    // Hash of the content, usually QuickXorHash but set as hash_type
	mimeType      string    // Content-Type of object from server (may not be as uploaded)
	meta          *Metadata // metadata properties
}
   740  
// Directory describes a OneDrive directory
//
// Mirrors Object but for folders, carrying the aggregate size and
// item count where known.
type Directory struct {
	fs     *Fs       // what this object is part of
	remote string    // The remote path
	size   int64     // size of directory and contents or -1 if unknown
	items  int64     // number of objects or -1 for unknown
	id     string    // dir ID
	meta   *Metadata // metadata properties
}
   750  
   751  // ------------------------------------------------------------
   752  
   753  // Name of the remote (as passed into NewFs)
   754  func (f *Fs) Name() string {
   755  	return f.name
   756  }
   757  
   758  // Root of the remote (as passed into NewFs)
   759  func (f *Fs) Root() string {
   760  	return f.root
   761  }
   762  
   763  // String converts this Fs to a string
   764  func (f *Fs) String() string {
   765  	return fmt.Sprintf("OneDrive root '%s'", f.root)
   766  }
   767  
   768  // Features returns the optional features of this Fs
   769  func (f *Fs) Features() *fs.Features {
   770  	return f.features
   771  }
   772  
   773  // parsePath parses a OneDrive 'url'
   774  func parsePath(path string) (root string) {
   775  	root = strings.Trim(path, "/")
   776  	return
   777  }
   778  
// retryErrorCodes is a slice of error codes that we will retry
// (used by shouldRetry via fserrors.ShouldRetryHTTP)
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}
   788  
var (
	// gatewayTimeoutError makes sure the 504 chunk-size advice in
	// shouldRetry is only logged once per run
	gatewayTimeoutError     sync.Once
	// errAsyncJobAccessDenied is returned by waitForJob when the async
	// job reports an "AccessDenied_" error code
	errAsyncJobAccessDenied = errors.New("async job failed - access denied")
)
   793  
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried.  It returns the err as a convenience
// so the result can be returned directly from a pacer callback.
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	// Never retry if the context has been cancelled or timed out
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	retry := false
	if resp != nil {
		switch resp.StatusCode {
		case 400:
			// A "pathIsTooLong" inner error is permanent - mark as not retryable
			if apiErr, ok := err.(*api.Error); ok {
				if apiErr.ErrorInfo.InnerError.Code == "pathIsTooLong" {
					return false, fserrors.NoRetryError(err)
				}
			}
		case 401:
			// Retry when the token has expired (it will be refreshed) or on
			// the transient "Unable to initialize RPS" server error
			if len(resp.Header["Www-Authenticate"]) == 1 && strings.Contains(resp.Header["Www-Authenticate"][0], "expired_token") {
				retry = true
				fs.Debugf(nil, "Should retry: %v", err)
			} else if err != nil && strings.Contains(err.Error(), "Unable to initialize RPS") {
				retry = true
				fs.Debugf(nil, "HTTP 401: Unable to initialize RPS. Trying again.")
			}
		case 429: // Too Many Requests.
			// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
			// Honour the server-supplied Retry-After interval if it parses
			if values := resp.Header["Retry-After"]; len(values) == 1 && values[0] != "" {
				retryAfter, parseErr := strconv.Atoi(values[0])
				if parseErr != nil {
					fs.Debugf(nil, "Failed to parse Retry-After: %q: %v", values[0], parseErr)
				} else {
					duration := time.Second * time.Duration(retryAfter)
					retry = true
					err = pacer.RetryAfterError(err, duration)
					fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", retryAfter)
				}
			}
		case 504: // Gateway timeout
			// Log the chunk-size advice only once per run
			gatewayTimeoutError.Do(func() {
				fs.Errorf(nil, "%v: upload chunks may be taking too long - try reducing --onedrive-chunk-size or decreasing --transfers", err)
			})
		case 507: // Insufficient Storage
			// Out of quota - retrying will not help
			return false, fserrors.FatalError(err)
		}
	}
	// Fall back to the generic retry checks (includes retryErrorCodes)
	return retry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
   840  
// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
// if `relPath` == "", it reads the metadata for the item with that ID.
//
// We address items using the pattern `drives/driveID/items/itemID:/relativePath`
// instead of simply using `drives/driveID/root:/itemPath` because it works for
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
//
// If `relPath` == ”, do not append the slash (See #3664)
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
	// NOTE(review): the second return value of newOptsCallWithIDPath is
	// discarded here - confirm it is always benign for a GET with an
	// ID-relative path.
	opts, _ := f.newOptsCallWithIDPath(normalizedID, relPath, true, "GET", "")

	// Issue the request via the pacer so throttling and retries are honoured
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
		return shouldRetry(ctx, resp, err)
	})

	return info, resp, err
}
   860  
// readMetaDataForPath reads the metadata from the path (relative to the absolute root)
//
// It returns the item info, the HTTP response (which may be inspected
// for the status code even on error) and any error.
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, resp *http.Response, err error) {
	firstSlashIndex := strings.IndexRune(path, '/')

	// Anything other than OneDrive Personal, or a top-level path, can be
	// addressed directly by path
	if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
		opts := f.newOptsCallWithPath(ctx, path, "GET", "")
		opts.Path = strings.TrimSuffix(opts.Path, ":")
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
			return shouldRetry(ctx, resp, err)
		})
		return info, resp, err
	}

	// The following branch handles the case when we're using OneDrive Personal and the path is in a folder.
	// For OneDrive Personal, we need to consider the "shared with me" folders.
	// An item in such a folder can only be addressed by its ID relative to the sharer's driveID or
	// by its path relative to the folder's ID relative to the sharer's driveID.
	// Note: A "shared with me" folder can only be placed in the sharee's absolute root.
	// So we read metadata relative to a suitable folder's normalized ID.
	var dirCacheFoundRoot bool
	var rootNormalizedID string
	if f.dirCache != nil {
		rootNormalizedID, err = f.dirCache.RootID(ctx, false)
		dirCacheRootIDExists := err == nil
		if f.root == "" {
			// if f.root == "", it means f.root is the absolute root of the drive
			// and its ID should have been found in NewFs
			dirCacheFoundRoot = dirCacheRootIDExists
		} else if _, err := f.dirCache.RootParentID(ctx, false); err == nil {
			// if root is in a folder, it must have a parent folder, and
			// if dirCache has found root in NewFs, the parent folder's ID
			// should be present.
			// This RootParentID() check is a fix for #3164 which describes
			// a possible case where the root is not found.
			dirCacheFoundRoot = dirCacheRootIDExists
		}
	}

	relPath, insideRoot := getRelativePathInsideBase(f.root, path)
	var firstDir, baseNormalizedID string
	if !insideRoot || !dirCacheFoundRoot {
		// We do not have the normalized ID in dirCache for our query to base on. Query it manually.
		// Note this recurses to resolve the first path component to an ID.
		firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
		info, resp, err := f.readMetaDataForPath(ctx, firstDir)
		if err != nil {
			return info, resp, err
		}
		baseNormalizedID = info.GetID()
	} else {
		if f.root != "" {
			// Read metadata based on root
			baseNormalizedID = rootNormalizedID
		} else {
			// Read metadata based on firstDir
			firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
			baseNormalizedID, err = f.dirCache.FindDir(ctx, firstDir, false)
			if err != nil {
				return nil, nil, err
			}
		}
	}

	// Finally address the item relative to the base folder's normalized ID
	return f.readMetaDataForPathRelativeToID(ctx, baseNormalizedID, relPath)
}
   926  
   927  // errorHandler parses a non 2xx error response into an error
   928  func errorHandler(resp *http.Response) error {
   929  	// Decode error response
   930  	errResponse := new(api.Error)
   931  	err := rest.DecodeJSON(resp, &errResponse)
   932  	if err != nil {
   933  		fs.Debugf(nil, "Couldn't decode error response: %v", err)
   934  	}
   935  	if errResponse.ErrorInfo.Code == "" {
   936  		errResponse.ErrorInfo.Code = resp.Status
   937  	}
   938  	return errResponse
   939  }
   940  
   941  func checkUploadChunkSize(cs fs.SizeSuffix) error {
   942  	const minChunkSize = fs.SizeSuffixBase
   943  	if cs%chunkSizeMultiple != 0 {
   944  		return fmt.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
   945  	}
   946  	if cs < minChunkSize {
   947  		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
   948  	}
   949  	return nil
   950  }
   951  
   952  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   953  	err = checkUploadChunkSize(cs)
   954  	if err == nil {
   955  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
   956  	}
   957  	return
   958  }
   959  
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	// Validate the chunk size before doing anything expensive
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, fmt.Errorf("onedrive: chunk size: %w", err)
	}

	// drive_id and drive_type are written by `rclone config` and are required
	if opt.DriveID == "" || opt.DriveType == "" {
		return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
	}

	// All API calls are made relative to the configured drive; the API
	// and auth endpoints depend on the configured region
	rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
	oauthConfig.Scopes = opt.AccessScopes
	if opt.DisableSitePermission {
		oauthConfig.Scopes = scopeAccessWithoutSites
	}
	oauthConfig.Endpoint = oauth2.Endpoint{
		AuthURL:  authEndpoint[opt.Region] + authPath,
		TokenURL: authEndpoint[opt.Region] + tokenPath,
	}

	client := fshttp.NewClient(ctx)
	root = parsePath(root)
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, client)
	if err != nil {
		return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
	}

	ci := fs.GetConfig(ctx)
	// srv is authenticated, unAuth uses the plain client
	f := &Fs{
		name:      name,
		root:      root,
		opt:       *opt,
		ci:        ci,
		driveID:   opt.DriveID,
		driveType: opt.DriveType,
		srv:       rest.NewClient(oAuthClient).SetRoot(rootURL),
		unAuth:    rest.NewClient(client).SetRoot(rootURL),
		pacer:     fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		hashType:  QuickXorHashType,
	}
	f.features = (&fs.Features{
		CaseInsensitive:          true,
		ReadMimeType:             true,
		WriteMimeType:            false,
		CanHaveEmptyDirectories:  true,
		ServerSideAcrossConfigs:  opt.ServerSideAcrossConfigs,
		ReadMetadata:             true,
		WriteMetadata:            true,
		UserMetadata:             false,
		ReadDirMetadata:          true,
		WriteDirMetadata:         true,
		WriteDirSetModTime:       true,
		UserDirMetadata:          false,
		DirModTimeUpdatesOnWrite: false,
	}).Fill(ctx, f)
	f.srv.SetErrorHandler(errorHandler)

	// Set the user defined hash ("auto" or empty selects QuickXorHash)
	if opt.HashType == "auto" || opt.HashType == "" {
		opt.HashType = QuickXorHashType.String()
	}
	err = f.hashType.Set(opt.HashType)
	if err != nil {
		return nil, err
	}

	// Disable change polling in China region
	// See: https://github.com/rclone/rclone/issues/6444
	if f.opt.Region == regionCN {
		f.features.ChangeNotify = nil
	}

	// Renew the token in the background using a cheap metadata read
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, _, err := f.readMetaDataForPath(ctx, "")
		return err
	})

	// Get rootID - from the config if set, otherwise by asking the drive
	rootID := opt.RootFolderID
	if rootID == "" {
		rootInfo, _, err := f.readMetaDataForPath(ctx, "")
		if err != nil {
			return nil, fmt.Errorf("failed to get root: %w", err)
		}
		rootID = rootInfo.GetID()
	}
	if rootID == "" {
		return nil, errors.New("failed to get root: ID was empty")
	}

	f.dirCache = dircache.New(root, rootID, f)

	// ListR only supported if delta set
	if !f.opt.Delta {
		f.features.ListR = nil
	}

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
  1098  
  1099  // rootSlash returns root with a slash on if it is empty, otherwise empty string
  1100  func (f *Fs) rootSlash() string {
  1101  	if f.root == "" {
  1102  		return f.root
  1103  	}
  1104  	return f.root + "/"
  1105  }
  1106  
  1107  // Return an Object from a path
  1108  //
  1109  // If it can't be found it returns the error fs.ErrorObjectNotFound.
  1110  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
  1111  	o := &Object{
  1112  		fs:     f,
  1113  		remote: remote,
  1114  		meta:   f.newMetadata(remote),
  1115  	}
  1116  	var err error
  1117  	if info != nil {
  1118  		// Set info
  1119  		err = o.setMetaData(info)
  1120  	} else {
  1121  		err = o.readMetaData(ctx) // reads info and meta, returning an error
  1122  	}
  1123  	if err != nil {
  1124  		return nil, err
  1125  	}
  1126  	return o, nil
  1127  }
  1128  
  1129  // NewObject finds the Object at remote.  If it can't be found
  1130  // it returns the error fs.ErrorObjectNotFound.
  1131  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
  1132  	return f.newObjectWithInfo(ctx, remote, nil)
  1133  }
  1134  
  1135  // FindLeaf finds a directory of name leaf in the folder with ID pathID
  1136  func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
  1137  	// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
  1138  	_, ok := f.dirCache.GetInv(pathID)
  1139  	if !ok {
  1140  		return "", false, errors.New("couldn't find parent ID")
  1141  	}
  1142  	info, resp, err := f.readMetaDataForPathRelativeToID(ctx, pathID, leaf)
  1143  	if err != nil {
  1144  		if resp != nil && resp.StatusCode == http.StatusNotFound {
  1145  			return "", false, nil
  1146  		}
  1147  		return "", false, err
  1148  	}
  1149  	if info.GetPackageType() == api.PackageTypeOneNote {
  1150  		return "", false, errors.New("found OneNote file when looking for folder")
  1151  	}
  1152  	if info.GetFolder() == nil {
  1153  		return "", false, errors.New("found file when looking for folder")
  1154  	}
  1155  	return info.GetID(), true, nil
  1156  }
  1157  
  1158  // CreateDir makes a directory with pathID as parent and name leaf
  1159  func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, err error) {
  1160  	// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
  1161  	var resp *http.Response
  1162  	var info *api.Item
  1163  	opts := f.newOptsCall(dirID, "POST", "/children")
  1164  	mkdir := api.CreateItemRequest{
  1165  		Name:             f.opt.Enc.FromStandardName(leaf),
  1166  		ConflictBehavior: "fail",
  1167  	}
  1168  	err = f.pacer.Call(func() (bool, error) {
  1169  		resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
  1170  		return shouldRetry(ctx, resp, err)
  1171  	})
  1172  	if err != nil {
  1173  		// fmt.Printf("...Error %v\n", err)
  1174  		return "", err
  1175  	}
  1176  
  1177  	// fmt.Printf("...Id %q\n", *info.Id)
  1178  	return info.GetID(), nil
  1179  }
  1180  
// listAllFn is the user function called by listAll/_listAll to process
// each File item found in a listing.
//
// If an error is returned then processing stops and the error is
// propagated to the caller.
type listAllFn func(*api.Item) error
  1188  
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// This listing function works on both normal listings and delta listings
//
// opts describes the request for the first page.  Each page is decoded
// into result; pValue and pNextLink must point at the items slice and
// the next-page link inside result so this generic pagination loop can
// read them after each decode.
func (f *Fs) _listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn, opts *rest.Opts, result any, pValue *[]api.Item, pNextLink *string) (err error) {
	for {
		// Fetch one page of results
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, opts, nil, result)
			return shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return fmt.Errorf("couldn't list files: %w", err)
		}
		if len(*pValue) == 0 {
			break
		}
		for i := range *pValue {
			item := &(*pValue)[i]
			// Apply the directories/files filters
			isFolder := item.GetFolder() != nil
			if isFolder {
				if filesOnly {
					continue
				}
			} else {
				if directoriesOnly {
					continue
				}
			}
			// Skip tombstones (delta listings include deletions)
			if item.Deleted != nil {
				continue
			}
			item.Name = f.opt.Enc.ToStandardName(item.GetName())
			err = fn(item)
			if err != nil {
				return err
			}
		}
		if *pNextLink == "" {
			break
		}
		// Follow the next-page link: it is a complete URL so clear the
		// path and parameters and use it as the root URL instead
		opts.Path = ""
		opts.Parameters = nil
		opts.RootURL = *pNextLink
		// reset results
		*pNextLink = ""
		*pValue = nil
	}
	return nil
}
  1240  
  1241  // Lists the directory required calling the user function on each item found
  1242  //
  1243  // If the user fn ever returns true then it early exits with found = true
  1244  func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (err error) {
  1245  	// Top parameter asks for bigger pages of data
  1246  	// https://dev.onedrive.com/odata/optional-query-parameters.htm
  1247  	opts := f.newOptsCall(dirID, "GET", fmt.Sprintf("/children?$top=%d", f.opt.ListChunk))
  1248  	var result api.ListChildrenResponse
  1249  	return f._listAll(ctx, dirID, directoriesOnly, filesOnly, fn, &opts, &result, &result.Value, &result.NextLink)
  1250  }
  1251  
  1252  // Convert a list item into a DirEntry
  1253  //
  1254  // Can return nil for an item which should be skipped
  1255  func (f *Fs) itemToDirEntry(ctx context.Context, dir string, info *api.Item) (entry fs.DirEntry, err error) {
  1256  	if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
  1257  		fs.Debugf(info.Name, "OneNote file not shown in directory listing")
  1258  		return nil, nil
  1259  	}
  1260  	remote := path.Join(dir, info.GetName())
  1261  	folder := info.GetFolder()
  1262  	if folder != nil {
  1263  		// cache the directory ID for later lookups
  1264  		id := info.GetID()
  1265  		f.dirCache.Put(remote, id)
  1266  		d := f.newDir(id, remote)
  1267  		d.items = folder.ChildCount
  1268  		f.setSystemMetadata(info, d.meta, remote, dirMimeType)
  1269  		entry = d
  1270  	} else {
  1271  		o, err := f.newObjectWithInfo(ctx, remote, info)
  1272  		if err != nil {
  1273  			return nil, err
  1274  		}
  1275  		entry = o
  1276  	}
  1277  	return entry, nil
  1278  }
  1279  
  1280  // List the objects and directories in dir into entries.  The
  1281  // entries can be returned in any order but should be for a
  1282  // complete directory.
  1283  //
  1284  // dir should be "" to list the root, and should not have
  1285  // trailing slashes.
  1286  //
  1287  // This should return ErrDirNotFound if the directory isn't
  1288  // found.
  1289  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
  1290  	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
  1291  	if err != nil {
  1292  		return nil, err
  1293  	}
  1294  	err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) error {
  1295  		entry, err := f.itemToDirEntry(ctx, dir, info)
  1296  		if err != nil {
  1297  			return err
  1298  		}
  1299  		if entry == nil {
  1300  			return nil
  1301  		}
  1302  		entries = append(entries, entry)
  1303  		return nil
  1304  	})
  1305  	if err != nil {
  1306  		return nil, err
  1307  	}
  1308  	return entries, nil
  1309  }
  1310  
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order.  If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	// Make sure this ID is in the directory cache
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}

	// ListR only works at the root of a onedrive, not on a folder
	// So we have to filter things outside of the root which is
	// inefficient.

	list := walk.NewListRHelper(callback)

	// list a folder conventionally - used for shared folders
	// (shared folders are not included in the delta listing)
	var listFolder func(dir string) error
	listFolder = func(dir string) error {
		entries, err := f.List(ctx, dir)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			// Recurse into subdirectories
			if _, isDir := entry.(fs.Directory); isDir {
				err = listFolder(entry.Remote())
				if err != nil {
					return err
				}
			}
		}
		return nil
	}

	// This code relies on the fact that directories are sent before their children. This isn't
	// mentioned in the docs though, so maybe it shouldn't be relied on.
	seen := map[string]struct{}{}
	fn := func(info *api.Item) error {
		var parentPath string
		var ok bool
		id := info.GetID()
		// The API can produce duplicates, so skip them
		if _, found := seen[id]; found {
			return nil
		}
		seen[id] = struct{}{}
		// Skip the root directory
		if id == directoryID {
			return nil
		}
		// Skip deleted items
		if info.Deleted != nil {
			return nil
		}
		dirID := info.GetParentReference().GetID()
		// Skip files that don't have their parent directory
		// cached as they are outside the root.
		parentPath, ok = f.dirCache.GetInv(dirID)
		if !ok {
			return nil
		}
		// Skip files not under the root directory
		remote := path.Join(parentPath, info.GetName())
		if dir != "" && !strings.HasPrefix(remote, dir+"/") {
			return nil
		}
		entry, err := f.itemToDirEntry(ctx, parentPath, info)
		if err != nil {
			return err
		}
		if entry == nil {
			return nil
		}
		err = list.Add(entry)
		if err != nil {
			return err
		}
		// If this is a shared folder, we'll need list it too
		if info.RemoteItem != nil && info.RemoteItem.Folder != nil {
			fs.Debugf(remote, "Listing shared directory")
			return listFolder(remote)
		}
		return nil
	}

	// Use the delta endpoint to enumerate the whole drive in one pass
	opts := rest.Opts{
		Method: "GET",
		Path:   "/root/delta",
		Parameters: map[string][]string{
			// "token": {token},
			"$top": {fmt.Sprintf("%d", f.opt.ListChunk)},
		},
	}

	var result api.DeltaResponse
	err = f._listAll(ctx, "", false, false, fn, &opts, &result, &result.Value, &result.NextLink)
	if err != nil {
		return err
	}

	// Send any remaining buffered entries to the callback
	return list.Flush()
}
  1430  
  1431  // Shutdown shutdown the fs
  1432  func (f *Fs) Shutdown(ctx context.Context) error {
  1433  	f.tokenRenewer.Shutdown()
  1434  	return nil
  1435  }
  1436  
  1437  // Creates from the parameters passed in a half finished Object which
  1438  // must have setMetaData called on it
  1439  //
  1440  // Returns the object, leaf, directoryID and error.
  1441  //
  1442  // Used to create new objects
  1443  func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
  1444  	// Create the directory for the object if it doesn't exist
  1445  	leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
  1446  	if err != nil {
  1447  		return nil, leaf, directoryID, err
  1448  	}
  1449  	// Temporary Object under construction
  1450  	o = &Object{
  1451  		fs:     f,
  1452  		remote: remote,
  1453  	}
  1454  	return o, leaf, directoryID, nil
  1455  }
  1456  
  1457  // Put the object into the container
  1458  //
  1459  // Copy the reader in to the new object which is returned.
  1460  //
  1461  // The new object may have been created if an error is returned
  1462  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1463  	remote := src.Remote()
  1464  	size := src.Size()
  1465  	modTime := src.ModTime(ctx)
  1466  
  1467  	o, _, _, err := f.createObject(ctx, remote, modTime, size)
  1468  	if err != nil {
  1469  		return nil, err
  1470  	}
  1471  	return o, o.Update(ctx, in, src, options...)
  1472  }
  1473  
  1474  // Mkdir creates the container if it doesn't exist
  1475  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
  1476  	_, err := f.dirCache.FindDir(ctx, dir, true)
  1477  	return err
  1478  }
  1479  
  1480  // deleteObject removes an object by ID
  1481  func (f *Fs) deleteObject(ctx context.Context, id string) error {
  1482  	opts := f.newOptsCall(id, "DELETE", "")
  1483  	opts.NoResponse = true
  1484  
  1485  	return f.pacer.Call(func() (bool, error) {
  1486  		resp, err := f.srv.Call(ctx, &opts)
  1487  		return shouldRetry(ctx, resp, err)
  1488  	})
  1489  }
  1490  
  1491  // purgeCheck removes the root directory, if check is set then it
  1492  // refuses to do so if it has anything in
  1493  func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
  1494  	root := path.Join(f.root, dir)
  1495  	if root == "" {
  1496  		return errors.New("can't purge root directory")
  1497  	}
  1498  	dc := f.dirCache
  1499  	rootID, err := dc.FindDir(ctx, dir, false)
  1500  	if err != nil {
  1501  		return err
  1502  	}
  1503  	if check {
  1504  		// check to see if there are any items
  1505  		err := f.listAll(ctx, rootID, false, false, func(item *api.Item) error {
  1506  			return fs.ErrorDirectoryNotEmpty
  1507  		})
  1508  		if err != nil {
  1509  			return err
  1510  		}
  1511  	}
  1512  	err = f.deleteObject(ctx, rootID)
  1513  	if err != nil {
  1514  		return err
  1515  	}
  1516  	f.dirCache.FlushDir(dir)
  1517  	return nil
  1518  }
  1519  
  1520  // Rmdir deletes the root folder
  1521  //
  1522  // Returns an error if it isn't empty
  1523  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
  1524  	return f.purgeCheck(ctx, dir, true)
  1525  }
  1526  
  1527  // Precision return the precision of this Fs
  1528  func (f *Fs) Precision() time.Duration {
  1529  	if f.driveType == driveTypePersonal {
  1530  		return time.Millisecond
  1531  	}
  1532  	return time.Second
  1533  }
  1534  
  1535  // waitForJob waits for the job with status in url to complete
  1536  func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
  1537  	deadline := time.Now().Add(f.ci.TimeoutOrInfinite())
  1538  	for time.Now().Before(deadline) {
  1539  		var resp *http.Response
  1540  		var err error
  1541  		var body []byte
  1542  		err = f.pacer.Call(func() (bool, error) {
  1543  			resp, err = http.Get(location)
  1544  			if err != nil {
  1545  				return fserrors.ShouldRetry(err), err
  1546  			}
  1547  			body, err = rest.ReadBody(resp)
  1548  			return fserrors.ShouldRetry(err), err
  1549  		})
  1550  		if err != nil {
  1551  			return err
  1552  		}
  1553  		// Try to decode the body first as an api.AsyncOperationStatus
  1554  		var status api.AsyncOperationStatus
  1555  		err = json.Unmarshal(body, &status)
  1556  		if err != nil {
  1557  			return fmt.Errorf("async status result not JSON: %q: %w", body, err)
  1558  		}
  1559  
  1560  		switch status.Status {
  1561  		case "failed":
  1562  			if strings.HasPrefix(status.ErrorCode, "AccessDenied_") {
  1563  				return errAsyncJobAccessDenied
  1564  			}
  1565  			fallthrough
  1566  		case "deleteFailed":
  1567  			return fmt.Errorf("%s: async operation returned %q", o.remote, status.Status)
  1568  		case "completed":
  1569  			err = o.readMetaData(ctx)
  1570  			if err != nil {
  1571  				return fmt.Errorf("async operation completed but readMetaData failed: %w", err)
  1572  			}
  1573  			return nil
  1574  		}
  1575  
  1576  		time.Sleep(1 * time.Second)
  1577  	}
  1578  	return fmt.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
  1579  }
  1580  
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	// Server-side copy between a personal drive and a business/SharePoint
	// drive is not supported, nor between two different business drives.
	if (f.driveType == driveTypePersonal && srcObj.fs.driveType != driveTypePersonal) || (f.driveType != driveTypePersonal && srcObj.fs.driveType == driveTypePersonal) {
		fs.Debugf(src, "Can't server-side copy - cross-drive between OneDrive Personal and OneDrive for business (SharePoint)")
		return nil, fs.ErrorCantCopy
	} else if f.driveType == driveTypeBusiness && srcObj.fs.driveType == driveTypeBusiness && srcObj.fs.driveID != f.driveID {
		fs.Debugf(src, "Can't server-side copy - cross-drive between difference OneDrive for business (Not SharePoint)")
		return nil, fs.ErrorCantCopy
	}

	err := srcObj.readMetaData(ctx)
	if err != nil {
		return nil, err
	}

	// Check we aren't overwriting a file on the same remote
	// (OneDrive paths are case-insensitive, hence EqualFold)
	if srcObj.fs == f {
		srcPath := srcObj.rootPath()
		dstPath := f.rootPath(remote)
		if strings.EqualFold(srcPath, dstPath) {
			return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
		}
	}

	// Create temporary object
	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
	if err != nil {
		return nil, err
	}

	// Copy the object
	// The query param is a workaround for OneDrive Business for #4590
	opts := f.newOptsCall(srcObj.id, "POST", "/copy?@microsoft.graph.conflictBehavior=replace")
	// Ask the server to run the copy asynchronously and hand back a
	// monitor URL in the Location header, polled by waitForJob below
	opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
	opts.NoResponse = true

	id, dstDriveID, _ := f.parseNormalizedID(directoryID)

	replacedLeaf := f.opt.Enc.FromStandardName(leaf)
	copyReq := api.CopyItemRequest{
		Name: &replacedLeaf,
		ParentReference: api.ItemReference{
			DriveID: dstDriveID,
			ID:      id,
		},
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &copyReq, nil)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}

	// read location header
	location := resp.Header.Get("Location")
	if location == "" {
		return nil, errors.New("didn't receive location header in copy response")
	}

	// Wait for job to finish
	err = f.waitForJob(ctx, location, dstObj)
	if err == errAsyncJobAccessDenied {
		fs.Debugf(src, "Server-side copy failed - file not shared between drives")
		return nil, fs.ErrorCantCopy
	}
	if err != nil {
		return nil, err
	}

	// Copy does NOT copy the modTime from the source and there seems to
	// be no way to set date before
	// This will create TWO versions on OneDrive

	// Set modtime and adjust metadata if required
	_, err = dstObj.Metadata(ctx) // make sure we get the correct new normalizedID
	if err != nil {
		return nil, err
	}
	dstObj.meta.permsAddOnly = true // dst will have different IDs from src, so can't update/remove
	info, err := f.fetchAndUpdateMetadata(ctx, src, fs.MetadataAsOpenOptions(ctx), dstObj)
	if err != nil {
		return nil, err
	}
	err = dstObj.setMetaData(info)
	return dstObj, err
}
  1683  
// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
	// Delegates to purgeCheck with check=false; Rmdir uses the same
	// helper with check=true.
	return f.purgeCheck(ctx, dir, false)
}
  1692  
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Create temporary object
	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
	if err != nil {
		return nil, err
	}

	id, dstDriveID, _ := f.parseNormalizedID(directoryID)
	_, srcObjDriveID, _ := f.parseNormalizedID(srcObj.id)

	if f.canonicalDriveID(dstDriveID) != srcObj.fs.canonicalDriveID(srcObjDriveID) {
		// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
		// "Items cannot be moved between Drives using this request."
		fs.Debugf(f, "Can't move files between drives (%q != %q)", dstDriveID, srcObjDriveID)
		return nil, fs.ErrorCantMove
	}

	// Move the object by PATCHing its parentReference (new directory)
	// and name (new leaf)
	opts := f.newOptsCall(srcObj.id, "PATCH", "")

	move := api.MoveItemRequest{
		Name: f.opt.Enc.FromStandardName(leaf),
		ParentReference: &api.ItemReference{
			DriveID: dstDriveID,
			ID:      id,
		},
		// We set the mod time too as it gets reset otherwise
		FileSystemInfo: &api.FileSystemInfoFacet{
			CreatedDateTime:      api.Timestamp(srcObj.tryGetBtime(srcObj.modTime)),
			LastModifiedDateTime: api.Timestamp(srcObj.modTime),
		},
	}
	var resp *http.Response
	var info *api.Item
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}

	err = dstObj.setMetaData(info)
	if err != nil {
		return nil, err
	}

	// Set modtime and adjust metadata if required
	info, err = f.fetchAndUpdateMetadata(ctx, src, fs.MetadataAsOpenOptions(ctx), dstObj)
	if err != nil {
		return nil, err
	}
	err = dstObj.setMetaData(info)
	return dstObj, err
}
  1763  
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}

	// dircache.DirMove resolves the IDs and leaf name needed for the
	// server-side re-parent/rename below
	srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
	if err != nil {
		return err
	}

	parsedDstDirID, dstDriveID, _ := f.parseNormalizedID(dstDirectoryID)
	_, srcDriveID, _ := f.parseNormalizedID(srcID)

	if f.canonicalDriveID(dstDriveID) != srcFs.canonicalDriveID(srcDriveID) {
		// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
		// "Items cannot be moved between Drives using this request."
		fs.Debugf(f, "Can't move directories between drives (%q != %q)", dstDriveID, srcDriveID)
		return fs.ErrorCantDirMove
	}

	// Get timestamps of src so they can be preserved
	srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(ctx, srcID, "")
	if err != nil {
		return err
	}

	// Do the move by PATCHing the directory item's parent and name
	opts := f.newOptsCall(srcID, "PATCH", "")
	move := api.MoveItemRequest{
		Name: f.opt.Enc.FromStandardName(dstLeaf),
		ParentReference: &api.ItemReference{
			DriveID: dstDriveID,
			ID:      parsedDstDirID,
		},
		// We set the mod time too as it gets reset otherwise
		FileSystemInfo: &api.FileSystemInfoFacet{
			CreatedDateTime:      srcInfo.CreatedDateTime,
			LastModifiedDateTime: srcInfo.LastModifiedDateTime,
		},
	}
	var resp *http.Response
	var info api.Item
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err
	}

	// Drop now-stale cached entries for the moved directory
	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}
  1827  
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	// Forget all cached directory IDs so they are looked up afresh.
	f.dirCache.ResetRoot()
}
  1833  
  1834  // About gets quota information
  1835  func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
  1836  	var drive api.Drive
  1837  	opts := rest.Opts{
  1838  		Method: "GET",
  1839  		Path:   "",
  1840  	}
  1841  	var resp *http.Response
  1842  	err = f.pacer.Call(func() (bool, error) {
  1843  		resp, err = f.srv.CallJSON(ctx, &opts, nil, &drive)
  1844  		return shouldRetry(ctx, resp, err)
  1845  	})
  1846  	if err != nil {
  1847  		return nil, err
  1848  	}
  1849  	q := drive.Quota
  1850  	// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
  1851  	if q.Total == 0 && q.Used == 0 && q.Deleted == 0 && q.Remaining == 0 {
  1852  		return &fs.Usage{}, nil
  1853  	}
  1854  	usage = &fs.Usage{
  1855  		Total:   fs.NewUsageValue(q.Total),     // quota of bytes that can be used
  1856  		Used:    fs.NewUsageValue(q.Used),      // bytes in use
  1857  		Trashed: fs.NewUsageValue(q.Deleted),   // bytes in trash
  1858  		Free:    fs.NewUsageValue(q.Remaining), // bytes which can be uploaded before reaching the quota
  1859  	}
  1860  	return usage, nil
  1861  }
  1862  
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	// Exactly one hash type is supported per Fs - SHA-1 for personal
	// drives, QuickXorHash for business (see setMetaData).
	return hash.Set(f.hashType)
}
  1867  
  1868  // PublicLink returns a link for downloading without account.
  1869  func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
  1870  	info, _, err := f.readMetaDataForPath(ctx, f.rootPath(remote))
  1871  	if err != nil {
  1872  		return "", err
  1873  	}
  1874  	opts := f.newOptsCall(info.GetID(), "POST", "/createLink")
  1875  
  1876  	share := api.CreateShareLinkRequest{
  1877  		Type:     f.opt.LinkType,
  1878  		Scope:    f.opt.LinkScope,
  1879  		Password: f.opt.LinkPassword,
  1880  	}
  1881  
  1882  	if expire < fs.DurationOff {
  1883  		expiry := time.Now().Add(time.Duration(expire))
  1884  		share.Expiry = &expiry
  1885  	}
  1886  
  1887  	var resp *http.Response
  1888  	var result api.CreateShareLinkResponse
  1889  	err = f.pacer.Call(func() (bool, error) {
  1890  		resp, err = f.srv.CallJSON(ctx, &opts, &share, &result)
  1891  		return shouldRetry(ctx, resp, err)
  1892  	})
  1893  	if err != nil {
  1894  		if resp != nil && resp.StatusCode == 400 && f.driveType != driveTypePersonal {
  1895  			return "", fmt.Errorf("%v (is making public links permitted by the org admin?)", err)
  1896  		}
  1897  		return "", err
  1898  	}
  1899  
  1900  	shareURL := result.Link.WebURL
  1901  
  1902  	// Convert share link to direct download link if target is not a folder
  1903  	// Not attempting to do the conversion for regional versions, just to be safe
  1904  	if f.opt.Region != regionGlobal {
  1905  		return shareURL, nil
  1906  	}
  1907  	if info.Folder != nil {
  1908  		fs.Debugf(nil, "Can't convert share link for folder to direct link - returning the link as is")
  1909  		return shareURL, nil
  1910  	}
  1911  
  1912  	cnvFailMsg := "Don't know how to convert share link to direct link - returning the link as is"
  1913  	directURL := ""
  1914  	segments := strings.Split(shareURL, "/")
  1915  	switch f.driveType {
  1916  	case driveTypePersonal:
  1917  		// Method: https://stackoverflow.com/questions/37951114/direct-download-link-to-onedrive-file
  1918  		if len(segments) != 5 {
  1919  			fs.Logf(f, cnvFailMsg)
  1920  			return shareURL, nil
  1921  		}
  1922  		enc := base64.StdEncoding.EncodeToString([]byte(shareURL))
  1923  		enc = strings.ReplaceAll(enc, "/", "_")
  1924  		enc = strings.ReplaceAll(enc, "+", "-")
  1925  		enc = strings.ReplaceAll(enc, "=", "")
  1926  		directURL = fmt.Sprintf("https://api.onedrive.com/v1.0/shares/u!%s/root/content", enc)
  1927  	case driveTypeBusiness:
  1928  		// Method: https://docs.microsoft.com/en-us/sharepoint/dev/spfx/shorter-share-link-format
  1929  		// Example:
  1930  		//   https://{tenant}-my.sharepoint.com/:t:/g/personal/{user_email}/{Opaque_String}
  1931  		//   --convert to->
  1932  		//   https://{tenant}-my.sharepoint.com/personal/{user_email}/_layouts/15/download.aspx?share={Opaque_String}
  1933  		if len(segments) != 8 {
  1934  			fs.Logf(f, cnvFailMsg)
  1935  			return shareURL, nil
  1936  		}
  1937  		directURL = fmt.Sprintf("https://%s/%s/%s/_layouts/15/download.aspx?share=%s",
  1938  			segments[2], segments[5], segments[6], segments[7])
  1939  	case driveTypeSharepoint:
  1940  		// Method: Similar to driveTypeBusiness
  1941  		// Example:
  1942  		//   https://{tenant}.sharepoint.com/:t:/s/{site_name}/{Opaque_String}
  1943  		//   --convert to->
  1944  		//   https://{tenant}.sharepoint.com/sites/{site_name}/_layouts/15/download.aspx?share={Opaque_String}
  1945  		//
  1946  		//   https://{tenant}.sharepoint.com/:t:/t/{team_name}/{Opaque_String}
  1947  		//   --convert to->
  1948  		//   https://{tenant}.sharepoint.com/teams/{team_name}/_layouts/15/download.aspx?share={Opaque_String}
  1949  		//
  1950  		//   https://{tenant}.sharepoint.com/:t:/g/{Opaque_String}
  1951  		//   --convert to->
  1952  		//   https://{tenant}.sharepoint.com/_layouts/15/download.aspx?share={Opaque_String}
  1953  		if len(segments) < 6 || len(segments) > 7 {
  1954  			fs.Logf(f, cnvFailMsg)
  1955  			return shareURL, nil
  1956  		}
  1957  		pathPrefix := ""
  1958  		switch segments[4] {
  1959  		case "s": // Site
  1960  			pathPrefix = "/sites/" + segments[5]
  1961  		case "t": // Team
  1962  			pathPrefix = "/teams/" + segments[5]
  1963  		case "g": // Root site
  1964  		default:
  1965  			fs.Logf(f, cnvFailMsg)
  1966  			return shareURL, nil
  1967  		}
  1968  		directURL = fmt.Sprintf("https://%s%s/_layouts/15/download.aspx?share=%s",
  1969  			segments[2], pathPrefix, segments[len(segments)-1])
  1970  	}
  1971  
  1972  	return directURL, nil
  1973  }
  1974  
// CleanUp deletes all the hidden files.
//
// For OneDrive this means deleting all old versions of every file in
// the remote, keeping only the current version (see deleteVersions).
func (f *Fs) CleanUp(ctx context.Context) error {
	// token is a semaphore bounding concurrent deleteVersions calls
	// to the configured number of checkers
	token := make(chan struct{}, f.ci.Checkers)
	var wg sync.WaitGroup
	err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
		if err != nil {
			// Log and carry on rather than aborting the whole
			// cleanup on one failed listing
			fs.Errorf(f, "Failed to list %q: %v", path, err)
			return nil
		}
		err = entries.ForObjectError(func(obj fs.Object) error {
			o, ok := obj.(*Object)
			if !ok {
				return errors.New("internal error: not a onedrive object")
			}
			wg.Add(1)
			token <- struct{}{} // acquire a slot
			go func() {
				defer func() {
					<-token // release the slot
					wg.Done()
				}()
				err := o.deleteVersions(ctx)
				if err != nil {
					// Best effort - log and continue with other objects
					fs.Errorf(o, "Failed to remove versions: %v", err)
				}
			}()
			return nil
		})
		wg.Wait()
		return err
	})
	return err
}
  2008  
  2009  // Finds and removes any old versions for o
  2010  func (o *Object) deleteVersions(ctx context.Context) error {
  2011  	opts := o.fs.newOptsCall(o.id, "GET", "/versions")
  2012  	var versions api.VersionsResponse
  2013  	err := o.fs.pacer.Call(func() (bool, error) {
  2014  		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &versions)
  2015  		return shouldRetry(ctx, resp, err)
  2016  	})
  2017  	if err != nil {
  2018  		return err
  2019  	}
  2020  	if len(versions.Versions) < 2 {
  2021  		return nil
  2022  	}
  2023  	for _, version := range versions.Versions[1:] {
  2024  		err = o.deleteVersion(ctx, version.ID)
  2025  		if err != nil {
  2026  			return err
  2027  		}
  2028  	}
  2029  	return nil
  2030  }
  2031  
// deleteVersion removes the single version with the given ID from o.
//
// It honours --dry-run / --interactive via operations.SkipDestructive.
func (o *Object) deleteVersion(ctx context.Context, ID string) error {
	if operations.SkipDestructive(ctx, fmt.Sprintf("%s of %s", ID, o.remote), "delete version") {
		return nil
	}
	fs.Infof(o, "removing version %q", ID)
	opts := o.fs.newOptsCall(o.id, "DELETE", "/versions/"+ID)
	opts.NoResponse = true
	return o.fs.pacer.Call(func() (bool, error) {
		resp, err := o.fs.srv.Call(ctx, &opts)
		return shouldRetry(ctx, resp, err)
	})
}
  2045  
  2046  // ------------------------------------------------------------
  2047  
// Fs returns the parent Fs.
func (o *Object) Fs() fs.Info {
	return o.fs
}
  2052  
  2053  // Return a string version
  2054  func (o *Object) String() string {
  2055  	if o == nil {
  2056  		return "<nil>"
  2057  	}
  2058  	return o.remote
  2059  }
  2060  
// Remote returns the remote path.
func (o *Object) Remote() string {
	return o.remote
}
  2065  
// rootPath returns a path for use in server given a remote
func (f *Fs) rootPath(remote string) string {
	// NOTE: relies on rootSlash() returning the root terminated with "/"
	return f.rootSlash() + remote
}
  2070  
// rootPath returns a path for use in local functions
func (o *Object) rootPath() string {
	// Convenience wrapper combining the Fs root with this object's remote
	return o.fs.rootPath(o.remote)
}
  2075  
// Hash returns the checksum of the object as a lowercase hex string.
//
// Only the single hash type configured on the Fs is available - SHA-1
// for personal drives, QuickXorHash for business (see setMetaData);
// any other requested type returns hash.ErrUnsupported.
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t == o.fs.hashType {
		return o.hash, nil
	}
	return "", hash.ErrUnsupported
}
  2083  
  2084  // Size returns the size of an object in bytes
  2085  func (o *Object) Size() int64 {
  2086  	err := o.readMetaData(context.TODO())
  2087  	if err != nil {
  2088  		fs.Logf(o, "Failed to read metadata: %v", err)
  2089  		return 0
  2090  	}
  2091  	return o.size
  2092  }
  2093  
// setMetaData sets the metadata from info
//
// Returns fs.ErrorIsDir if info describes a folder rather than a file.
func (o *Object) setMetaData(info *api.Item) (err error) {
	if info.GetFolder() != nil {
		// Log a stack trace - calling this on a directory is a
		// programming error
		log.Stack(o, "setMetaData called on dir instead of obj")
		return fs.ErrorIsDir
	}
	o.hasMetaData = true
	o.size = info.GetSize()

	// Remember OneNote packages so Open can refuse to download them
	o.isOneNoteFile = info.GetPackageType() == api.PackageTypeOneNote

	// Docs: https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/hashes
	//
	// We use SHA1 for onedrive personal and QuickXorHash for onedrive for business
	file := info.GetFile()
	if file != nil {
		o.mimeType = file.MimeType
		o.hash = ""
		switch o.fs.hashType {
		case QuickXorHashType:
			// The API returns QuickXorHash base64 encoded - re-encode
			// it as lowercase hex like the other hash types
			if file.Hashes.QuickXorHash != "" {
				h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
				if err != nil {
					fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
				} else {
					o.hash = hex.EncodeToString(h)
				}
			}
		case hash.SHA1:
			o.hash = strings.ToLower(file.Hashes.Sha1Hash)
		case hash.SHA256:
			o.hash = strings.ToLower(file.Hashes.Sha256Hash)
		case hash.CRC32:
			o.hash = strings.ToLower(file.Hashes.Crc32Hash)
		}
	}
	// Prefer the FileSystemInfo facet timestamp when present
	fileSystemInfo := info.GetFileSystemInfo()
	if fileSystemInfo != nil {
		o.modTime = time.Time(fileSystemInfo.LastModifiedDateTime)
	} else {
		o.modTime = time.Time(info.GetLastModifiedDateTime())
	}
	o.id = info.GetID()
	if o.meta == nil {
		o.meta = o.fs.newMetadata(o.Remote())
	}
	o.fs.setSystemMetadata(info, o.meta, o.remote, o.mimeType)
	return nil
}
  2143  
  2144  // sets system metadata shared by both objects and directories
  2145  func (f *Fs) setSystemMetadata(info *api.Item, meta *Metadata, remote string, mimeType string) {
  2146  	meta.fs = f
  2147  	meta.remote = remote
  2148  	meta.mimeType = mimeType
  2149  	if info == nil {
  2150  		fs.Errorf("setSystemMetadata", "internal error: info is nil")
  2151  	}
  2152  	fileSystemInfo := info.GetFileSystemInfo()
  2153  	if fileSystemInfo != nil {
  2154  		meta.mtime = time.Time(fileSystemInfo.LastModifiedDateTime)
  2155  		meta.btime = time.Time(fileSystemInfo.CreatedDateTime)
  2156  
  2157  	} else {
  2158  		meta.mtime = time.Time(info.GetLastModifiedDateTime())
  2159  		meta.btime = time.Time(info.GetCreatedDateTime())
  2160  	}
  2161  	meta.utime = time.Time(info.GetCreatedDateTime())
  2162  	meta.description = info.Description
  2163  	meta.packageType = info.GetPackageType()
  2164  	meta.createdBy = info.GetCreatedBy()
  2165  	meta.lastModifiedBy = info.GetLastModifiedBy()
  2166  	meta.malwareDetected = info.MalwareDetected()
  2167  	meta.shared = info.Shared
  2168  	meta.normalizedID = info.GetID()
  2169  }
  2170  
  2171  // readMetaData gets the metadata if it hasn't already been fetched
  2172  //
  2173  // it also sets the info
  2174  func (o *Object) readMetaData(ctx context.Context) (err error) {
  2175  	if o.hasMetaData {
  2176  		return nil
  2177  	}
  2178  	info, _, err := o.fs.readMetaDataForPath(ctx, o.rootPath())
  2179  	if err != nil {
  2180  		if apiErr, ok := err.(*api.Error); ok {
  2181  			if apiErr.ErrorInfo.Code == "itemNotFound" {
  2182  				return fs.ErrorObjectNotFound
  2183  			}
  2184  		}
  2185  		return err
  2186  	}
  2187  	return o.setMetaData(info)
  2188  }
  2189  
  2190  // ModTime returns the modification time of the object
  2191  //
  2192  // It attempts to read the objects mtime and if that isn't present the
  2193  // LastModified returned in the http headers
  2194  func (o *Object) ModTime(ctx context.Context) time.Time {
  2195  	err := o.readMetaData(ctx)
  2196  	if err != nil {
  2197  		fs.Logf(o, "Failed to read metadata: %v", err)
  2198  		return time.Now()
  2199  	}
  2200  	return o.modTime
  2201  }
  2202  
// setModTime sets the modification time of the local fs object
//
// It PATCHes the item's FileSystemInfo facet, setting both the
// created time (via tryGetBtime) and the modified time, and returns
// the updated item.
func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
	opts := o.fs.newOptsCallWithPath(ctx, o.remote, "PATCH", "")
	update := api.SetFileSystemInfo{
		FileSystemInfo: api.FileSystemInfoFacet{
			CreatedDateTime:      api.Timestamp(o.tryGetBtime(modTime)),
			LastModifiedDateTime: api.Timestamp(modTime),
		},
	}
	var info *api.Item
	err := o.fs.pacer.Call(func() (bool, error) {
		resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
		return shouldRetry(ctx, resp, err)
	})
	// Remove versions if required
	// Note the deliberately shadowed err: a version-removal failure is
	// only logged so the PATCH result is what gets returned.
	if o.fs.opt.NoVersions {
		err := o.deleteVersions(ctx)
		if err != nil {
			fs.Errorf(o, "Failed to remove versions: %v", err)
		}
	}
	return info, err
}
  2226  
  2227  // SetModTime sets the modification time of the local fs object
  2228  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
  2229  	info, err := o.setModTime(ctx, modTime)
  2230  	if err != nil {
  2231  		return err
  2232  	}
  2233  	return o.setMetaData(info)
  2234  }
  2235  
// Storable returns a boolean showing whether this object storable
func (o *Object) Storable() bool {
	// All OneDrive objects are storable.
	return true
}
  2240  
// Open an object for read
//
// The download happens in two stages: an authenticated request to the
// /content endpoint which is expected to redirect, then a fetch of
// the redirect target with the Authorization header stripped using
// the unauthenticated client.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	if o.id == "" {
		return nil, errors.New("can't download - no id")
	}
	// OneNote packages have no downloadable content (see setMetaData)
	if o.isOneNoteFile {
		return nil, errors.New("can't open a OneNote file")
	}

	fs.FixRangeOption(options, o.size)
	var resp *http.Response
	opts := o.fs.newOptsCall(o.id, "GET", "/content")
	opts.Options = options
	if o.fs.opt.AVOverride {
		// Ask the server to serve the file even if its AV scan flagged it
		opts.Parameters = url.Values{"AVOverride": {"1"}}
	}
	// Make a note of the redirect target as we need to call it without Auth
	var redirectReq *http.Request
	opts.CheckRedirect = func(req *http.Request, via []*http.Request) error {
		if len(via) >= 10 {
			return errors.New("stopped after 10 redirects")
		}
		req.Header.Del("Authorization") // remove Auth header
		redirectReq = req
		// Stop the client following the redirect - we do it ourselves below
		return http.ErrUseLastResponse
	}

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		if redirectReq != nil {
			// It is a redirect which we are expecting
			err = nil
		}
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		if resp != nil {
			if virus := resp.Header.Get("X-Virus-Infected"); virus != "" {
				err = fmt.Errorf("server reports this file is infected with a virus - use --onedrive-av-override to download anyway: %s: %w", virus, err)
			}
		}
		return nil, err
	}
	if redirectReq != nil {
		// Fetch the redirect target with the unauthenticated client
		err = o.fs.pacer.Call(func() (bool, error) {
			resp, err = o.fs.unAuth.Do(redirectReq)
			return shouldRetry(ctx, resp, err)
		})
		if err != nil {
			if resp != nil {
				if virus := resp.Header.Get("X-Virus-Infected"); virus != "" {
					err = fmt.Errorf("server reports this file is infected with a virus - use --onedrive-av-override to download anyway: %s: %w", virus, err)
				}
			}
			return nil, err
		}
	}

	if resp.StatusCode == http.StatusOK && resp.ContentLength > 0 && resp.Header.Get("Content-Range") == "" {
		// Overwrite size with actual size since size readings from Onedrive is unreliable.
		o.size = resp.ContentLength
	}
	return resp.Body, err
}
  2305  
// createUploadSession creates an upload session for the object
//
// The response contains the URL to which fragments should be uploaded
// (see uploadFragment). The metadata gathered for the object is also
// returned so the caller can apply any parts that must be set after
// the upload completes.
func (o *Object) createUploadSession(ctx context.Context, src fs.ObjectInfo, modTime time.Time) (response *api.CreateUploadResponse, metadata fs.Metadata, err error) {
	opts := o.fs.newOptsCallWithPath(ctx, o.remote, "POST", "/createUploadSession")
	createRequest, metadata, err := o.fetchMetadataForCreate(ctx, src, opts.Options, modTime)
	if err != nil {
		return nil, metadata, err
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, &createRequest, &response)
		if apiErr, ok := err.(*api.Error); ok {
			if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
				// Make the error more user-friendly
				err = errors.New(err.Error() + " (is it a OneNote file?)")
			}
		}
		return shouldRetry(ctx, resp, err)
	})
	return response, metadata, err
}
  2326  
  2327  // getPosition gets the current position in a multipart upload
  2328  func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err error) {
  2329  	opts := rest.Opts{
  2330  		Method:  "GET",
  2331  		RootURL: url,
  2332  	}
  2333  	var info api.UploadFragmentResponse
  2334  	var resp *http.Response
  2335  	err = o.fs.pacer.Call(func() (bool, error) {
  2336  		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
  2337  		return shouldRetry(ctx, resp, err)
  2338  	})
  2339  	if err != nil {
  2340  		return 0, err
  2341  	}
  2342  	if len(info.NextExpectedRanges) != 1 {
  2343  		return 0, fmt.Errorf("bad number of ranges in upload position: %v", info.NextExpectedRanges)
  2344  	}
  2345  	position := info.NextExpectedRanges[0]
  2346  	i := strings.IndexByte(position, '-')
  2347  	if i < 0 {
  2348  		return 0, fmt.Errorf("no '-' in next expected range: %q", position)
  2349  	}
  2350  	position = position[:i]
  2351  	pos, err = strconv.ParseInt(position, 10, 64)
  2352  	if err != nil {
  2353  		return 0, fmt.Errorf("bad expected range: %q: %w", position, err)
  2354  	}
  2355  	return pos, nil
  2356  }
  2357  
// uploadFragment uploads a part
//
// start is the offset of this chunk within the whole file of
// totalSize bytes. On a 416 (range not satisfiable) response it asks
// the server for its current position and retries, resending only the
// bytes the server has not yet received (or skipping the chunk
// entirely if it was fully received).
//
// When the final fragment completes the server replies 200/201 with
// the finished item's metadata, which is decoded and returned in info.
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64, options ...fs.OpenOption) (info *api.Item, err error) {
	//	var response api.UploadFragmentResponse
	var resp *http.Response
	var body []byte
	skip := int64(0) // bytes of this chunk the server already has
	err = o.fs.pacer.Call(func() (bool, error) {
		toSend := chunkSize - skip
		opts := rest.Opts{
			Method:        "PUT",
			RootURL:       url,
			ContentLength: &toSend,
			ContentRange:  fmt.Sprintf("bytes %d-%d/%d", start+skip, start+chunkSize-1, totalSize),
			Body:          chunk,
			Options:       options,
		}
		// Position the reader past the bytes already received
		_, _ = chunk.Seek(skip, io.SeekStart)
		resp, err = o.fs.unAuth.Call(ctx, &opts)
		if err != nil && resp != nil && resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
			fs.Debugf(o, "Received 416 error - reading current position from server: %v", err)
			pos, posErr := o.getPosition(ctx, url)
			if posErr != nil {
				fs.Debugf(o, "Failed to read position: %v", posErr)
				return false, posErr
			}
			skip = pos - start
			fs.Debugf(o, "Read position %d, chunk is %d..%d, bytes to skip = %d", pos, start, start+chunkSize, skip)
			switch {
			case skip < 0:
				return false, fmt.Errorf("sent block already (skip %d < 0), can't rewind: %w", skip, err)
			case skip > chunkSize:
				return false, fmt.Errorf("position is in the future (skip %d > chunkSize %d), can't skip forward: %w", skip, chunkSize, err)
			case skip == chunkSize:
				fs.Debugf(o, "Skipping chunk as already sent (skip %d == chunkSize %d)", skip, chunkSize)
				return false, nil
			}
			// Retry the PUT sending only the unreceived tail
			return true, fmt.Errorf("retry this chunk skipping %d bytes: %w", skip, err)
		}
		if err != nil {
			return shouldRetry(ctx, resp, err)
		}
		body, err = rest.ReadBody(resp)
		if err != nil {
			return shouldRetry(ctx, resp, err)
		}
		if resp.StatusCode == 200 || resp.StatusCode == 201 {
			// we are done :)
			// read the item
			info = &api.Item{}
			return false, json.Unmarshal(body, info)
		}
		return false, nil
	})
	return info, err
}
  2413  
  2414  // cancelUploadSession cancels an upload session
  2415  func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error) {
  2416  	opts := rest.Opts{
  2417  		Method:     "DELETE",
  2418  		RootURL:    url,
  2419  		NoResponse: true,
  2420  	}
  2421  	var resp *http.Response
  2422  	err = o.fs.pacer.Call(func() (bool, error) {
  2423  		resp, err = o.fs.srv.Call(ctx, &opts)
  2424  		return shouldRetry(ctx, resp, err)
  2425  	})
  2426  	return
  2427  }
  2428  
// uploadMultipart uploads a file using multipart upload
// if there is metadata, it will be set at the same time, except for permissions, which must be set after (if present and enabled).
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (info *api.Item, err error) {
	size := src.Size()
	modTime := src.ModTime(ctx)
	if size <= 0 {
		return nil, errors.New("unknown-sized upload not supported")
	}

	// Create upload session
	fs.Debugf(o, "Starting multipart upload")
	session, metadata, err := o.createUploadSession(ctx, src, modTime)
	if err != nil {
		return nil, err
	}
	uploadURL := session.UploadURL

	// Cancel the session if something went wrong
	defer atexit.OnError(&err, func() {
		fs.Debugf(o, "Cancelling multipart upload: %v", err)
		cancelErr := o.cancelUploadSession(ctx, uploadURL)
		if cancelErr != nil {
			fs.Logf(o, "Failed to cancel multipart upload: %v (upload failed due to: %v)", cancelErr, err)
		}
	})()

	// Upload the chunks
	remaining := size
	position := int64(0)
	for remaining > 0 {
		// Each chunk is ChunkSize bytes except possibly the last
		n := int64(o.fs.opt.ChunkSize)
		if remaining < n {
			n = remaining
		}
		// Buffer the segment so uploadFragment can seek back and resend it on retry
		seg := readers.NewRepeatableReader(io.LimitReader(in, n))
		fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
		// info stays nil until the server acknowledges the final fragment
		info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n, options...)
		if err != nil {
			return nil, err
		}
		remaining -= n
		position += n
	}

	// Record the item metadata returned for the completed upload
	err = o.setMetaData(info)
	if err != nil {
		return info, err
	}
	if metadata == nil || !o.fs.needsUpdatePermissions(metadata) {
		return info, err
	}
	info, err = o.updateMetadata(ctx, metadata) // for permissions, which can't be set during original upload
	if info == nil {
		return nil, err
	}
	return info, o.setMetaData(info)
}
  2486  
// Update the content of a remote file within 4 MiB size in one single request
// (currently only used when size is exactly 0)
// This function will set modtime and metadata after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (info *api.Item, err error) {
	size := src.Size()
	// Guard the 4 MiB limit of the single-request upload path
	if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
		return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4 MiB")
	}

	fs.Debugf(o, "Starting singlepart upload")
	var resp *http.Response
	// PUT the content directly to the item's /content endpoint
	opts := o.fs.newOptsCallWithPath(ctx, o.remote, "PUT", "/content")
	opts.ContentLength = &size
	opts.Body = in
	opts.Options = options

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
		if apiErr, ok := err.(*api.Error); ok {
			if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
				// Make the error more user-friendly
				err = errors.New(err.Error() + " (is it a OneNote file?)")
			}
		}
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}

	// Record the metadata returned by the upload
	err = o.setMetaData(info)
	if err != nil {
		return nil, err
	}
	// Set the mod time now and read metadata
	info, err = o.fs.fetchAndUpdateMetadata(ctx, src, options, o)
	return info, o.setMetaData(info)
}
  2525  
  2526  // Update the object with the contents of the io.Reader, modTime and size
  2527  //
  2528  // The new object may have been created if an error is returned
  2529  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
  2530  	if o.hasMetaData && o.isOneNoteFile {
  2531  		return errors.New("can't upload content to a OneNote file")
  2532  	}
  2533  
  2534  	o.fs.tokenRenewer.Start()
  2535  	defer o.fs.tokenRenewer.Stop()
  2536  
  2537  	size := src.Size()
  2538  
  2539  	var info *api.Item
  2540  	if size > 0 {
  2541  		info, err = o.uploadMultipart(ctx, in, src, options...)
  2542  	} else if size == 0 {
  2543  		info, err = o.uploadSinglepart(ctx, in, src, options...)
  2544  	} else {
  2545  		return errors.New("unknown-sized upload not supported")
  2546  	}
  2547  	if err != nil {
  2548  		fs.PrettyPrint(info, "info from Update error", fs.LogLevelDebug)
  2549  		return err
  2550  	}
  2551  
  2552  	// If updating the file then remove versions
  2553  	if o.fs.opt.NoVersions && o.hasMetaData {
  2554  		err = o.deleteVersions(ctx)
  2555  		if err != nil {
  2556  			fs.Errorf(o, "Failed to remove versions: %v", err)
  2557  		}
  2558  	}
  2559  	return nil
  2560  }
  2561  
// Remove an object
//
// Deletes the object on the remote using its cached item ID.
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.deleteObject(ctx, o.id)
}
  2566  
// MimeType of an Object if known, "" otherwise
//
// Returns the cached field - no network request is made.
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}
  2571  
// ID returns the ID of the Object if known, or "" if not
//
// Returns the cached field - no network request is made.
func (o *Object) ID() string {
	return o.id
}
  2576  
  2577  /*
  2578   *       URL Build routine area start
  2579   *       1. In this area, region-related URL rewrites are applied. As the API is blackbox,
  2580   *          we cannot thoroughly test this part. Please be extremely careful while changing them.
 *       2. If possible, please don't introduce region-related code elsewhere; instead, patch these helper functions.
  2582   *       3. To avoid region-related issues, please don't manually build rest.Opts from scratch.
  2583   *          Instead, use these helper function, and customize the URL afterwards if needed.
  2584   *
  2585   *       currently, the Vnet Group's API differs in the following places:
  2586   *       - https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route}
  2587   *           - this API doesn't work (gives invalid request)
  2588   *           - can be replaced with the following API:
  2589   *               - https://{Endpoint}/drives/{driveID}/items/children('{leaf}')/{route}
  2590   *                   - however, this API does NOT support multi-level leaf like a/b/c
  2591   *               - https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'")
  2592   *                   - this API does support multi-level leaf like a/b/c
  2593   *       - https://{Endpoint}/drives/{driveID}/root/children('@a1')/{route}?@a1=URLEncode({path})
  2594   *	         - Same as above
  2595   */
  2596  
  2597  // parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
  2598  // and returns itemID, driveID, rootURL.
  2599  // Such a normalized ID can come from (*Item).GetID()
  2600  func (f *Fs) parseNormalizedID(ID string) (string, string, string) {
  2601  	rootURL := graphAPIEndpoint[f.opt.Region] + "/v1.0/drives"
  2602  	if strings.Contains(ID, "#") {
  2603  		s := strings.Split(ID, "#")
  2604  		return s[1], s[0], rootURL
  2605  	}
  2606  	return ID, "", ""
  2607  }
  2608  
  2609  // newOptsCall build the rest.Opts structure with *a normalizedID(driveID#fileID, or simply fileID)*
  2610  // using url template https://{Endpoint}/drives/{driveID}/items/{itemID}/{route}
  2611  func (f *Fs) newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
  2612  	id, drive, rootURL := f.parseNormalizedID(normalizedID)
  2613  
  2614  	if drive != "" {
  2615  		return rest.Opts{
  2616  			Method:  method,
  2617  			RootURL: rootURL,
  2618  			Path:    "/" + drive + "/items/" + id + route,
  2619  		}
  2620  	}
  2621  	return rest.Opts{
  2622  		Method: method,
  2623  		Path:   "/items/" + id + route,
  2624  	}
  2625  }
  2626  
  2627  func escapeSingleQuote(str string) string {
  2628  	return strings.ReplaceAll(str, "'", "''")
  2629  }
  2630  
// newOptsCallWithIDPath build the rest.Opts structure with *a normalizedID (driveID#fileID, or simply fileID) and leaf*
// using url template https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route} (for international OneDrive)
// or https://{Endpoint}/drives/{driveID}/items/children('{leaf}')/{route}
// and https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'") (for Vnet Group)
// if isPath is false, this function will only work when the leaf is "" or a child name (i.e. it doesn't accept multi-level leaf)
// if isPath is true, multi-level leaf like a/b/c can be passed
//
// ok reports whether normalizedID contained a driveID; when false the
// returned opts address the default drive and the caller may prefer to
// fall back to a path-from-root request instead.
func (f *Fs) newOptsCallWithIDPath(normalizedID string, leaf string, isPath bool, method string, route string) (opts rest.Opts, ok bool) {
	// Pick the encoder matching the shape of leaf (single name vs path)
	encoder := f.opt.Enc.FromStandardName
	if isPath {
		encoder = f.opt.Enc.FromStandardPath
	}
	trueDirID, drive, rootURL := f.parseNormalizedID(normalizedID)
	if drive == "" {
		trueDirID = normalizedID
	}
	// Default (international) form: /items/{id}:/{leaf}:/{route}
	entity := "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(encoder(leaf))) + route
	if f.opt.Region == regionCN {
		// The Vnet Group API doesn't accept the colon form - use the
		// children('...') form instead (see the URL build routine notes above)
		if isPath {
			// children('@a1') with an aliased query parameter supports multi-level leaf
			entity = "/items/" + trueDirID + "/children('@a1')" + route + "?@a1=" + url.QueryEscape("'"+encoder(escapeSingleQuote(leaf))+"'")
		} else {
			entity = "/items/" + trueDirID + "/children('" + rest.URLPathEscape(encoder(escapeSingleQuote(leaf))) + "')" + route
		}
	}
	if drive == "" {
		// No drive in the ID - request is relative to the default drive
		ok = false
		opts = rest.Opts{
			Method: method,
			Path:   entity,
		}
		return
	}
	ok = true
	opts = rest.Opts{
		Method:  method,
		RootURL: rootURL,
		Path:    "/" + drive + entity,
	}
	return
}
  2670  
// newOptsCallWithRootPath build the rest.Opts structure with an *absolute path start from root*
// using url template https://{Endpoint}/drives/{driveID}/root:/{path}:/{route}
// or https://{Endpoint}/drives/{driveID}/root/children('@a1')/{route}?@a1=URLEncode({path})
func (f *Fs) newOptsCallWithRootPath(path string, method string, route string) (opts rest.Opts) {
	// Trailing slashes are not allowed in the item path
	path = strings.TrimSuffix(path, "/")
	newURL := "/root:/" + withTrailingColon(rest.URLPathEscape(f.opt.Enc.FromStandardPath(path))) + route
	if f.opt.Region == regionCN {
		// The Vnet Group API doesn't accept the colon form - use the
		// children('@a1') form instead (see the URL build routine notes above)
		newURL = "/root/children('@a1')" + route + "?@a1=" + url.QueryEscape("'"+escapeSingleQuote(f.opt.Enc.FromStandardPath(path))+"'")
	}
	return rest.Opts{
		Method: method,
		Path:   newURL,
	}
}
  2685  
  2686  // newOptsCallWithPath build the rest.Opt intelligently.
  2687  // It will first try to resolve the path using dircache, which enables support for "Share with me" files.
  2688  // If present in cache, then use ID + Path variant, else fallback into RootPath variant
  2689  func (f *Fs) newOptsCallWithPath(ctx context.Context, path string, method string, route string) (opts rest.Opts) {
  2690  	if path == "" {
  2691  		url := "/root" + route
  2692  		return rest.Opts{
  2693  			Method: method,
  2694  			Path:   url,
  2695  		}
  2696  	}
  2697  
  2698  	// find dircache
  2699  	leaf, directoryID, _ := f.dirCache.FindPath(ctx, path, false)
  2700  	// try to use IDPath variant first
  2701  	if opts, ok := f.newOptsCallWithIDPath(directoryID, leaf, false, method, route); ok {
  2702  		return opts
  2703  	}
  2704  	// fallback to use RootPath variant first
  2705  	return f.newOptsCallWithRootPath(path, method, route)
  2706  }
  2707  
  2708  /*
  2709   *       URL Build routine area end
  2710   */
  2711  
  2712  // Returns the canonical form of the driveID
  2713  func (f *Fs) canonicalDriveID(driveID string) (canonicalDriveID string) {
  2714  	if driveID == "" {
  2715  		canonicalDriveID = f.opt.DriveID
  2716  	} else {
  2717  		canonicalDriveID = driveID
  2718  	}
  2719  	canonicalDriveID = strings.ToLower(canonicalDriveID)
  2720  	return canonicalDriveID
  2721  }
  2722  
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
//
// The Onedrive implementation gives the whole hierarchy up to the  top when
// an object is changed. For instance, if a/b/c is changed, this function
// will call notifyFunc with a, a/b and a/b/c.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
	go func() {
		// get the StartPageToken early so all changes from now on get processed
		nextDeltaToken, err := f.changeNotifyStartPageToken(ctx)
		if err != nil {
			fs.Errorf(f, "Could not get first deltaLink: %s", err)
			return
		}

		fs.Debugf(f, "Next delta token is: %s", nextDeltaToken)

		var ticker *time.Ticker
		var tickerC <-chan time.Time
		for {
			select {
			case pollInterval, ok := <-pollIntervalChan:
				// Channel closed: stop polling and exit the goroutine
				if !ok {
					if ticker != nil {
						ticker.Stop()
					}
					return
				}
				// Replace any existing ticker with one for the new interval
				if ticker != nil {
					ticker.Stop()
					ticker, tickerC = nil, nil
				}
				// A zero interval disables polling (a nil tickerC never fires)
				if pollInterval != 0 {
					ticker = time.NewTicker(pollInterval)
					tickerC = ticker.C
				}
			case <-tickerC:
				fs.Debugf(f, "Checking for changes on remote")
				// Poll once, carrying the delta token forward for the next poll
				nextDeltaToken, err = f.changeNotifyRunner(ctx, notifyFunc, nextDeltaToken)
				if err != nil {
					fs.Infof(f, "Change notify listener failure: %s", err)
				}
			}
		}
	}()
}
  2773  
  2774  func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (nextDeltaToken string, err error) {
  2775  	delta, err := f.changeNotifyNextChange(ctx, "latest")
  2776  	if err != nil {
  2777  		return
  2778  	}
  2779  	parsedURL, err := url.Parse(delta.DeltaLink)
  2780  	if err != nil {
  2781  		return
  2782  	}
  2783  	nextDeltaToken = parsedURL.Query().Get("token")
  2784  	return
  2785  }
  2786  
  2787  func (f *Fs) changeNotifyNextChange(ctx context.Context, token string) (delta api.DeltaResponse, err error) {
  2788  	opts := f.buildDriveDeltaOpts(token)
  2789  
  2790  	_, err = f.srv.CallJSON(ctx, &opts, nil, &delta)
  2791  
  2792  	return
  2793  }
  2794  
  2795  func (f *Fs) buildDriveDeltaOpts(token string) rest.Opts {
  2796  	rootURL := graphAPIEndpoint[f.opt.Region] + "/v1.0/drives"
  2797  
  2798  	return rest.Opts{
  2799  		Method:     "GET",
  2800  		RootURL:    rootURL,
  2801  		Path:       "/" + f.driveID + "/root/delta",
  2802  		Parameters: map[string][]string{"token": {token}},
  2803  	}
  2804  }
  2805  
// changeNotifyRunner fetches one batch of changes using deltaToken, calls
// notifyFunc for each changed file or folder inside f.root, and returns
// the delta token to use for the next poll.
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), deltaToken string) (nextDeltaToken string, err error) {
	delta, err := f.changeNotifyNextChange(ctx, deltaToken)
	if err != nil {
		return
	}
	// The next token is carried in the "token" query parameter of the deltaLink URL
	parsedURL, err := url.Parse(delta.DeltaLink)
	if err != nil {
		return
	}
	nextDeltaToken = parsedURL.Query().Get("token")

	for _, item := range delta.Value {
		// An empty parent ID marks the drive root folder itself - not a change to report
		isDriveRootFolder := item.GetParentReference().ID == ""
		if isDriveRootFolder {
			continue
		}

		fullPath, err := getItemFullPath(&item)
		if err != nil {
			fs.Errorf(f, "Could not get item full path: %s", err)
			continue
		}

		// Skip the root itself; only entries inside it are reported
		if fullPath == f.root {
			continue
		}

		relName, insideRoot := getRelativePathInsideBase(f.root, fullPath)
		if !insideRoot {
			continue
		}

		// Report files and folders; any other item type is ignored
		if item.GetFile() != nil {
			notifyFunc(relName, fs.EntryObject)
		} else if item.GetFolder() != nil {
			notifyFunc(relName, fs.EntryDirectory)
		}
	}

	return
}
  2847  
  2848  func getItemFullPath(item *api.Item) (fullPath string, err error) {
  2849  	err = nil
  2850  	fullPath = item.GetName()
  2851  	if parent := item.GetParentReference(); parent != nil && parent.Path != "" {
  2852  		pathParts := strings.SplitN(parent.Path, ":", 2)
  2853  		if len(pathParts) != 2 {
  2854  			err = fmt.Errorf("invalid parent path: %s", parent.Path)
  2855  			return
  2856  		}
  2857  
  2858  		if pathParts[1] != "" {
  2859  			fullPath = strings.TrimPrefix(pathParts[1], "/") + "/" + fullPath
  2860  		}
  2861  	}
  2862  	return
  2863  }
  2864  
  2865  // getRelativePathInsideBase checks if `target` is inside `base`. If so, it
  2866  // returns a relative path for `target` based on `base` and a boolean `true`.
  2867  // Otherwise returns "", false.
  2868  func getRelativePathInsideBase(base, target string) (string, bool) {
  2869  	if base == "" {
  2870  		return target, true
  2871  	}
  2872  
  2873  	baseSlash := base + "/"
  2874  	if strings.HasPrefix(target+"/", baseSlash) {
  2875  		return target[len(baseSlash):], true
  2876  	}
  2877  	return "", false
  2878  }
  2879  
  2880  // Adds a ":" at the end of `remotePath` in a proper manner.
  2881  // If `remotePath` already ends with "/", change it to ":/"
  2882  // If `remotePath` is "", return "".
  2883  // A workaround for #2720 and #3039
  2884  func withTrailingColon(remotePath string) string {
  2885  	if remotePath == "" {
  2886  		return ""
  2887  	}
  2888  
  2889  	if strings.HasSuffix(remotePath, "/") {
  2890  		return remotePath[:len(remotePath)-1] + ":/"
  2891  	}
  2892  	return remotePath + ":"
  2893  }
  2894  
  2895  // Check the interfaces are satisfied
  2896  var (
  2897  	_ fs.Fs              = (*Fs)(nil)
  2898  	_ fs.Purger          = (*Fs)(nil)
  2899  	_ fs.Copier          = (*Fs)(nil)
  2900  	_ fs.Mover           = (*Fs)(nil)
  2901  	_ fs.DirMover        = (*Fs)(nil)
  2902  	_ fs.DirCacheFlusher = (*Fs)(nil)
  2903  	_ fs.Abouter         = (*Fs)(nil)
  2904  	_ fs.PublicLinker    = (*Fs)(nil)
  2905  	_ fs.CleanUpper      = (*Fs)(nil)
  2906  	_ fs.ListRer         = (*Fs)(nil)
  2907  	_ fs.Shutdowner      = (*Fs)(nil)
  2908  	_ fs.Object          = (*Object)(nil)
  2909  	_ fs.MimeTyper       = &Object{}
  2910  	_ fs.IDer            = &Object{}
  2911  	_ fs.Metadataer      = (*Object)(nil)
  2912  	_ fs.Metadataer      = (*Directory)(nil)
  2913  	_ fs.SetModTimer     = (*Directory)(nil)
  2914  	_ fs.SetMetadataer   = (*Directory)(nil)
  2915  	_ fs.MimeTyper       = &Directory{}
  2916  	_ fs.DirSetModTimer  = (*Fs)(nil)
  2917  	_ fs.MkdirMetadataer = (*Fs)(nil)
  2918  )