github.com/xhghs/rclone@v1.51.1-0.20200430155106-e186a28cced8/backend/sharefile/sharefile.go

// Package sharefile provides an interface to the Citrix Sharefile
// object storage system.
package sharefile

//go:generate ./update-timezone.sh

/* NOTES

## for docs

Detail standard/chunked/streaming uploads?

## Bugs in API

The times in updateItem are being parsed in EST/DST local time
updateItem only sets times accurate to 1 second

https://community.sharefilesupport.com/citrixsharefile/topics/bug-report-for-update-item-patch-items-id-setting-clientmodifieddate-ignores-timezone-and-milliseconds

When doing a rename+move directory, the server appears to do the
rename first in the source directory, which can overwrite files of
the same name there.

https://community.sharefilesupport.com/citrixsharefile/topics/bug-report-for-update-item-patch-items-id-file-overwrite-under-certain-conditions

The Copy command can't change the name at the same time which means we
have to copy via a temporary directory.

https://community.sharefilesupport.com/citrixsharefile/topics/copy-item-needs-to-be-able-to-set-a-new-name

## Allowed characters

https://api.sharefile.com/rest/index/odata.aspx

$select to limit returned fields
https://www.odata.org/documentation/odata-version-3-0/odata-version-3-0-core-protocol/#theselectsystemqueryoption

Also $filter to select only things we need

https://support.citrix.com/article/CTX234774

The following characters should not be used in folder or file names.

\
/
.
,
:
;
*
?
"
<
>
A filename ending with a period without an extension
File names with leading or trailing whitespace.
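
As a rough illustration of how rclone handles these (see the encoding
option registered below and the probe results that follow): with the
default encoder flags the offending characters are replaced with
similar-looking fullwidth Unicode characters on upload and mapped back
on download, so a name such as

    ver*sion: final?

would be stored as something like

    ver＊sion： final？

The exact substitutions depend on the configured encoder flags, so
treat this as an example rather than a specification.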

// sharefile
stringNeedsEscaping = []byte{
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x2A, 0x2E, 0x2F, 0x3A, 0x3C, 0x3E, 0x3F, 0x7C, 0xEFBCBC
}
maxFileLength = 256
canWriteUnnormalized = true
canReadUnnormalized = true
canReadRenormalized = false
canStream = true

Which is control chars + [' ', '*', '.', '/', ':', '<', '>', '?', '|']
- also \ and "

*/

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"path"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/sharefile/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/random"
	"github.com/rclone/rclone/lib/rest"
	"golang.org/x/oauth2"
)

const (
	rcloneClientID              = "djQUPlHTUM9EvayYBWuKC5IrVIoQde46"
	rcloneEncryptedClientSecret = "v7572bKhUindQL3yDnUAebmgP-QxiwT38JLxVPolcZBl6SSs329MtFzH73x7BeELmMVZtneUPvALSopUZ6VkhQ"
	minSleep                    = 10 * time.Millisecond
	maxSleep                    = 2 * time.Second
	decayConstant               = 2              // bigger for slower decay, exponential
	apiPath                     = "/sf/v3"       // add to endpoint to get API path
	tokenPath                   = "/oauth/token" // add to endpoint to get Token path
	minChunkSize                = 256 * fs.KibiByte
	maxChunkSize                = 2 * fs.GibiByte
	defaultChunkSize            = 64 * fs.MebiByte
	defaultUploadCutoff         = 128 * fs.MebiByte
)

// Generate a new oauth2 config which we will update when we know the TokenURL
func newOauthConfig(tokenURL string) *oauth2.Config {
	return &oauth2.Config{
		Scopes: nil,
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://secure.sharefile.com/oauth/authorize",
			TokenURL: tokenURL,
		},
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectPublicSecureURL,
	}
}

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "sharefile",
		Description: "Citrix Sharefile",
		NewFs:       NewFs,
		Config: func(name string, m configmap.Mapper) {
			oauthConfig := newOauthConfig("")
			checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
				if auth == nil || auth.Form == nil {
					return errors.New("endpoint not found in response")
				}
				subdomain := auth.Form.Get("subdomain")
				apicp := auth.Form.Get("apicp")
				if subdomain == "" || apicp == "" {
					return errors.Errorf("subdomain or apicp not found in response: %+v", auth.Form)
				}
				endpoint := "https://" + subdomain + "." + apicp
				m.Set("endpoint", endpoint)
				oauthConfig.Endpoint.TokenURL = endpoint + tokenPath
				return nil
			}
			err := oauthutil.ConfigWithCallback("sharefile", name, m, oauthConfig, checkAuth)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name:     "upload_cutoff",
			Help:     "Cutoff for switching to multipart upload.",
			Default:  defaultUploadCutoff,
			Advanced: true,
		}, {
			Name: "root_folder_id",
			Help: `ID of the root folder

Leave blank to access "Personal Folders".  You can use one of the
standard values here or any folder ID (long hex number ID).`,
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  `Access the Personal Folders. (Default)`,
			}, {
				Value: "favorites",
				Help:  "Access the Favorites folder.",
			}, {
				Value: "allshared",
				Help:  "Access all the shared folders.",
			}, {
				Value: "connectors",
				Help:  "Access all the individual connectors.",
			}, {
				Value: "top",
				Help:  "Access the home, favorites, and shared folders as well as the connectors.",
			}},
		}, {
			Name:    "chunk_size",
			Default: defaultChunkSize,
			Help: `Upload chunk size. Must be a power of 2 >= 256k.

Making this larger will improve performance, but note that each chunk
is buffered in memory, one per transfer.

Reducing this will reduce memory usage but decrease performance.`,
			Advanced: true,
		}, {
			Name: "endpoint",
			Help: `Endpoint for API calls.

This is usually auto discovered as part of the oauth process, but can
be set manually to something like: https://XXX.sharefile.com
`,
			Advanced: true,
			Default:  "",
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			Default: (encoder.Base |
				encoder.EncodeWin | // :?"*<>|
				encoder.EncodeBackSlash | // \
				encoder.EncodeCtl |
				encoder.EncodeRightSpace |
				encoder.EncodeRightPeriod |
				encoder.EncodeLeftSpace |
				encoder.EncodeLeftPeriod |
				encoder.EncodeInvalidUtf8),
		}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	RootFolderID string               `config:"root_folder_id"`
	UploadCutoff fs.SizeSuffix        `config:"upload_cutoff"`
	ChunkSize    fs.SizeSuffix        `config:"chunk_size"`
	Endpoint     string               `config:"endpoint"`
	Enc          encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote cloud storage system
type Fs struct {
	name         string             // name of this remote
	root         string             // the path we are working on
	opt          Options            // parsed options
	features     *fs.Features       // optional features
	srv          *rest.Client       // the connection to the server
	dirCache     *dircache.DirCache // Map of directory path to directory id
	pacer        *fs.Pacer          // pacer for API calls
	bufferTokens chan []byte        // control concurrency of multipart uploads
	tokenRenewer *oauthutil.Renew   // renew the token on expiry
	rootID       string             // ID of the user's root folder
	location     *time.Location     // timezone of server for SetModTime workaround
}

// Object describes a file
type Object struct {
	fs          *Fs       // what this object is part of
	remote      string    // The remote path
	hasMetaData bool      // metadata is present and correct
	size        int64     // size of the object
	modTime     time.Time // modification time of the object
	id          string    // ID of the object
	md5         string    // hash of the object
}
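
// For reference, the Options above map onto a config section that, once the
// oauth flow registered in init() has run, typically looks something like the
// sketch below.  The remote name "mysharefile" and the token value are
// illustrative only; endpoint and token are normally filled in automatically
// by "rclone config".
//
//	[mysharefile]
//	type = sharefile
//	endpoint = https://XXX.sharefile.com
//	upload_cutoff = 128M
//	chunk_size = 64M
//	token = {"access_token":"...","refresh_token":"...","expiry":"..."}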

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("sharefile root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// parsePath parses a sharefile 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried.  It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// readMetaDataForIDPath reads the metadata for the id passed in.  If
// id is "" then it returns the root.  If path is not "" then the
// lookup uses id as the root and path is relative to it.
func (f *Fs) readMetaDataForIDPath(ctx context.Context, id, path string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   "/Items",
		Parameters: url.Values{
			"$select": {api.ListRequestSelect},
		},
	}
	if id != "" {
		opts.Path += "(" + id + ")"
	}
	if path != "" {
		opts.Path += "/ByPath"
		opts.Parameters.Set("path", "/"+f.opt.Enc.FromStandardPath(path))
	}
	var item api.Item
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &item)
		return shouldRetry(resp, err)
	})
	if err != nil {
		if resp != nil && resp.StatusCode == http.StatusNotFound {
			if filesOnly {
				return nil, fs.ErrorObjectNotFound
			}
			return nil, fs.ErrorDirNotFound
		}
		return nil, errors.Wrap(err, "couldn't find item")
	}
	if directoriesOnly && item.Type != api.ItemTypeFolder {
		return nil, fs.ErrorIsFile
	}
	if filesOnly && item.Type != api.ItemTypeFile {
		return nil, fs.ErrorNotAFile
	}
	return &item, nil
}
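
// As an illustration, these are the three request shapes readMetaDataForIDPath
// builds from the code above (query strings shown informally, IDs made up):
//
//	GET /Items(abc123)?$select=...                  id only   -> metadata for item abc123
//	GET /Items/ByPath?path=/a/b&$select=...         path only -> lookup relative to the root
//	GET /Items(abc123)/ByPath?path=/c&$select=...   both      -> lookup of c under abc123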

// readMetaDataForID reads the metadata for the id passed in.  If id
// is "" then it returns the root.
func (f *Fs) readMetaDataForID(ctx context.Context, id string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) {
	return f.readMetaDataForIDPath(ctx, id, "", directoriesOnly, filesOnly)
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) {
	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			return nil, fs.ErrorObjectNotFound
		}
		return nil, err
	}
	return f.readMetaDataForIDPath(ctx, directoryID, leaf, directoriesOnly, filesOnly)
}

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
	body, err := rest.ReadBody(resp)
	if err != nil {
		body = nil
	}
	var e = api.Error{
		Code:   fmt.Sprint(resp.StatusCode),
		Reason: resp.Status,
	}
	e.Message.Lang = "en"
	e.Message.Value = string(body)
	if body != nil {
		_ = json.Unmarshal(body, &e)
	}
	return &e
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if cs < minChunkSize {
		return errors.Errorf("ChunkSize: %s is less than %s", cs, minChunkSize)
	}
	if cs > maxChunkSize {
		return errors.Errorf("ChunkSize: %s is greater than %s", cs, maxChunkSize)
	}
	return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadChunkSize(cs)
	if err == nil {
		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
		f.fillBufferTokens() // reset the buffer tokens
	}
	return
}

func checkUploadCutoff(cs fs.SizeSuffix) error {
	return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadCutoff(cs)
	if err == nil {
		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
	}
	return
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	ctx := context.Background()
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	// Check parameters OK
	if opt.Endpoint == "" {
		return nil, errors.New("endpoint not set: rebuild the remote or set manually")
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, err
	}
	err = checkUploadCutoff(opt.UploadCutoff)
	if err != nil {
		return nil, err
	}

	root = parsePath(root)

	oauthConfig := newOauthConfig(opt.Endpoint + tokenPath)
	var client *http.Client
	var ts *oauthutil.TokenSource
	client, ts, err = oauthutil.NewClient(name, m, oauthConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure sharefile")
	}

	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		srv:   rest.NewClient(client).SetRoot(opt.Endpoint + apiPath),
		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		CanHaveEmptyDirectories: true,
		ReadMimeType:            false,
	}).Fill(f)
	f.srv.SetErrorHandler(errorHandler)
	f.fillBufferTokens()

	// Renew the token in the background
	if ts != nil {
		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
			_, err := f.List(ctx, "")
			return err
		})
	}

	// Load the server timezone from an internal file
	// Used to correct the time in SetModTime
	const serverTimezone = "America/New_York"
	timezone, err := tzdata.Open(serverTimezone)
	if err != nil {
		return nil, errors.Wrap(err, "failed to open timezone db")
	}
	tzdata, err := ioutil.ReadAll(timezone)
	if err != nil {
		return nil, errors.Wrap(err, "failed to read timezone")
	}
	_ = timezone.Close()
	f.location, err = time.LoadLocationFromTZData(serverTimezone, tzdata)
	if err != nil {
		return nil, errors.Wrap(err, "failed to load location from timezone")
	}

	// Find ID of user's root folder
	if opt.RootFolderID == "" {
		item, err := f.readMetaDataForID(ctx, opt.RootFolderID, true, false)
		if err != nil {
			return nil, errors.Wrap(err, "couldn't find root ID")
		}
		f.rootID = item.ID
	} else {
		f.rootID = opt.RootFolderID
	}

	// Get rootID
	f.dirCache = dircache.New(root, f.rootID, f)

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, f.rootID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		f.features.Fill(&tempF)
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}

// Fill up (or reset) the buffer tokens
func (f *Fs) fillBufferTokens() {
	f.bufferTokens = make(chan []byte, fs.Config.Transfers)
	for i := 0; i < fs.Config.Transfers; i++ {
		f.bufferTokens <- nil
	}
}

// getUploadBlock gets a block from the pool of size chunkSize
func (f *Fs) getUploadBlock() []byte {
	buf := <-f.bufferTokens
	if buf == nil {
		buf = make([]byte, f.opt.ChunkSize)
	}
	// fs.Debugf(f, "Getting upload block %p", buf)
	return buf
}

// putUploadBlock returns a block to the pool of size chunkSize
func (f *Fs) putUploadBlock(buf []byte) {
	buf = buf[:cap(buf)]
	if len(buf) != int(f.opt.ChunkSize) {
		panic("bad blocksize returned to pool")
	}
	// fs.Debugf(f, "Returning upload block %p", buf)
	f.bufferTokens <- buf
}
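
// The three functions above implement a simple token/buffer pool: at most
// fs.Config.Transfers chunk-sized buffers exist at any one time, and buffers
// are recycled between uploads.  The sketch below is not called from anywhere
// in this backend and the helper name is invented; it only illustrates the
// acquire/use/release pattern a chunked uploader would follow.
func (f *Fs) exampleReadChunk(in io.Reader) (buf []byte, n int, release func(), err error) {
	buf = f.getUploadBlock()                    // may block until a token is free
	release = func() { f.putUploadBlock(buf) }  // caller must call this when done with buf
	n, err = io.ReadFull(in, buf)
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		// a short read just means this was the final chunk
		err = nil
	}
	return buf, n, release, err
}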

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	var err error
	if info != nil {
		// Set info
		err = o.setMetaData(info)
	} else {
		err = o.readMetaData(ctx) // reads info and meta, returning an error
	}
	if err != nil {
		return nil, err
	}
	return o, nil
}

// NewObject finds the Object at remote.  If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
	if pathID == "top" {
		// Find the leaf in pathID
		found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
			if item.Name == leaf {
				pathIDOut = item.ID
				return true
			}
			return false
		})
		return pathIDOut, found, err
	}
	info, err := f.readMetaDataForIDPath(ctx, pathID, leaf, true, false)
	if err == nil {
		found = true
		pathIDOut = info.ID
	} else if err == fs.ErrorDirNotFound {
		err = nil // don't return an error if not found
	}
	return pathIDOut, found, err
}

// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
	var resp *http.Response
	leaf = f.opt.Enc.FromStandardName(leaf)
	var req = api.Item{
		Name:      leaf,
		FileName:  leaf,
		CreatedAt: time.Now(),
	}
	var info api.Item
	opts := rest.Opts{
		Method: "POST",
		Path:   "/Items(" + pathID + ")/Folder",
		Parameters: url.Values{
			"$select":     {api.ListRequestSelect},
			"overwrite":   {"false"},
			"passthrough": {"false"},
		},
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &req, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return "", errors.Wrap(err, "CreateDir")
	}
	return info.ID, nil
}

// listAllFn is the type of the user function passed to listAll to
// process each item found.
//
// It should return true to finish processing early.
type listAllFn func(*api.Item) bool

// listAll lists the directory required, calling the user function on
// each item found.
//
// If directoriesOnly is set it only sends directories (and likewise
// for filesOnly).  If the user fn ever returns true then it exits
// early with found = true.
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   "/Items(" + dirID + ")/Children",
		Parameters: url.Values{
			"$select": {api.ListRequestSelect},
		},
	}

	var result api.ListResponse
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return found, errors.Wrap(err, "couldn't list files")
	}
	for i := range result.Value {
		item := &result.Value[i]
		if item.Type == api.ItemTypeFolder {
			if filesOnly {
				continue
			}
		} else if item.Type == api.ItemTypeFile {
			if directoriesOnly {
				continue
			}
		} else {
			fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
			continue
		}
		item.Name = f.opt.Enc.ToStandardName(item.Name)
		if fn(item) {
			found = true
			break
		}
	}

	return
}

// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		return nil, err
	}
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}
	var iErr error
	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
		remote := path.Join(dir, info.Name)
		if info.Type == api.ItemTypeFolder {
			// cache the directory ID for later lookups
			f.dirCache.Put(remote, info.ID)
			d := fs.NewDir(remote, info.CreatedAt).SetID(info.ID).SetSize(info.Size).SetItems(int64(info.FileCount))
			entries = append(entries, d)
		} else if info.Type == api.ItemTypeFile {
			o, err := f.newObjectWithInfo(ctx, remote, info)
			if err != nil {
				iErr = err
				return true
			}
			entries = append(entries, o)
		}
		return false
	})
	if err != nil {
		return nil, err
	}
	if iErr != nil {
		return nil, iErr
	}
	return entries, nil
}

// createObject creates a half finished Object from the parameters
// passed in; setMetaData must be called on it before use.
//
// Returns the object, leaf, directoryID and error.
//
// Used to create new objects.
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
	// Create the directory for the object if it doesn't exist
	leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
	if err != nil {
		return
	}
	// Temporary Object under construction
	o = &Object{
		fs:     f,
		remote: remote,
	}
	return o, leaf, directoryID, nil
}

// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
	switch err {
	case nil:
		return existingObj, existingObj.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		// Not found so create it
		return f.PutUnchecked(ctx, in, src)
	default:
		return nil, err
	}
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// PutUnchecked the object into the container
//
// This will produce an error if the object already exists
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime(ctx)

	o, _, _, err := f.createObject(ctx, remote, modTime, size)
	if err != nil {
		return nil, err
	}
	return o, o.Update(ctx, in, src, options...)
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	err := f.dirCache.FindRoot(ctx, true)
	if err != nil {
		return err
	}
	if dir != "" {
		_, err = f.dirCache.FindDir(ctx, dir, true)
	}
	return err
}

// purgeCheck removes the directory.  If check is set then it refuses
// to do so if it has anything in it.
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	root := path.Join(f.root, dir)
	if root == "" {
		return errors.New("can't purge root directory")
	}
	dc := f.dirCache
	err := dc.FindRoot(ctx, false)
	if err != nil {
		return err
	}
	rootID, err := dc.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}

	// need to check if empty as it will delete recursively by default
	if check {
		found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
			return true
		})
		if err != nil {
			return errors.Wrap(err, "purgeCheck")
		}
		if found {
			return fs.ErrorDirectoryNotEmpty
		}
	}

	err = f.remove(ctx, rootID)
	f.dirCache.FlushDir(dir)
	if err != nil {
		return err
	}
	return nil
}

// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, true)
}

// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
	// sharefile returns times accurate to the millisecond, but
	// for some reason these seem only accurate to 2ms.
	// updateItem seems to only set times accurate to 1 second though.
	return time.Second // this doesn't appear to be documented anywhere
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
	return f.purgeCheck(ctx, "", false)
}

// updateItem patches a file or folder
//
// If leaf is "", directoryID is "" or modTime is nil then that
// attribute is left alone.
//
// Note that this seems to work by renaming first, then moving to a
// new directory which means that it can overwrite existing objects
// :-(
func (f *Fs) updateItem(ctx context.Context, id, leaf, directoryID string, modTime *time.Time) (info *api.Item, err error) {
	// Move the object
	opts := rest.Opts{
		Method: "PATCH",
		Path:   "/Items(" + id + ")",
		Parameters: url.Values{
			"$select":   {api.ListRequestSelect},
			"overwrite": {"false"},
		},
	}
	leaf = f.opt.Enc.FromStandardName(leaf)
	// FIXME this appears to be a bug in the API
	//
	// If you set the modified time via PATCH then the server
	// appears to parse it as a local time for America/New_York
	//
	// However if you set it when uploading the file then it is fine...
	//
	// Also it only sets the time to 1 second resolution where it
	// uses 1ms resolution elsewhere
	if modTime != nil && f.location != nil {
		newTime := modTime.In(f.location)
		isoTime := newTime.Format(time.RFC3339Nano)
		// Chop TZ -05:00 off the end and replace with Z
		isoTime = isoTime[:len(isoTime)-6] + "Z"
		// Parse it back into a time
		newModTime, err := time.Parse(time.RFC3339Nano, isoTime)
		if err != nil {
			return nil, errors.Wrap(err, "updateItem: time parse")
		}
		modTime = &newModTime
	}
	update := api.UpdateItemRequest{
		Name:       leaf,
		FileName:   leaf,
		ModifiedAt: modTime,
	}
	if directoryID != "" {
		update.Parent = &api.Parent{
			ID: directoryID,
		}
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &update, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	return info, nil
}
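
// Worked example of the time juggling above (illustrative only, using the
// server location America/New_York at UTC-5): a desired modification time of
//
//	2020-01-02T03:04:05Z
//
// is first rendered in server local time as 2020-01-01T22:04:05-05:00, then
// the "-05:00" offset is chopped off and replaced with "Z", so the request
// carries 2020-01-01T22:04:05Z.  The server mis-parses that as New York local
// time, which lands back on the intended instant 2020-01-02T03:04:05Z.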

// move a file or folder
//
// This is complicated by the fact that we can't use updateItem to move
// to a different directory AND rename at the same time as it can
// overwrite files in the source directory.
func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDirectoryID, newDirectoryID string) (item *api.Item, err error) {
	// To demonstrate bug
	// item, err = f.updateItem(ctx, id, newLeaf, newDirectoryID, nil)
	// if err != nil {
	// 	return nil, errors.Wrap(err, "Move rename leaf")
	// }
	// return item, nil
	doRenameLeaf := oldLeaf != newLeaf
	doMove := oldDirectoryID != newDirectoryID

	// Now rename the leaf to a temporary name if we are moving to
	// another directory to make sure we don't overwrite something
	// in the source directory by accident
	if doRenameLeaf && doMove {
		tmpLeaf := newLeaf + "." + random.String(8)
		item, err = f.updateItem(ctx, id, tmpLeaf, "", nil)
		if err != nil {
			return nil, errors.Wrap(err, "Move rename leaf")
		}
	}

	// Move the object to a new directory (with the existing name)
	// if required
	if doMove {
		item, err = f.updateItem(ctx, id, "", newDirectoryID, nil)
		if err != nil {
			return nil, errors.Wrap(err, "Move directory")
		}
	}

	// Rename the leaf to its final name if required
	if doRenameLeaf {
		item, err = f.updateItem(ctx, id, newLeaf, "", nil)
		if err != nil {
			return nil, errors.Wrap(err, "Move rename leaf")
		}
	}

	return item, nil
}

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Find ID of src parent, not creating subdirs
	srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
	if err != nil {
		return nil, err
	}

	// Create temporary object
	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
	if err != nil {
		return nil, err
	}

	// Do the move
	info, err := f.move(ctx, true, srcObj.id, srcLeaf, leaf, srcParentID, directoryID)
	if err != nil {
		return nil, err
	}

	err = dstObj.setMetaData(info)
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := path.Join(srcFs.root, srcRemote)
	dstPath := path.Join(f.root, dstRemote)

	// Refuse to move to or from the root
	if srcPath == "" || dstPath == "" {
		fs.Debugf(src, "DirMove error: Can't move root")
		return errors.New("can't move root directory")
	}

	// find the root src directory
	err := srcFs.dirCache.FindRoot(ctx, false)
	if err != nil {
		return err
	}

	// find the root dst directory
	if dstRemote != "" {
		err = f.dirCache.FindRoot(ctx, true)
		if err != nil {
			return err
		}
	} else {
		if f.dirCache.FoundRoot() {
			return fs.ErrorDirExists
		}
	}

	// Find ID of dst parent, creating subdirs if necessary
	var leaf, directoryID string
	findPath := dstRemote
	if dstRemote == "" {
		findPath = f.root
	}
	leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
	if err != nil {
		return err
	}

	// Check destination does not exist
	if dstRemote != "" {
		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
		if err == fs.ErrorDirNotFound {
			// OK
		} else if err != nil {
			return err
		} else {
			return fs.ErrorDirExists
		}
	}

	// Find ID of src
	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
	if err != nil {
		return err
	}

	// Find ID of src parent, not creating subdirs
	var srcLeaf, srcDirectoryID string
	findPath = srcRemote
	if srcRemote == "" {
		findPath = srcFs.root
	}
	srcLeaf, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
	if err != nil {
		return err
	}

	// Do the move
	_, err = f.move(ctx, false, srcID, srcLeaf, leaf, srcDirectoryID, directoryID)
	if err != nil {
		return err
	}
	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	err = srcObj.readMetaData(ctx)
	if err != nil {
		return nil, err
	}

	// Find ID of src parent, not creating subdirs
	srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
	if err != nil {
		return nil, err
	}
	srcLeaf = f.opt.Enc.FromStandardName(srcLeaf)
	_ = srcParentID

	// Create temporary object
	dstObj, dstLeaf, dstParentID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
	if err != nil {
		return nil, err
	}
	dstLeaf = f.opt.Enc.FromStandardName(dstLeaf)

	sameName := strings.ToLower(srcLeaf) == strings.ToLower(dstLeaf)
	if sameName && srcParentID == dstParentID {
		return nil, errors.Errorf("copy: can't copy to a file in the same directory whose name only differs in case: %q vs %q", srcLeaf, dstLeaf)
	}

	// Discover whether we can just copy directly or not
	directCopy := false
	if sameName {
		// if copying to same name can copy directly
		directCopy = true
	} else {
		// if (dstParentID, srcLeaf) does not exist then we can
		// Copy then Rename without fear of overwriting
		// something
		_, err := f.readMetaDataForIDPath(ctx, dstParentID, srcLeaf, false, false)
		if err == fs.ErrorObjectNotFound || err == fs.ErrorDirNotFound {
			directCopy = true
		} else if err != nil {
			return nil, errors.Wrap(err, "copy: failed to examine destination dir")
		} else {
			// otherwise need to copy via a temporary directory
		}
	}

	// Copy direct to destination unless !directCopy in which case
	// copy via a temporary directory
	copyTargetDirID := dstParentID
	if !directCopy {
		// Create a temporary directory to copy the object in to
		tmpDir := "rclone-temp-dir-" + random.String(16)
		err = f.Mkdir(ctx, tmpDir)
		if err != nil {
			return nil, errors.Wrap(err, "copy: failed to make temp dir")
		}
		defer func() {
			rmdirErr := f.Rmdir(ctx, tmpDir)
			if rmdirErr != nil && err == nil {
				err = errors.Wrap(rmdirErr, "copy: failed to remove temp dir")
			}
		}()
		tmpDirID, err := f.dirCache.FindDir(ctx, tmpDir, false)
		if err != nil {
			return nil, errors.Wrap(err, "copy: failed to find temp dir")
		}
		copyTargetDirID = tmpDirID
	}

	// Copy the object
	opts := rest.Opts{
		Method: "POST",
		Path:   "/Items(" + srcObj.id + ")/Copy",
		Parameters: url.Values{
			"$select":   {api.ListRequestSelect},
			"overwrite": {"false"},
			"targetid":  {copyTargetDirID},
		},
	}
	var resp *http.Response
	var info *api.Item
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}

	// Rename into the correct name and directory if required and
	// set the modtime since the copy doesn't preserve it
	var updateParentID, updateLeaf string // only set these if necessary
	if srcLeaf != dstLeaf {
		updateLeaf = dstLeaf
	}
	if !directCopy {
		updateParentID = dstParentID
	}
	// set new modtime regardless
	info, err = f.updateItem(ctx, info.ID, updateLeaf, updateParentID, &srcObj.modTime)
	if err != nil {
		return nil, err
	}
	err = dstObj.setMetaData(info)
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}

// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	f.dirCache.ResetRoot()
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	err := o.readMetaData(ctx)
	if err != nil {
		return "", err
	}
	return o.md5, nil
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	err := o.readMetaData(context.TODO())
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return 0
	}
	return o.size
}

// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
	if info.Type != api.ItemTypeFile {
		return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
	}
	o.hasMetaData = true
	o.size = info.Size
	if !info.ModifiedAt.IsZero() {
		o.modTime = info.ModifiedAt
	} else {
		o.modTime = info.CreatedAt
	}
	o.id = info.ID
	o.md5 = info.Hash
	return nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
	if o.hasMetaData {
		return nil
	}
	var info *api.Item
	if o.id != "" {
		info, err = o.fs.readMetaDataForID(ctx, o.id, false, true)
	} else {
		info, err = o.fs.readMetaDataForPath(ctx, o.remote, false, true)
	}
	if err != nil {
		return err
	}
	return o.setMetaData(info)
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	err := o.readMetaData(ctx)
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()
	}
	return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	info, err := o.fs.updateItem(ctx, o.id, "", "", &modTime)
	if err != nil {
		return err
	}
	err = o.setMetaData(info)
	if err != nil {
		return err
	}
	return nil
}

// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   "/Items(" + o.id + ")/Download",
		Parameters: url.Values{
			"redirect": {"false"},
		},
	}
	var resp *http.Response
	var dl api.DownloadSpecification
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &dl)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "open: fetch download specification")
	}

	fs.FixRangeOption(options, o.size)
	opts = rest.Opts{
		Path:    "",
		RootURL: dl.URL,
		Method:  "GET",
		Options: options,
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "open")
	}
	return resp.Body, err
}

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	remote := o.Remote()
	size := src.Size()
	modTime := src.ModTime(ctx)
	isLargeFile := size < 0 || size > int64(o.fs.opt.UploadCutoff)

	// Create the directory for the object if it doesn't exist
	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
	if err != nil {
		return err
	}
	leaf = o.fs.opt.Enc.FromStandardName(leaf)
	var req = api.UploadRequest{
		Method:       "standard",
		Raw:          true,
		Filename:     leaf,
		Overwrite:    true,
		CreatedDate:  modTime,
		ModifiedDate: modTime,
		Tool:         fs.Config.UserAgent,
	}

	if isLargeFile {
		if size < 0 {
			// For files of indeterminate size, use streamed
			req.Method = "streamed"
		} else {
			// otherwise use threaded which is more efficient
			req.Method = "threaded"
			req.ThreadCount = &fs.Config.Transfers
			req.Filesize = &size
		}
	}

	var resp *http.Response
	var info api.UploadSpecification
	opts := rest.Opts{
		Method: "POST",
		Path:   "/Items(" + directoryID + ")/Upload2",
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, &req, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "upload get specification")
	}

	// If file is large then upload in parts
	if isLargeFile {
		up, err := o.fs.newLargeUpload(ctx, o, in, src, &info)
		if err != nil {
			return err
		}
		return up.Upload(ctx)
	}

	// Single part upload
	opts = rest.Opts{
		Method:        "POST",
		RootURL:       info.ChunkURI + "&fmt=json",
		Body:          in,
		ContentLength: &size,
	}
	var finish api.UploadFinishResponse
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &finish)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "upload file")
	}
	return o.checkUploadResponse(ctx, &finish)
}
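
// To summarise the upload paths chosen above with the default options
// (upload_cutoff 128M, chunk_size 64M) - this describes the code, not any
// documented server behaviour:
//
//	size known and <= upload_cutoff  -> Method "standard", single POST of the body
//	size known and >  upload_cutoff  -> Method "threaded", chunked upload via newLargeUpload
//	size unknown (streaming uploads) -> Method "streamed", chunked upload via newLargeUpload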

// Check the upload response and update the metadata on the object
func (o *Object) checkUploadResponse(ctx context.Context, finish *api.UploadFinishResponse) (err error) {
	// Find returned ID
	id, err := finish.ID()
	if err != nil {
		return err
	}

	// Read metadata
	o.id = id
	o.hasMetaData = false
	return o.readMetaData(ctx)
}

// Remove an object by ID
func (f *Fs) remove(ctx context.Context, id string) (err error) {
	opts := rest.Opts{
		Method: "DELETE",
		Path:   "/Items(" + id + ")",
		Parameters: url.Values{
			"singleversion": {"false"},
			"forceSync":     {"true"},
		},
		NoResponse: true,
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "remove")
	}
	return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	err := o.readMetaData(ctx)
	if err != nil {
		return errors.Wrap(err, "Remove: Failed to read metadata")
	}
	return o.fs.remove(ctx, o.id)
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	return o.id
}

// Check the interfaces are satisfied
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.PutStreamer     = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
)
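
// Illustrative command line usage once a remote (here called "mysharefile",
// an invented name) has been configured with "rclone config":
//
//	rclone lsd mysharefile:                list the top level folders
//	rclone copy /tmp/dir mysharefile:dir   copy a local directory to ShareFile
//	rclone lsf mysharefile:dir             list the files that arrived
//
// These are standard rclone commands shown here only as a usage sketch for
// this backend; see the rclone documentation for details.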