github.com/rclone/rclone@v1.66.1-0.20240517100346-7b89735ae726/backend/box/box.go (about) 1 // Package box provides an interface to the Box 2 // object storage system. 3 package box 4 5 // FIXME Box only supports file names of 255 characters or less. Names 6 // that will not be supported are those that contain non-printable 7 // ascii, / or \, names with trailing spaces, and the special names 8 // “.” and “..”. 9 10 // FIXME box can copy a directory 11 12 import ( 13 "context" 14 "crypto/rsa" 15 "encoding/json" 16 "encoding/pem" 17 "errors" 18 "fmt" 19 "io" 20 "net/http" 21 "net/url" 22 "os" 23 "path" 24 "strconv" 25 "strings" 26 "sync" 27 "sync/atomic" 28 "time" 29 30 "github.com/golang-jwt/jwt/v4" 31 "github.com/rclone/rclone/backend/box/api" 32 "github.com/rclone/rclone/fs" 33 "github.com/rclone/rclone/fs/config" 34 "github.com/rclone/rclone/fs/config/configmap" 35 "github.com/rclone/rclone/fs/config/configstruct" 36 "github.com/rclone/rclone/fs/config/obscure" 37 "github.com/rclone/rclone/fs/fserrors" 38 "github.com/rclone/rclone/fs/fshttp" 39 "github.com/rclone/rclone/fs/hash" 40 "github.com/rclone/rclone/lib/dircache" 41 "github.com/rclone/rclone/lib/encoder" 42 "github.com/rclone/rclone/lib/env" 43 "github.com/rclone/rclone/lib/jwtutil" 44 "github.com/rclone/rclone/lib/oauthutil" 45 "github.com/rclone/rclone/lib/pacer" 46 "github.com/rclone/rclone/lib/rest" 47 "github.com/youmark/pkcs8" 48 "golang.org/x/oauth2" 49 ) 50 51 const ( 52 rcloneClientID = "d0374ba6pgmaguie02ge15sv1mllndho" 53 rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL" 54 minSleep = 10 * time.Millisecond 55 maxSleep = 2 * time.Second 56 decayConstant = 2 // bigger for slower decay, exponential 57 rootURL = "https://api.box.com/2.0" 58 uploadURL = "https://upload.box.com/api/2.0" 59 minUploadCutoff = 50000000 // upload cutoff can be no lower than this 60 defaultUploadCutoff = 50 * 1024 * 1024 61 tokenURL = "https://api.box.com/oauth2/token" 62 ) 63 64 // Globals 65 var ( 66 // Description of how to auth for this app 67 oauthConfig = &oauth2.Config{ 68 Scopes: nil, 69 Endpoint: oauth2.Endpoint{ 70 AuthURL: "https://app.box.com/api/oauth2/authorize", 71 TokenURL: "https://app.box.com/api/oauth2/token", 72 }, 73 ClientID: rcloneClientID, 74 ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), 75 RedirectURL: oauthutil.RedirectURL, 76 } 77 ) 78 79 type boxCustomClaims struct { 80 jwt.StandardClaims 81 BoxSubType string `json:"box_sub_type,omitempty"` 82 } 83 84 // Register with Fs 85 func init() { 86 fs.Register(&fs.RegInfo{ 87 Name: "box", 88 Description: "Box", 89 NewFs: NewFs, 90 Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { 91 jsonFile, ok := m.Get("box_config_file") 92 boxSubType, boxSubTypeOk := m.Get("box_sub_type") 93 boxAccessToken, boxAccessTokenOk := m.Get("access_token") 94 var err error 95 // If using box config.json, use JWT auth 96 if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" { 97 err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m) 98 if err != nil { 99 return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err) 100 } 101 // Else, if not using an access token, use oauth2 102 } else if boxAccessToken == "" || !boxAccessTokenOk { 103 return oauthutil.ConfigOut("", &oauthutil.Options{ 104 OAuth2Config: oauthConfig, 105 }) 106 } 107 return nil, nil 108 }, 109 Options: append(oauthutil.SharedOptions, []fs.Option{{ 110 Name: "root_folder_id", 111 Help: "Fill in for rclone to use a non root folder as its starting point.", 112 Default: "0", 113 Advanced: true, 114 Sensitive: true, 115 }, { 116 Name: "box_config_file", 117 Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp, 118 }, { 119 Name: "access_token", 120 Help: "Box App Primary Access Token\n\nLeave blank normally.", 121 Sensitive: true, 122 }, { 123 Name: "box_sub_type", 124 Default: "user", 125 Examples: []fs.OptionExample{{ 126 Value: "user", 127 Help: "Rclone should act on behalf of a user.", 128 }, { 129 Value: "enterprise", 130 Help: "Rclone should act on behalf of a service account.", 131 }}, 132 }, { 133 Name: "upload_cutoff", 134 Help: "Cutoff for switching to multipart upload (>= 50 MiB).", 135 Default: fs.SizeSuffix(defaultUploadCutoff), 136 Advanced: true, 137 }, { 138 Name: "commit_retries", 139 Help: "Max number of times to try committing a multipart file.", 140 Default: 100, 141 Advanced: true, 142 }, { 143 Name: "list_chunk", 144 Default: 1000, 145 Help: "Size of listing chunk 1-1000.", 146 Advanced: true, 147 }, { 148 Name: "owned_by", 149 Default: "", 150 Help: "Only show items owned by the login (email address) passed in.", 151 Advanced: true, 152 }, { 153 Name: "impersonate", 154 Default: "", 155 Help: `Impersonate this user ID when using a service account. 156 157 Setting this flag allows rclone, when using a JWT service account, to 158 act on behalf of another user by setting the as-user header. 159 160 The user ID is the Box identifier for a user. User IDs can found for 161 any user via the GET /users endpoint, which is only available to 162 admins, or by calling the GET /users/me endpoint with an authenticated 163 user session. 164 165 See: https://developer.box.com/guides/authentication/jwt/as-user/ 166 `, 167 Advanced: true, 168 Sensitive: true, 169 }, { 170 Name: config.ConfigEncoding, 171 Help: config.ConfigEncodingHelp, 172 Advanced: true, 173 // From https://developer.box.com/docs/error-codes#section-400-bad-request : 174 // > Box only supports file or folder names that are 255 characters or less. 175 // > File names containing non-printable ascii, "/" or "\", names with leading 176 // > or trailing spaces, and the special names “.” and “..” are also unsupported. 177 // 178 // Testing revealed names with leading spaces work fine. 179 // Also encode invalid UTF-8 bytes as json doesn't handle them properly. 180 Default: (encoder.Display | 181 encoder.EncodeBackSlash | 182 encoder.EncodeRightSpace | 183 encoder.EncodeInvalidUtf8), 184 }}...), 185 }) 186 } 187 188 func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error { 189 jsonFile = env.ShellExpand(jsonFile) 190 boxConfig, err := getBoxConfig(jsonFile) 191 if err != nil { 192 return fmt.Errorf("get box config: %w", err) 193 } 194 privateKey, err := getDecryptedPrivateKey(boxConfig) 195 if err != nil { 196 return fmt.Errorf("get decrypted private key: %w", err) 197 } 198 claims, err := getClaims(boxConfig, boxSubType) 199 if err != nil { 200 return fmt.Errorf("get claims: %w", err) 201 } 202 signingHeaders := getSigningHeaders(boxConfig) 203 queryParams := getQueryParams(boxConfig) 204 client := fshttp.NewClient(ctx) 205 err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client) 206 return err 207 } 208 209 func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) { 210 file, err := os.ReadFile(configFile) 211 if err != nil { 212 return nil, fmt.Errorf("box: failed to read Box config: %w", err) 213 } 214 err = json.Unmarshal(file, &boxConfig) 215 if err != nil { 216 return nil, fmt.Errorf("box: failed to parse Box config: %w", err) 217 } 218 return boxConfig, nil 219 } 220 221 func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) { 222 val, err := jwtutil.RandomHex(20) 223 if err != nil { 224 return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err) 225 } 226 227 claims = &boxCustomClaims{ 228 //lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely 229 //nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019 230 StandardClaims: jwt.StandardClaims{ 231 Id: val, 232 Issuer: boxConfig.BoxAppSettings.ClientID, 233 Subject: boxConfig.EnterpriseID, 234 Audience: tokenURL, 235 ExpiresAt: time.Now().Add(time.Second * 45).Unix(), 236 }, 237 BoxSubType: boxSubType, 238 } 239 return claims, nil 240 } 241 242 func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} { 243 signingHeaders := map[string]interface{}{ 244 "kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID, 245 } 246 return signingHeaders 247 } 248 249 func getQueryParams(boxConfig *api.ConfigJSON) map[string]string { 250 queryParams := map[string]string{ 251 "client_id": boxConfig.BoxAppSettings.ClientID, 252 "client_secret": boxConfig.BoxAppSettings.ClientSecret, 253 } 254 255 return queryParams 256 } 257 258 func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) { 259 260 block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey)) 261 if len(rest) > 0 { 262 return nil, fmt.Errorf("box: extra data included in private key: %w", err) 263 } 264 265 rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase)) 266 if err != nil { 267 return nil, fmt.Errorf("box: failed to decrypt private key: %w", err) 268 } 269 270 return rsaKey.(*rsa.PrivateKey), nil 271 } 272 273 // Options defines the configuration for this backend 274 type Options struct { 275 UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` 276 CommitRetries int `config:"commit_retries"` 277 Enc encoder.MultiEncoder `config:"encoding"` 278 RootFolderID string `config:"root_folder_id"` 279 AccessToken string `config:"access_token"` 280 ListChunk int `config:"list_chunk"` 281 OwnedBy string `config:"owned_by"` 282 Impersonate string `config:"impersonate"` 283 } 284 285 // ItemMeta defines metadata we cache for each Item ID 286 type ItemMeta struct { 287 SequenceID int64 // the most recent event processed for this item 288 ParentID string // ID of the parent directory of this item 289 Name string // leaf name of this item 290 } 291 292 // Fs represents a remote box 293 type Fs struct { 294 name string // name of this remote 295 root string // the path we are working on 296 opt Options // parsed options 297 features *fs.Features // optional features 298 srv *rest.Client // the connection to the server 299 dirCache *dircache.DirCache // Map of directory path to directory id 300 pacer *fs.Pacer // pacer for API calls 301 tokenRenewer *oauthutil.Renew // renew the token on expiry 302 uploadToken *pacer.TokenDispenser // control concurrency 303 itemMetaCacheMu *sync.Mutex // protects itemMetaCache 304 itemMetaCache map[string]ItemMeta // map of Item ID to selected metadata 305 } 306 307 // Object describes a box object 308 // 309 // Will definitely have info but maybe not meta 310 type Object struct { 311 fs *Fs // what this object is part of 312 remote string // The remote path 313 hasMetaData bool // whether info below has been set 314 size int64 // size of the object 315 modTime time.Time // modification time of the object 316 id string // ID of the object 317 publicLink string // Public Link for the object 318 sha1 string // SHA-1 of the object content 319 } 320 321 // ------------------------------------------------------------ 322 323 // Name of the remote (as passed into NewFs) 324 func (f *Fs) Name() string { 325 return f.name 326 } 327 328 // Root of the remote (as passed into NewFs) 329 func (f *Fs) Root() string { 330 return f.root 331 } 332 333 // String converts this Fs to a string 334 func (f *Fs) String() string { 335 return fmt.Sprintf("box root '%s'", f.root) 336 } 337 338 // Features returns the optional features of this Fs 339 func (f *Fs) Features() *fs.Features { 340 return f.features 341 } 342 343 // parsePath parses a box 'url' 344 func parsePath(path string) (root string) { 345 root = strings.Trim(path, "/") 346 return 347 } 348 349 // retryErrorCodes is a slice of error codes that we will retry 350 var retryErrorCodes = []int{ 351 429, // Too Many Requests. 352 500, // Internal Server Error 353 502, // Bad Gateway 354 503, // Service Unavailable 355 504, // Gateway Timeout 356 509, // Bandwidth Limit Exceeded 357 } 358 359 // shouldRetry returns a boolean as to whether this resp and err 360 // deserve to be retried. It returns the err as a convenience 361 func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { 362 if fserrors.ContextError(ctx, &err) { 363 return false, err 364 } 365 authRetry := false 366 367 if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Header.Get("Www-Authenticate"), "expired_token") { 368 authRetry = true 369 fs.Debugf(nil, "Should retry: %v", err) 370 } 371 372 // Box API errors which should be retries 373 if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "operation_blocked_temporary" { 374 fs.Debugf(nil, "Retrying API error %v", err) 375 return true, err 376 } 377 378 return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err 379 } 380 381 // readMetaDataForPath reads the metadata from the path 382 func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) { 383 // defer log.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) 384 leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false) 385 if err != nil { 386 if err == fs.ErrorDirNotFound { 387 return nil, fs.ErrorObjectNotFound 388 } 389 return nil, err 390 } 391 392 // Use preupload to find the ID 393 itemMini, err := f.preUploadCheck(ctx, leaf, directoryID, -1) 394 if err != nil { 395 return nil, err 396 } 397 if itemMini == nil { 398 return nil, fs.ErrorObjectNotFound 399 } 400 401 // Now we have the ID we can look up the object proper 402 opts := rest.Opts{ 403 Method: "GET", 404 Path: "/files/" + itemMini.ID, 405 Parameters: fieldsValue(), 406 } 407 var item api.Item 408 err = f.pacer.Call(func() (bool, error) { 409 resp, err := f.srv.CallJSON(ctx, &opts, nil, &item) 410 return shouldRetry(ctx, resp, err) 411 }) 412 if err != nil { 413 return nil, err 414 } 415 return &item, nil 416 } 417 418 // errorHandler parses a non 2xx error response into an error 419 func errorHandler(resp *http.Response) error { 420 // Decode error response 421 errResponse := new(api.Error) 422 err := rest.DecodeJSON(resp, &errResponse) 423 if err != nil { 424 fs.Debugf(nil, "Couldn't decode error response: %v", err) 425 } 426 if errResponse.Code == "" { 427 errResponse.Code = resp.Status 428 } 429 if errResponse.Status == 0 { 430 errResponse.Status = resp.StatusCode 431 } 432 return errResponse 433 } 434 435 // NewFs constructs an Fs from the path, container:path 436 func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { 437 // Parse config into Options struct 438 opt := new(Options) 439 err := configstruct.Set(m, opt) 440 if err != nil { 441 return nil, err 442 } 443 444 if opt.UploadCutoff < minUploadCutoff { 445 return nil, fmt.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff)) 446 } 447 448 root = parsePath(root) 449 450 client := fshttp.NewClient(ctx) 451 var ts *oauthutil.TokenSource 452 // If not using an accessToken, create an oauth client and tokensource 453 if opt.AccessToken == "" { 454 client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig) 455 if err != nil { 456 return nil, fmt.Errorf("failed to configure Box: %w", err) 457 } 458 } 459 460 ci := fs.GetConfig(ctx) 461 f := &Fs{ 462 name: name, 463 root: root, 464 opt: *opt, 465 srv: rest.NewClient(client).SetRoot(rootURL), 466 pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), 467 uploadToken: pacer.NewTokenDispenser(ci.Transfers), 468 itemMetaCacheMu: new(sync.Mutex), 469 itemMetaCache: make(map[string]ItemMeta), 470 } 471 f.features = (&fs.Features{ 472 CaseInsensitive: true, 473 CanHaveEmptyDirectories: true, 474 }).Fill(ctx, f) 475 f.srv.SetErrorHandler(errorHandler) 476 477 // If using an accessToken, set the Authorization header 478 if f.opt.AccessToken != "" { 479 f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken) 480 } 481 482 // If using impersonate set an as-user header 483 if f.opt.Impersonate != "" { 484 f.srv.SetHeader("as-user", f.opt.Impersonate) 485 } 486 487 jsonFile, ok := m.Get("box_config_file") 488 boxSubType, boxSubTypeOk := m.Get("box_sub_type") 489 490 if ts != nil { 491 // If using box config.json and JWT, renewing should just refresh the token and 492 // should do so whether there are uploads pending or not. 493 if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" { 494 f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { 495 err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m) 496 return err 497 }) 498 f.tokenRenewer.Start() 499 } else { 500 // Renew the token in the background 501 f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { 502 _, err := f.readMetaDataForPath(ctx, "") 503 return err 504 }) 505 } 506 } 507 508 // Get rootFolderID 509 rootID := f.opt.RootFolderID 510 f.dirCache = dircache.New(root, rootID, f) 511 512 // Find the current root 513 err = f.dirCache.FindRoot(ctx, false) 514 if err != nil { 515 // Assume it is a file 516 newRoot, remote := dircache.SplitPath(root) 517 tempF := *f 518 tempF.dirCache = dircache.New(newRoot, rootID, &tempF) 519 tempF.root = newRoot 520 // Make new Fs which is the parent 521 err = tempF.dirCache.FindRoot(ctx, false) 522 if err != nil { 523 // No root so return old f 524 return f, nil 525 } 526 _, err := tempF.newObjectWithInfo(ctx, remote, nil) 527 if err != nil { 528 if err == fs.ErrorObjectNotFound { 529 // File doesn't exist so return old f 530 return f, nil 531 } 532 return nil, err 533 } 534 f.features.Fill(ctx, &tempF) 535 // XXX: update the old f here instead of returning tempF, since 536 // `features` were already filled with functions having *f as a receiver. 537 // See https://github.com/rclone/rclone/issues/2182 538 f.dirCache = tempF.dirCache 539 f.root = tempF.root 540 // return an error with an fs which points to the parent 541 return f, fs.ErrorIsFile 542 } 543 return f, nil 544 } 545 546 // rootSlash returns root with a slash on if it is empty, otherwise empty string 547 func (f *Fs) rootSlash() string { 548 if f.root == "" { 549 return f.root 550 } 551 return f.root + "/" 552 } 553 554 // Return an Object from a path 555 // 556 // If it can't be found it returns the error fs.ErrorObjectNotFound. 557 func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) { 558 o := &Object{ 559 fs: f, 560 remote: remote, 561 } 562 var err error 563 if info != nil { 564 // Set info 565 err = o.setMetaData(info) 566 } else { 567 err = o.readMetaData(ctx) // reads info and meta, returning an error 568 } 569 if err != nil { 570 return nil, err 571 } 572 return o, nil 573 } 574 575 // NewObject finds the Object at remote. If it can't be found 576 // it returns the error fs.ErrorObjectNotFound. 577 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { 578 return f.newObjectWithInfo(ctx, remote, nil) 579 } 580 581 // FindLeaf finds a directory of name leaf in the folder with ID pathID 582 func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { 583 // Find the leaf in pathID 584 found, err = f.listAll(ctx, pathID, true, false, true, func(item *api.Item) bool { 585 if strings.EqualFold(item.Name, leaf) { 586 pathIDOut = item.ID 587 return true 588 } 589 return false 590 }) 591 return pathIDOut, found, err 592 } 593 594 // fieldsValue creates a url.Values with fields set to those in api.Item 595 func fieldsValue() url.Values { 596 values := url.Values{} 597 values.Set("fields", api.ItemFields) 598 return values 599 } 600 601 // CreateDir makes a directory with pathID as parent and name leaf 602 func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { 603 // fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf) 604 var resp *http.Response 605 var info *api.Item 606 opts := rest.Opts{ 607 Method: "POST", 608 Path: "/folders", 609 Parameters: fieldsValue(), 610 } 611 mkdir := api.CreateFolder{ 612 Name: f.opt.Enc.FromStandardName(leaf), 613 Parent: api.Parent{ 614 ID: pathID, 615 }, 616 } 617 err = f.pacer.Call(func() (bool, error) { 618 resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info) 619 return shouldRetry(ctx, resp, err) 620 }) 621 if err != nil { 622 //fmt.Printf("...Error %v\n", err) 623 return "", err 624 } 625 // fmt.Printf("...Id %q\n", *info.Id) 626 return info.ID, nil 627 } 628 629 // list the objects into the function supplied 630 // 631 // If directories is set it only sends directories 632 // User function to process a File item from listAll 633 // 634 // Should return true to finish processing 635 type listAllFn func(*api.Item) bool 636 637 // Lists the directory required calling the user function on each item found 638 // 639 // If the user fn ever returns true then it early exits with found = true 640 func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, activeOnly bool, fn listAllFn) (found bool, err error) { 641 opts := rest.Opts{ 642 Method: "GET", 643 Path: "/folders/" + dirID + "/items", 644 Parameters: fieldsValue(), 645 } 646 opts.Parameters.Set("limit", strconv.Itoa(f.opt.ListChunk)) 647 opts.Parameters.Set("usemarker", "true") 648 var marker *string 649 OUTER: 650 for { 651 if marker != nil { 652 opts.Parameters.Set("marker", *marker) 653 } 654 655 var result api.FolderItems 656 var resp *http.Response 657 err = f.pacer.Call(func() (bool, error) { 658 resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) 659 return shouldRetry(ctx, resp, err) 660 }) 661 if err != nil { 662 return found, fmt.Errorf("couldn't list files: %w", err) 663 } 664 for i := range result.Entries { 665 item := &result.Entries[i] 666 if item.Type == api.ItemTypeFolder { 667 if filesOnly { 668 continue 669 } 670 } else if item.Type == api.ItemTypeFile { 671 if directoriesOnly { 672 continue 673 } 674 } else { 675 fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type) 676 continue 677 } 678 if activeOnly && item.ItemStatus != api.ItemStatusActive { 679 continue 680 } 681 if f.opt.OwnedBy != "" && f.opt.OwnedBy != item.OwnedBy.Login { 682 continue 683 } 684 item.Name = f.opt.Enc.ToStandardName(item.Name) 685 if fn(item) { 686 found = true 687 break OUTER 688 } 689 } 690 marker = result.NextMarker 691 if marker == nil { 692 break 693 } 694 } 695 return 696 } 697 698 // List the objects and directories in dir into entries. The 699 // entries can be returned in any order but should be for a 700 // complete directory. 701 // 702 // dir should be "" to list the root, and should not have 703 // trailing slashes. 704 // 705 // This should return ErrDirNotFound if the directory isn't 706 // found. 707 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { 708 directoryID, err := f.dirCache.FindDir(ctx, dir, false) 709 if err != nil { 710 return nil, err 711 } 712 var iErr error 713 _, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool { 714 remote := path.Join(dir, info.Name) 715 if info.Type == api.ItemTypeFolder { 716 // cache the directory ID for later lookups 717 f.dirCache.Put(remote, info.ID) 718 d := fs.NewDir(remote, info.ModTime()).SetID(info.ID) 719 // FIXME more info from dir? 720 entries = append(entries, d) 721 } else if info.Type == api.ItemTypeFile { 722 o, err := f.newObjectWithInfo(ctx, remote, info) 723 if err != nil { 724 iErr = err 725 return true 726 } 727 entries = append(entries, o) 728 } 729 730 // Cache some metadata for this Item to help us process events later 731 // on. In particular, the box event API does not provide the old path 732 // of the Item when it is renamed/deleted/moved/etc. 733 f.itemMetaCacheMu.Lock() 734 cachedItemMeta, found := f.itemMetaCache[info.ID] 735 if !found || cachedItemMeta.SequenceID < info.SequenceID { 736 f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name} 737 } 738 f.itemMetaCacheMu.Unlock() 739 740 return false 741 }) 742 if err != nil { 743 return nil, err 744 } 745 if iErr != nil { 746 return nil, iErr 747 } 748 return entries, nil 749 } 750 751 // Creates from the parameters passed in a half finished Object which 752 // must have setMetaData called on it 753 // 754 // Returns the object, leaf, directoryID and error. 755 // 756 // Used to create new objects 757 func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { 758 // Create the directory for the object if it doesn't exist 759 leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true) 760 if err != nil { 761 return 762 } 763 // Temporary Object under construction 764 o = &Object{ 765 fs: f, 766 remote: remote, 767 } 768 return o, leaf, directoryID, nil 769 } 770 771 // preUploadCheck checks to see if a file can be uploaded 772 // 773 // It returns "", nil if the file is good to go 774 // It returns "ID", nil if the file must be updated 775 func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (item *api.ItemMini, err error) { 776 check := api.PreUploadCheck{ 777 Name: f.opt.Enc.FromStandardName(leaf), 778 Parent: api.Parent{ 779 ID: directoryID, 780 }, 781 } 782 if size >= 0 { 783 check.Size = &size 784 } 785 opts := rest.Opts{ 786 Method: "OPTIONS", 787 Path: "/files/content/", 788 } 789 var result api.PreUploadCheckResponse 790 var resp *http.Response 791 err = f.pacer.Call(func() (bool, error) { 792 resp, err = f.srv.CallJSON(ctx, &opts, &check, &result) 793 return shouldRetry(ctx, resp, err) 794 }) 795 if err != nil { 796 if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "item_name_in_use" { 797 var conflict api.PreUploadCheckConflict 798 err = json.Unmarshal(apiErr.ContextInfo, &conflict) 799 if err != nil { 800 return nil, fmt.Errorf("pre-upload check: JSON decode failed: %w", err) 801 } 802 if conflict.Conflicts.Type != api.ItemTypeFile { 803 return nil, fs.ErrorIsDir 804 } 805 return &conflict.Conflicts, nil 806 } 807 return nil, fmt.Errorf("pre-upload check: %w", err) 808 } 809 return nil, nil 810 } 811 812 // Put the object 813 // 814 // Copy the reader in to the new object which is returned. 815 // 816 // The new object may have been created if an error is returned 817 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { 818 // If directory doesn't exist, file doesn't exist so can upload 819 remote := src.Remote() 820 leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false) 821 if err != nil { 822 if err == fs.ErrorDirNotFound { 823 return f.PutUnchecked(ctx, in, src, options...) 824 } 825 return nil, err 826 } 827 828 // Preflight check the upload, which returns the ID if the 829 // object already exists 830 item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size()) 831 if err != nil { 832 return nil, err 833 } 834 if item == nil { 835 return f.PutUnchecked(ctx, in, src, options...) 836 } 837 838 // If object exists then create a skeleton one with just id 839 o := &Object{ 840 fs: f, 841 remote: remote, 842 id: item.ID, 843 } 844 return o, o.Update(ctx, in, src, options...) 845 } 846 847 // PutStream uploads to the remote path with the modTime given of indeterminate size 848 func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { 849 return f.Put(ctx, in, src, options...) 850 } 851 852 // PutUnchecked the object into the container 853 // 854 // This will produce an error if the object already exists. 855 // 856 // Copy the reader in to the new object which is returned. 857 // 858 // The new object may have been created if an error is returned 859 func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { 860 remote := src.Remote() 861 size := src.Size() 862 modTime := src.ModTime(ctx) 863 864 o, _, _, err := f.createObject(ctx, remote, modTime, size) 865 if err != nil { 866 return nil, err 867 } 868 return o, o.Update(ctx, in, src, options...) 869 } 870 871 // Mkdir creates the container if it doesn't exist 872 func (f *Fs) Mkdir(ctx context.Context, dir string) error { 873 _, err := f.dirCache.FindDir(ctx, dir, true) 874 return err 875 } 876 877 // deleteObject removes an object by ID 878 func (f *Fs) deleteObject(ctx context.Context, id string) error { 879 opts := rest.Opts{ 880 Method: "DELETE", 881 Path: "/files/" + id, 882 NoResponse: true, 883 } 884 return f.pacer.Call(func() (bool, error) { 885 resp, err := f.srv.Call(ctx, &opts) 886 return shouldRetry(ctx, resp, err) 887 }) 888 } 889 890 // purgeCheck removes the root directory, if check is set then it 891 // refuses to do so if it has anything in 892 func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { 893 root := path.Join(f.root, dir) 894 if root == "" { 895 return errors.New("can't purge root directory") 896 } 897 dc := f.dirCache 898 rootID, err := dc.FindDir(ctx, dir, false) 899 if err != nil { 900 return err 901 } 902 903 opts := rest.Opts{ 904 Method: "DELETE", 905 Path: "/folders/" + rootID, 906 Parameters: url.Values{}, 907 NoResponse: true, 908 } 909 opts.Parameters.Set("recursive", strconv.FormatBool(!check)) 910 var resp *http.Response 911 err = f.pacer.Call(func() (bool, error) { 912 resp, err = f.srv.Call(ctx, &opts) 913 return shouldRetry(ctx, resp, err) 914 }) 915 if err != nil { 916 return fmt.Errorf("rmdir failed: %w", err) 917 } 918 f.dirCache.FlushDir(dir) 919 if err != nil { 920 return err 921 } 922 return nil 923 } 924 925 // Rmdir deletes the root folder 926 // 927 // Returns an error if it isn't empty 928 func (f *Fs) Rmdir(ctx context.Context, dir string) error { 929 return f.purgeCheck(ctx, dir, true) 930 } 931 932 // Precision return the precision of this Fs 933 func (f *Fs) Precision() time.Duration { 934 return time.Second 935 } 936 937 // Copy src to this remote using server-side copy operations. 938 // 939 // This is stored with the remote path given. 940 // 941 // It returns the destination Object and a possible error. 942 // 943 // Will only be called if src.Fs().Name() == f.Name() 944 // 945 // If it isn't possible then return fs.ErrorCantCopy 946 func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { 947 srcObj, ok := src.(*Object) 948 if !ok { 949 fs.Debugf(src, "Can't copy - not same remote type") 950 return nil, fs.ErrorCantCopy 951 } 952 err := srcObj.readMetaData(ctx) 953 if err != nil { 954 return nil, err 955 } 956 957 srcPath := srcObj.fs.rootSlash() + srcObj.remote 958 dstPath := f.rootSlash() + remote 959 if strings.EqualFold(srcPath, dstPath) { 960 return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath) 961 } 962 963 // Create temporary object 964 dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) 965 if err != nil { 966 return nil, err 967 } 968 969 // Copy the object 970 opts := rest.Opts{ 971 Method: "POST", 972 Path: "/files/" + srcObj.id + "/copy", 973 Parameters: fieldsValue(), 974 } 975 copyFile := api.CopyFile{ 976 Name: f.opt.Enc.FromStandardName(leaf), 977 Parent: api.Parent{ 978 ID: directoryID, 979 }, 980 } 981 var resp *http.Response 982 var info *api.Item 983 err = f.pacer.Call(func() (bool, error) { 984 resp, err = f.srv.CallJSON(ctx, &opts, ©File, &info) 985 return shouldRetry(ctx, resp, err) 986 }) 987 if err != nil { 988 return nil, err 989 } 990 err = dstObj.setMetaData(info) 991 if err != nil { 992 return nil, err 993 } 994 return dstObj, nil 995 } 996 997 // Purge deletes all the files and the container 998 // 999 // Optional interface: Only implement this if you have a way of 1000 // deleting all the files quicker than just running Remove() on the 1001 // result of List() 1002 func (f *Fs) Purge(ctx context.Context, dir string) error { 1003 return f.purgeCheck(ctx, dir, false) 1004 } 1005 1006 // move a file or folder 1007 func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (info *api.Item, err error) { 1008 // Move the object 1009 opts := rest.Opts{ 1010 Method: "PUT", 1011 Path: endpoint + id, 1012 Parameters: fieldsValue(), 1013 } 1014 move := api.UpdateFileMove{ 1015 Name: f.opt.Enc.FromStandardName(leaf), 1016 Parent: api.Parent{ 1017 ID: directoryID, 1018 }, 1019 } 1020 var resp *http.Response 1021 err = f.pacer.Call(func() (bool, error) { 1022 resp, err = f.srv.CallJSON(ctx, &opts, &move, &info) 1023 return shouldRetry(ctx, resp, err) 1024 }) 1025 if err != nil { 1026 return nil, err 1027 } 1028 return info, nil 1029 } 1030 1031 // About gets quota information 1032 func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { 1033 opts := rest.Opts{ 1034 Method: "GET", 1035 Path: "/users/me", 1036 } 1037 var user api.User 1038 var resp *http.Response 1039 err = f.pacer.Call(func() (bool, error) { 1040 resp, err = f.srv.CallJSON(ctx, &opts, nil, &user) 1041 return shouldRetry(ctx, resp, err) 1042 }) 1043 if err != nil { 1044 return nil, fmt.Errorf("failed to read user info: %w", err) 1045 } 1046 // FIXME max upload size would be useful to use in Update 1047 usage = &fs.Usage{ 1048 Used: fs.NewUsageValue(user.SpaceUsed), // bytes in use 1049 Total: fs.NewUsageValue(user.SpaceAmount), // bytes total 1050 Free: fs.NewUsageValue(user.SpaceAmount - user.SpaceUsed), // bytes free 1051 } 1052 return usage, nil 1053 } 1054 1055 // Move src to this remote using server-side move operations. 1056 // 1057 // This is stored with the remote path given. 1058 // 1059 // It returns the destination Object and a possible error. 1060 // 1061 // Will only be called if src.Fs().Name() == f.Name() 1062 // 1063 // If it isn't possible then return fs.ErrorCantMove 1064 func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { 1065 srcObj, ok := src.(*Object) 1066 if !ok { 1067 fs.Debugf(src, "Can't move - not same remote type") 1068 return nil, fs.ErrorCantMove 1069 } 1070 1071 // Create temporary object 1072 dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) 1073 if err != nil { 1074 return nil, err 1075 } 1076 1077 // Do the move 1078 info, err := f.move(ctx, "/files/", srcObj.id, leaf, directoryID) 1079 if err != nil { 1080 return nil, err 1081 } 1082 1083 err = dstObj.setMetaData(info) 1084 if err != nil { 1085 return nil, err 1086 } 1087 return dstObj, nil 1088 } 1089 1090 // DirMove moves src, srcRemote to this remote at dstRemote 1091 // using server-side move operations. 1092 // 1093 // Will only be called if src.Fs().Name() == f.Name() 1094 // 1095 // If it isn't possible then return fs.ErrorCantDirMove 1096 // 1097 // If destination exists then return fs.ErrorDirExists 1098 func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { 1099 srcFs, ok := src.(*Fs) 1100 if !ok { 1101 fs.Debugf(srcFs, "Can't move directory - not same remote type") 1102 return fs.ErrorCantDirMove 1103 } 1104 1105 srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote) 1106 if err != nil { 1107 return err 1108 } 1109 1110 // Do the move 1111 _, err = f.move(ctx, "/folders/", srcID, dstLeaf, dstDirectoryID) 1112 if err != nil { 1113 return err 1114 } 1115 srcFs.dirCache.FlushDir(srcRemote) 1116 return nil 1117 } 1118 1119 // PublicLink adds a "readable by anyone with link" permission on the given file or folder. 1120 func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) { 1121 id, err := f.dirCache.FindDir(ctx, remote, false) 1122 var opts rest.Opts 1123 if err == nil { 1124 fs.Debugf(f, "attempting to share directory '%s'", remote) 1125 1126 opts = rest.Opts{ 1127 Method: "PUT", 1128 Path: "/folders/" + id, 1129 Parameters: fieldsValue(), 1130 } 1131 } else { 1132 fs.Debugf(f, "attempting to share single file '%s'", remote) 1133 o, err := f.NewObject(ctx, remote) 1134 if err != nil { 1135 return "", err 1136 } 1137 1138 if o.(*Object).publicLink != "" { 1139 return o.(*Object).publicLink, nil 1140 } 1141 1142 opts = rest.Opts{ 1143 Method: "PUT", 1144 Path: "/files/" + o.(*Object).id, 1145 Parameters: fieldsValue(), 1146 } 1147 } 1148 1149 shareLink := api.CreateSharedLink{} 1150 var info api.Item 1151 var resp *http.Response 1152 err = f.pacer.Call(func() (bool, error) { 1153 resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info) 1154 return shouldRetry(ctx, resp, err) 1155 }) 1156 return info.SharedLink.URL, err 1157 } 1158 1159 // deletePermanently permanently deletes a trashed file 1160 func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error { 1161 opts := rest.Opts{ 1162 Method: "DELETE", 1163 NoResponse: true, 1164 } 1165 if itemType == api.ItemTypeFile { 1166 opts.Path = "/files/" + id + "/trash" 1167 } else { 1168 opts.Path = "/folders/" + id + "/trash" 1169 } 1170 return f.pacer.Call(func() (bool, error) { 1171 resp, err := f.srv.Call(ctx, &opts) 1172 return shouldRetry(ctx, resp, err) 1173 }) 1174 } 1175 1176 // CleanUp empties the trash 1177 func (f *Fs) CleanUp(ctx context.Context) (err error) { 1178 var ( 1179 deleteErrors atomic.Uint64 1180 concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers) 1181 wg sync.WaitGroup 1182 ) 1183 _, err = f.listAll(ctx, "trash", false, false, false, func(item *api.Item) bool { 1184 if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile { 1185 wg.Add(1) 1186 concurrencyControl <- struct{}{} 1187 go func() { 1188 defer func() { 1189 <-concurrencyControl 1190 wg.Done() 1191 }() 1192 err := f.deletePermanently(ctx, item.Type, item.ID) 1193 if err != nil { 1194 fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err) 1195 deleteErrors.Add(1) 1196 } 1197 }() 1198 } else { 1199 fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type) 1200 } 1201 return false 1202 }) 1203 wg.Wait() 1204 if deleteErrors.Load() != 0 { 1205 return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load()) 1206 } 1207 return err 1208 } 1209 1210 // Shutdown shutdown the fs 1211 func (f *Fs) Shutdown(ctx context.Context) error { 1212 f.tokenRenewer.Shutdown() 1213 return nil 1214 } 1215 1216 // ChangeNotify calls the passed function with a path that has had changes. 1217 // If the implementation uses polling, it should adhere to the given interval. 1218 // 1219 // Automatically restarts itself in case of unexpected behavior of the remote. 1220 // 1221 // Close the returned channel to stop being notified. 1222 func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { 1223 go func() { 1224 // get the `stream_position` early so all changes from now on get processed 1225 streamPosition, err := f.changeNotifyStreamPosition(ctx) 1226 if err != nil { 1227 fs.Infof(f, "Failed to get StreamPosition: %s", err) 1228 } 1229 1230 // box can send duplicate Event IDs. Use this map to track and filter 1231 // the ones we've already processed. 1232 processedEventIDs := make(map[string]time.Time) 1233 1234 var ticker *time.Ticker 1235 var tickerC <-chan time.Time 1236 for { 1237 select { 1238 case pollInterval, ok := <-pollIntervalChan: 1239 if !ok { 1240 if ticker != nil { 1241 ticker.Stop() 1242 } 1243 return 1244 } 1245 if ticker != nil { 1246 ticker.Stop() 1247 ticker, tickerC = nil, nil 1248 } 1249 if pollInterval != 0 { 1250 ticker = time.NewTicker(pollInterval) 1251 tickerC = ticker.C 1252 } 1253 case <-tickerC: 1254 if streamPosition == "" { 1255 streamPosition, err = f.changeNotifyStreamPosition(ctx) 1256 if err != nil { 1257 fs.Infof(f, "Failed to get StreamPosition: %s", err) 1258 continue 1259 } 1260 } 1261 1262 // Garbage collect EventIDs older than 1 minute 1263 for eventID, timestamp := range processedEventIDs { 1264 if time.Since(timestamp) > time.Minute { 1265 delete(processedEventIDs, eventID) 1266 } 1267 } 1268 1269 streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition, processedEventIDs) 1270 if err != nil { 1271 fs.Infof(f, "Change notify listener failure: %s", err) 1272 } 1273 } 1274 } 1275 }() 1276 } 1277 1278 func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) { 1279 opts := rest.Opts{ 1280 Method: "GET", 1281 Path: "/events", 1282 Parameters: fieldsValue(), 1283 } 1284 opts.Parameters.Set("stream_position", "now") 1285 opts.Parameters.Set("stream_type", "changes") 1286 1287 var result api.Events 1288 var resp *http.Response 1289 err = f.pacer.Call(func() (bool, error) { 1290 resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) 1291 return shouldRetry(ctx, resp, err) 1292 }) 1293 if err != nil { 1294 return "", err 1295 } 1296 1297 return strconv.FormatInt(result.NextStreamPosition, 10), nil 1298 } 1299 1300 // Attempts to construct the full path for an object, given the ID of its 1301 // parent directory and the name of the object. 1302 // 1303 // Can return "" if the parentID is not currently in the directory cache. 1304 func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) { 1305 fullPath = "" 1306 name := f.opt.Enc.ToStandardName(childName) 1307 if parentID != "" { 1308 if parentDir, ok := f.dirCache.GetInv(parentID); ok { 1309 if len(parentDir) > 0 { 1310 fullPath = parentDir + "/" + name 1311 } else { 1312 fullPath = name 1313 } 1314 } 1315 } else { 1316 // No parent, this object is at the root 1317 fullPath = name 1318 } 1319 return fullPath 1320 } 1321 1322 func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string, processedEventIDs map[string]time.Time) (nextStreamPosition string, err error) { 1323 nextStreamPosition = streamPosition 1324 1325 for { 1326 limit := f.opt.ListChunk 1327 1328 // box only allows a max of 500 events 1329 if limit > 500 { 1330 limit = 500 1331 } 1332 1333 opts := rest.Opts{ 1334 Method: "GET", 1335 Path: "/events", 1336 Parameters: fieldsValue(), 1337 } 1338 opts.Parameters.Set("stream_position", nextStreamPosition) 1339 opts.Parameters.Set("stream_type", "changes") 1340 opts.Parameters.Set("limit", strconv.Itoa(limit)) 1341 1342 var result api.Events 1343 var resp *http.Response 1344 fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition) 1345 err = f.pacer.Call(func() (bool, error) { 1346 resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) 1347 return shouldRetry(ctx, resp, err) 1348 }) 1349 if err != nil { 1350 return "", err 1351 } 1352 1353 if result.ChunkSize != int64(len(result.Entries)) { 1354 return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries)) 1355 } 1356 1357 nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10) 1358 if result.ChunkSize == 0 { 1359 return nextStreamPosition, nil 1360 } 1361 1362 type pathToClear struct { 1363 path string 1364 entryType fs.EntryType 1365 } 1366 var pathsToClear []pathToClear 1367 newEventIDs := 0 1368 for _, entry := range result.Entries { 1369 eventDetails := fmt.Sprintf("[%q(%d)|%s|%s|%s|%s]", entry.Source.Name, entry.Source.SequenceID, 1370 entry.Source.Type, entry.EventType, entry.Source.ID, entry.EventID) 1371 1372 if entry.EventID == "" { 1373 fs.Debugf(f, "%s ignored due to missing EventID", eventDetails) 1374 continue 1375 } 1376 if _, ok := processedEventIDs[entry.EventID]; ok { 1377 fs.Debugf(f, "%s ignored due to duplicate EventID", eventDetails) 1378 continue 1379 } 1380 processedEventIDs[entry.EventID] = time.Now() 1381 newEventIDs++ 1382 1383 if entry.Source.ID == "" { // missing File or Folder ID 1384 fs.Debugf(f, "%s ignored due to missing SourceID", eventDetails) 1385 continue 1386 } 1387 if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder 1388 fs.Debugf(f, "%s ignored due to unsupported SourceType", eventDetails) 1389 continue 1390 } 1391 1392 // Only interested in event types that result in a file tree change 1393 if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found { 1394 fs.Debugf(f, "%s ignored due to unsupported EventType", eventDetails) 1395 continue 1396 } 1397 1398 f.itemMetaCacheMu.Lock() 1399 itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID] 1400 if cachedItemMetaFound { 1401 if itemMeta.SequenceID >= entry.Source.SequenceID { 1402 // Item in the cache has the same or newer SequenceID than 1403 // this event. Ignore this event, it must be old. 1404 f.itemMetaCacheMu.Unlock() 1405 fs.Debugf(f, "%s ignored due to old SequenceID (%q)", eventDetails, itemMeta.SequenceID) 1406 continue 1407 } 1408 1409 // This event is newer. Delete its entry from the cache, 1410 // we'll notify about its change below, then it's up to a 1411 // future list operation to repopulate the cache. 1412 delete(f.itemMetaCache, entry.Source.ID) 1413 } 1414 f.itemMetaCacheMu.Unlock() 1415 1416 entryType := fs.EntryDirectory 1417 if entry.Source.Type == api.ItemTypeFile { 1418 entryType = fs.EntryObject 1419 } 1420 1421 // The box event only includes the new path for the object (e.g. 1422 // the path after the object was moved). If there was an old path 1423 // saved in our cache, it must be cleared. 1424 if cachedItemMetaFound { 1425 path := f.getFullPath(itemMeta.ParentID, itemMeta.Name) 1426 if path != "" { 1427 fs.Debugf(f, "%s added old path (%q) for notify", eventDetails, path) 1428 pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType}) 1429 } else { 1430 fs.Debugf(f, "%s old parent not cached", eventDetails) 1431 } 1432 1433 // If this is a directory, also delete it from the dir cache. 1434 // This will effectively invalidate the item metadata cache 1435 // entries for all descendents of this directory, since we 1436 // will no longer be able to construct a full path for them. 1437 // This is exactly what we want, since we don't want to notify 1438 // on the paths of these descendents if one of their ancestors 1439 // has been renamed/deleted. 1440 if entry.Source.Type == api.ItemTypeFolder { 1441 f.dirCache.FlushDir(path) 1442 } 1443 } 1444 1445 // If the item is "active", then it is not trashed or deleted, so 1446 // it potentially has a valid parent. 1447 // 1448 // Construct the new path of the object, based on the Parent ID 1449 // and its name. If we get an empty result, it means we don't 1450 // currently know about this object so notification is unnecessary. 1451 if entry.Source.ItemStatus == api.ItemStatusActive { 1452 path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name) 1453 if path != "" { 1454 fs.Debugf(f, "%s added new path (%q) for notify", eventDetails, path) 1455 pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType}) 1456 } else { 1457 fs.Debugf(f, "%s new parent not found", eventDetails) 1458 } 1459 } 1460 } 1461 1462 // box can sometimes repeatedly return the same Event IDs within a 1463 // short period of time. If it stops giving us new ones, treat it 1464 // the same as if it returned us none at all. 1465 if newEventIDs == 0 { 1466 return nextStreamPosition, nil 1467 } 1468 1469 notifiedPaths := make(map[string]bool) 1470 for _, p := range pathsToClear { 1471 if _, ok := notifiedPaths[p.path]; ok { 1472 continue 1473 } 1474 notifiedPaths[p.path] = true 1475 notifyFunc(p.path, p.entryType) 1476 } 1477 fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths)) 1478 } 1479 } 1480 1481 // DirCacheFlush resets the directory cache - used in testing as an 1482 // optional interface 1483 func (f *Fs) DirCacheFlush() { 1484 f.dirCache.ResetRoot() 1485 } 1486 1487 // Hashes returns the supported hash sets. 1488 func (f *Fs) Hashes() hash.Set { 1489 return hash.Set(hash.SHA1) 1490 } 1491 1492 // ------------------------------------------------------------ 1493 1494 // Fs returns the parent Fs 1495 func (o *Object) Fs() fs.Info { 1496 return o.fs 1497 } 1498 1499 // Return a string version 1500 func (o *Object) String() string { 1501 if o == nil { 1502 return "<nil>" 1503 } 1504 return o.remote 1505 } 1506 1507 // Remote returns the remote path 1508 func (o *Object) Remote() string { 1509 return o.remote 1510 } 1511 1512 // Hash returns the SHA-1 of an object returning a lowercase hex string 1513 func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { 1514 if t != hash.SHA1 { 1515 return "", hash.ErrUnsupported 1516 } 1517 return o.sha1, nil 1518 } 1519 1520 // Size returns the size of an object in bytes 1521 func (o *Object) Size() int64 { 1522 err := o.readMetaData(context.TODO()) 1523 if err != nil { 1524 fs.Logf(o, "Failed to read metadata: %v", err) 1525 return 0 1526 } 1527 return o.size 1528 } 1529 1530 // setMetaData sets the metadata from info 1531 func (o *Object) setMetaData(info *api.Item) (err error) { 1532 if info.Type == api.ItemTypeFolder { 1533 return fs.ErrorIsDir 1534 } 1535 if info.Type != api.ItemTypeFile { 1536 return fmt.Errorf("%q is %q: %w", o.remote, info.Type, fs.ErrorNotAFile) 1537 } 1538 o.hasMetaData = true 1539 o.size = int64(info.Size) 1540 o.sha1 = info.SHA1 1541 o.modTime = info.ModTime() 1542 o.id = info.ID 1543 o.publicLink = info.SharedLink.URL 1544 return nil 1545 } 1546 1547 // readMetaData gets the metadata if it hasn't already been fetched 1548 // 1549 // it also sets the info 1550 func (o *Object) readMetaData(ctx context.Context) (err error) { 1551 if o.hasMetaData { 1552 return nil 1553 } 1554 info, err := o.fs.readMetaDataForPath(ctx, o.remote) 1555 if err != nil { 1556 if apiErr, ok := err.(*api.Error); ok { 1557 if apiErr.Code == "not_found" || apiErr.Code == "trashed" { 1558 return fs.ErrorObjectNotFound 1559 } 1560 } 1561 return err 1562 } 1563 return o.setMetaData(info) 1564 } 1565 1566 // ModTime returns the modification time of the object 1567 // 1568 // It attempts to read the objects mtime and if that isn't present the 1569 // LastModified returned in the http headers 1570 func (o *Object) ModTime(ctx context.Context) time.Time { 1571 err := o.readMetaData(ctx) 1572 if err != nil { 1573 fs.Logf(o, "Failed to read metadata: %v", err) 1574 return time.Now() 1575 } 1576 return o.modTime 1577 } 1578 1579 // setModTime sets the modification time of the local fs object 1580 func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) { 1581 opts := rest.Opts{ 1582 Method: "PUT", 1583 Path: "/files/" + o.id, 1584 Parameters: fieldsValue(), 1585 } 1586 update := api.UpdateFileModTime{ 1587 ContentModifiedAt: api.Time(modTime), 1588 } 1589 var info *api.Item 1590 err := o.fs.pacer.Call(func() (bool, error) { 1591 resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info) 1592 return shouldRetry(ctx, resp, err) 1593 }) 1594 return info, err 1595 } 1596 1597 // SetModTime sets the modification time of the local fs object 1598 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { 1599 info, err := o.setModTime(ctx, modTime) 1600 if err != nil { 1601 return err 1602 } 1603 return o.setMetaData(info) 1604 } 1605 1606 // Storable returns a boolean showing whether this object storable 1607 func (o *Object) Storable() bool { 1608 return true 1609 } 1610 1611 // Open an object for read 1612 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { 1613 if o.id == "" { 1614 return nil, errors.New("can't download - no id") 1615 } 1616 fs.FixRangeOption(options, o.size) 1617 var resp *http.Response 1618 opts := rest.Opts{ 1619 Method: "GET", 1620 Path: "/files/" + o.id + "/content", 1621 Options: options, 1622 } 1623 err = o.fs.pacer.Call(func() (bool, error) { 1624 resp, err = o.fs.srv.Call(ctx, &opts) 1625 return shouldRetry(ctx, resp, err) 1626 }) 1627 if err != nil { 1628 return nil, err 1629 } 1630 return resp.Body, err 1631 } 1632 1633 // upload does a single non-multipart upload 1634 // 1635 // This is recommended for less than 50 MiB of content 1636 func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) { 1637 upload := api.UploadFile{ 1638 Name: o.fs.opt.Enc.FromStandardName(leaf), 1639 ContentModifiedAt: api.Time(modTime), 1640 ContentCreatedAt: api.Time(modTime), 1641 Parent: api.Parent{ 1642 ID: directoryID, 1643 }, 1644 } 1645 1646 var resp *http.Response 1647 var result api.FolderItems 1648 opts := rest.Opts{ 1649 Method: "POST", 1650 Body: in, 1651 MultipartMetadataName: "attributes", 1652 MultipartContentName: "contents", 1653 MultipartFileName: upload.Name, 1654 RootURL: uploadURL, 1655 Options: options, 1656 } 1657 // If object has an ID then it is existing so create a new version 1658 if o.id != "" { 1659 opts.Path = "/files/" + o.id + "/content" 1660 } else { 1661 opts.Path = "/files/content" 1662 } 1663 err = o.fs.pacer.CallNoRetry(func() (bool, error) { 1664 resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result) 1665 return shouldRetry(ctx, resp, err) 1666 }) 1667 if err != nil { 1668 return err 1669 } 1670 if result.TotalCount != 1 || len(result.Entries) != 1 { 1671 return fmt.Errorf("failed to upload %v - not sure why", o) 1672 } 1673 return o.setMetaData(&result.Entries[0]) 1674 } 1675 1676 // Update the object with the contents of the io.Reader, modTime and size 1677 // 1678 // If existing is set then it updates the object rather than creating a new one. 1679 // 1680 // The new object may have been created if an error is returned. 1681 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { 1682 if o.fs.tokenRenewer != nil { 1683 o.fs.tokenRenewer.Start() 1684 defer o.fs.tokenRenewer.Stop() 1685 } 1686 1687 size := src.Size() 1688 modTime := src.ModTime(ctx) 1689 remote := o.Remote() 1690 1691 // Create the directory for the object if it doesn't exist 1692 leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true) 1693 if err != nil { 1694 return err 1695 } 1696 1697 // Upload with simple or multipart 1698 if size <= int64(o.fs.opt.UploadCutoff) { 1699 err = o.upload(ctx, in, leaf, directoryID, modTime, options...) 1700 } else { 1701 err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime, options...) 1702 } 1703 return err 1704 } 1705 1706 // Remove an object 1707 func (o *Object) Remove(ctx context.Context) error { 1708 return o.fs.deleteObject(ctx, o.id) 1709 } 1710 1711 // ID returns the ID of the Object if known, or "" if not 1712 func (o *Object) ID() string { 1713 return o.id 1714 } 1715 1716 // Check the interfaces are satisfied 1717 var ( 1718 _ fs.Fs = (*Fs)(nil) 1719 _ fs.Purger = (*Fs)(nil) 1720 _ fs.PutStreamer = (*Fs)(nil) 1721 _ fs.Copier = (*Fs)(nil) 1722 _ fs.Abouter = (*Fs)(nil) 1723 _ fs.Mover = (*Fs)(nil) 1724 _ fs.DirMover = (*Fs)(nil) 1725 _ fs.DirCacheFlusher = (*Fs)(nil) 1726 _ fs.PublicLinker = (*Fs)(nil) 1727 _ fs.CleanUpper = (*Fs)(nil) 1728 _ fs.Shutdowner = (*Fs)(nil) 1729 _ fs.Object = (*Object)(nil) 1730 _ fs.IDer = (*Object)(nil) 1731 )