// Package googlecloudstorage provides an interface to Google Cloud Storage
package googlecloudstorage

/*
Notes

Can't set Updated but can set Metadata on object creation

Patch needs full_control not just read_write

FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 error
- https://code.google.com/p/google-api-go-client/issues/detail?id=64
*/

import (
	"context"
	"encoding/base64"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/artpar/rclone/fs"
	"github.com/artpar/rclone/fs/config"
	"github.com/artpar/rclone/fs/config/configmap"
	"github.com/artpar/rclone/fs/config/configstruct"
	"github.com/artpar/rclone/fs/config/obscure"
	"github.com/artpar/rclone/fs/fserrors"
	"github.com/artpar/rclone/fs/fshttp"
	"github.com/artpar/rclone/fs/hash"
	"github.com/artpar/rclone/fs/walk"
	"github.com/artpar/rclone/lib/bucket"
	"github.com/artpar/rclone/lib/encoder"
	"github.com/artpar/rclone/lib/env"
	"github.com/artpar/rclone/lib/oauthutil"
	"github.com/artpar/rclone/lib/pacer"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/googleapi"
	option "google.golang.org/api/option"

	// NOTE: This API is deprecated
	storage "google.golang.org/api/storage/v1"
)

const (
	rcloneClientID              = "202264815644.apps.googleusercontent.com"
	rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
	timeFormat                  = time.RFC3339Nano
	metaMtime                   = "mtime"                    // key to store mtime in metadata
	metaMtimeGsutil             = "goog-reserved-file-mtime" // key used by GSUtil to store mtime in metadata
	listChunks                  = 1000                       // chunk size to read directory listings
	minSleep                    = 10 * time.Millisecond
)

var (
	// Description of how to auth for this app
	storageConfig = &oauth2.Config{
		Scopes:       []string{storage.DevstorageReadWriteScope},
		Endpoint:     google.Endpoint,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectURL,
	}
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "google cloud storage",
		Prefix:      "gcs",
		Description: "Google Cloud Storage (this is not Google Drive)",
		NewFs:       NewFs,
		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
			saFile, _ := m.Get("service_account_file")
			saCreds, _ := m.Get("service_account_credentials")
			anonymous, _ := m.Get("anonymous")
			envAuth, _ := m.Get("env_auth")
			if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
				return nil, nil
			}
			return oauthutil.ConfigOut("", &oauthutil.Options{
				OAuth2Config: storageConfig,
			})
		},
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name:      "project_number",
			Help:      "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
			Sensitive: true,
		}, {
			Name:      "user_project",
			Help:      "User project.\n\nOptional - needed only for requester pays.",
			Sensitive: true,
		}, {
			Name: "service_account_file",
			Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want to use a service account instead of interactive login." + env.ShellExpandHelp,
		}, {
			Name:      "service_account_credentials",
			Help:      "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want to use a service account instead of interactive login.",
			Hide:      fs.OptionHideBoth,
			Sensitive: true,
		}, {
			Name:    "anonymous",
			Help:    "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't want to configure credentials.",
			Default: false,
		}, {
			Name: "object_acl",
			Help: "Access Control List for new objects.",
			Examples: []fs.OptionExample{{
				Value: "authenticatedRead",
				Help:  "Object owner gets OWNER access.\nAll Authenticated Users get READER access.",
			}, {
				Value: "bucketOwnerFullControl",
				Help:  "Object owner gets OWNER access.\nProject team owners get OWNER access.",
			}, {
				Value: "bucketOwnerRead",
				Help:  "Object owner gets OWNER access.\nProject team owners get READER access.",
			}, {
				Value: "private",
				Help:  "Object owner gets OWNER access.\nDefault if left blank.",
			}, {
				Value: "projectPrivate",
				Help:  "Object owner gets OWNER access.\nProject team members get access according to their roles.",
			}, {
				Value: "publicRead",
				Help:  "Object owner gets OWNER access.\nAll Users get READER access.",
			}},
		}, {
			Name: "bucket_acl",
			Help: "Access Control List for new buckets.",
			Examples: []fs.OptionExample{{
				Value: "authenticatedRead",
				Help:  "Project team owners get OWNER access.\nAll Authenticated Users get READER access.",
			}, {
				Value: "private",
				Help:  "Project team owners get OWNER access.\nDefault if left blank.",
			}, {
				Value: "projectPrivate",
				Help:  "Project team members get access according to their roles.",
			}, {
				Value: "publicRead",
				Help:  "Project team owners get OWNER access.\nAll Users get READER access.",
			}, {
				Value: "publicReadWrite",
				Help:  "Project team owners get OWNER access.\nAll Users get WRITER access.",
			}},
		}, {
			Name: "bucket_policy_only",
			Help: `Access checks should use bucket-level IAM policies.

If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.

When it is set, rclone:

- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set

Docs: https://cloud.google.com/storage/docs/bucket-policy-only
`,
			Default: false,
		}, {
			Name: "location",
			Help: "Location for the newly created buckets.",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Empty for default location (US)",
			}, {
				Value: "asia",
				Help:  "Multi-regional location for Asia",
			}, {
				Value: "eu",
				Help:  "Multi-regional location for Europe",
			}, {
				Value: "us",
				Help:  "Multi-regional location for United States",
			}, {
				Value: "asia-east1",
				Help:  "Taiwan",
			}, {
				Value: "asia-east2",
				Help:  "Hong Kong",
			}, {
				Value: "asia-northeast1",
				Help:  "Tokyo",
			}, {
				Value: "asia-northeast2",
				Help:  "Osaka",
			}, {
				Value: "asia-northeast3",
				Help:  "Seoul",
			}, {
				Value: "asia-south1",
				Help:  "Mumbai",
			}, {
				Value: "asia-south2",
				Help:  "Delhi",
			}, {
				Value: "asia-southeast1",
				Help:  "Singapore",
			}, {
				Value: "asia-southeast2",
				Help:  "Jakarta",
			}, {
				Value: "australia-southeast1",
				Help:  "Sydney",
			}, {
				Value: "australia-southeast2",
				Help:  "Melbourne",
			}, {
				Value: "europe-north1",
				Help:  "Finland",
			}, {
				Value: "europe-west1",
				Help:  "Belgium",
			}, {
				Value: "europe-west2",
				Help:  "London",
			}, {
				Value: "europe-west3",
				Help:  "Frankfurt",
			}, {
				Value: "europe-west4",
				Help:  "Netherlands",
			}, {
				Value: "europe-west6",
				Help:  "Zürich",
			}, {
				Value: "europe-central2",
				Help:  "Warsaw",
			}, {
				Value: "us-central1",
				Help:  "Iowa",
			}, {
				Value: "us-east1",
				Help:  "South Carolina",
			}, {
				Value: "us-east4",
				Help:  "Northern Virginia",
			}, {
				Value: "us-west1",
				Help:  "Oregon",
			}, {
				Value: "us-west2",
				Help:  "California",
			}, {
				Value: "us-west3",
				Help:  "Salt Lake City",
			}, {
				Value: "us-west4",
				Help:  "Las Vegas",
			}, {
				Value: "northamerica-northeast1",
				Help:  "Montréal",
			}, {
				Value: "northamerica-northeast2",
				Help:  "Toronto",
			}, {
				Value: "southamerica-east1",
				Help:  "São Paulo",
			}, {
				Value: "southamerica-west1",
				Help:  "Santiago",
			}, {
				Value: "asia1",
				Help:  "Dual region: asia-northeast1 and asia-northeast2.",
			}, {
				Value: "eur4",
				Help:  "Dual region: europe-north1 and europe-west4.",
			}, {
				Value: "nam4",
				Help:  "Dual region: us-central1 and us-east1.",
			}},
		}, {
			Name: "storage_class",
			Help: "The storage class to use when storing objects in Google Cloud Storage.",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Default",
			}, {
				Value: "MULTI_REGIONAL",
				Help:  "Multi-regional storage class",
			}, {
				Value: "REGIONAL",
				Help:  "Regional storage class",
			}, {
				Value: "NEARLINE",
				Help:  "Nearline storage class",
			}, {
				Value: "COLDLINE",
				Help:  "Coldline storage class",
			}, {
				Value: "ARCHIVE",
				Help:  "Archive storage class",
			}, {
				Value: "DURABLE_REDUCED_AVAILABILITY",
				Help:  "Durable reduced availability storage class",
			}},
		}, {
			Name:     "directory_markers",
			Default:  false,
			Advanced: true,
			Help: `Upload an empty object with a trailing slash when a new directory is created

Empty folders are unsupported for bucket based remotes; this option creates
an empty object ending with "/" to persist the folder.
`,
		}, {
			Name: "no_check_bucket",
			Help: `If set, don't attempt to check the bucket exists or create it.

This can be useful when trying to minimise the number of transactions
rclone does if you know the bucket exists already.
`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "decompress",
			Help: `If set this will decompress gzip encoded objects.

It is possible to upload objects to GCS with "Content-Encoding: gzip"
set. Normally rclone will download these files as compressed objects.

If this flag is set then rclone will decompress these files with
"Content-Encoding: gzip" as they are received. This means that rclone
can't check the size and hash but the file contents will be decompressed.
`,
			Advanced: true,
			Default:  false,
		}, {
			Name:     "endpoint",
			Help:     "Endpoint for the service.\n\nLeave blank normally.",
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			Default: (encoder.Base |
				encoder.EncodeCrLf |
				encoder.EncodeInvalidUtf8),
		}, {
			Name:    "env_auth",
			Help:    "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials are blank.",
			Default: false,
			Examples: []fs.OptionExample{{
				Value: "false",
				Help:  "Enter credentials in the next step.",
			}, {
				Value: "true",
				Help:  "Get GCP IAM credentials from the environment (env vars or IAM).",
			}},
		}}...),
	})
}

// Options defines the configuration for this backend
type Options struct {
	ProjectNumber             string               `config:"project_number"`
	UserProject               string               `config:"user_project"`
	ServiceAccountFile        string               `config:"service_account_file"`
	ServiceAccountCredentials string               `config:"service_account_credentials"`
	Anonymous                 bool                 `config:"anonymous"`
	ObjectACL                 string               `config:"object_acl"`
	BucketACL                 string               `config:"bucket_acl"`
	BucketPolicyOnly          bool                 `config:"bucket_policy_only"`
	Location                  string               `config:"location"`
	StorageClass              string               `config:"storage_class"`
	NoCheckBucket             bool                 `config:"no_check_bucket"`
	Decompress                bool                 `config:"decompress"`
	Endpoint                  string               `config:"endpoint"`
	Enc                       encoder.MultiEncoder `config:"encoding"`
	EnvAuth                   bool                 `config:"env_auth"`
	DirectoryMarkers          bool                 `config:"directory_markers"`
}

// Fs represents a remote storage server
type Fs struct {
	name           string           // name of this remote
	root           string           // the path we are working on if any
	opt            Options          // parsed options
	features       *fs.Features     // optional features
	svc            *storage.Service // the connection to the storage server
	client         *http.Client     // authorized client
	rootBucket     string           // bucket part of root (if any)
	rootDirectory  string           // directory part of root (if any)
	cache          *bucket.Cache    // cache of bucket status
	pacer          *fs.Pacer        // To pace the API calls
	warnCompressed sync.Once        // warn once about compressed files
}

// Object describes a storage object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs       *Fs       // what this object is part of
	remote   string    // The remote path
	url      string    // download path
	md5sum   string    // The MD5Sum of the object
	bytes    int64     // Bytes in the object
	modTime  time.Time // Modified time of the object
	mimeType string    // Content-Type of the object
	gzipped  bool      // set if object has Content-Encoding: gzip
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.rootBucket == "" {
		return "GCS root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("GCS bucket %s", f.rootBucket)
	}
	return fmt.Sprintf("GCS bucket %s path %s", f.rootBucket, f.rootDirectory)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// shouldRetry determines whether a given err rates being retried
func shouldRetry(ctx context.Context, err error) (again bool, errOut error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	again = false
	if err != nil {
		if fserrors.ShouldRetry(err) {
			again = true
		} else {
			switch gerr := err.(type) {
			case *googleapi.Error:
				if gerr.Code >= 500 && gerr.Code < 600 {
					// All 5xx errors should be retried
					again = true
				} else if len(gerr.Errors) > 0 {
					reason := gerr.Errors[0].Reason
					if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
						again = true
					}
				}
			}
		}
	}
	return again, err
}

// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
	return o.fs.split(o.remote)
}
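
// As an illustration of the path splitting above (hypothetical values):
// with f.root set to "mybucket/photos", f.split("2020/img.jpg") returns
// bucketName "mybucket" and bucketPath "photos/2020/img.jpg", after the
// configured encoding has been applied.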

func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
	conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
	if err != nil {
		return nil, fmt.Errorf("error processing credentials: %w", err)
	}
	ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	var oAuthClient *http.Client

	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.ObjectACL == "" {
		opt.ObjectACL = "private"
	}
	if opt.BucketACL == "" {
		opt.BucketACL = "private"
	}

	// try loading service account credentials from env variable, then from a file
	if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
		loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
		if err != nil {
			return nil, fmt.Errorf("error opening service account credentials file: %w", err)
		}
		opt.ServiceAccountCredentials = string(loadedCreds)
	}
	if opt.Anonymous {
		oAuthClient = fshttp.NewClient(ctx)
	} else if opt.ServiceAccountCredentials != "" {
		oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
		if err != nil {
			return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
		}
	} else if opt.EnvAuth {
		oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
		if err != nil {
			return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
		}
	} else {
		oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
		if err != nil {
			ctx := context.Background()
			oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
			if err != nil {
				return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
			}
		}
	}

	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
		cache: bucket.NewCache(),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
	}).Fill(ctx, f)
	if opt.DirectoryMarkers {
		f.features.CanHaveEmptyDirectories = true
	}

	// Create a new authorized storage client
	f.client = oAuthClient
	gcsOpts := []option.ClientOption{option.WithHTTPClient(f.client)}
	if opt.Endpoint != "" {
		gcsOpts = append(gcsOpts, option.WithEndpoint(opt.Endpoint))
	}
	f.svc, err = storage.NewService(context.Background(), gcsOpts...)
	if err != nil {
		return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
	}

	if f.rootBucket != "" && f.rootDirectory != "" {
		// Check to see if the object exists
		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
		err = f.pacer.Call(func() (bool, error) {
			get := f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx)
			if f.opt.UserProject != "" {
				get = get.UserProject(f.opt.UserProject)
			}
			_, err = get.Do()
			return shouldRetry(ctx, err)
		})
		if err == nil {
			newRoot := path.Dir(f.root)
			if newRoot == "." {
				newRoot = ""
			}
			f.setRoot(newRoot)
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	return f, nil
}
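
// Illustrative use via the public rclone API (not part of this file):
// a remote configured with the name "gcs" can be opened with
//
//	f, err := fs.NewFs(ctx, "gcs:mybucket/path")
//
// which reaches the NewFs above through the registry set up in init.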

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage.Object) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		o.setMetaData(info)
	} else {
		err := o.readMetaData(ctx) // reads info and meta, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// listFn is called from list to handle an object.
type listFn func(remote string, object *storage.Object, isDirectory bool) error
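
// An illustrative listFn which just logs each entry might look like:
//
//	fn := func(remote string, object *storage.Object, isDirectory bool) error {
//		fs.Debugf(nil, "got %q (dir=%v)", remote, isDirectory)
//		return nil
//	}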

// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories.
//
// The remote has prefix removed from it and if addBucket is set
// then it adds the bucket to the start.
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) {
	if prefix != "" {
		prefix += "/"
	}
	if directory != "" {
		directory += "/"
	}
	list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
	if f.opt.UserProject != "" {
		list = list.UserProject(f.opt.UserProject)
	}
	if !recurse {
		list = list.Delimiter("/")
	}
	foundItems := 0
	for {
		var objects *storage.Objects
		err = f.pacer.Call(func() (bool, error) {
			objects, err = list.Context(ctx).Do()
			return shouldRetry(ctx, err)
		})
		if err != nil {
			if gErr, ok := err.(*googleapi.Error); ok {
				if gErr.Code == http.StatusNotFound {
					err = fs.ErrorDirNotFound
				}
			}
			return err
		}
		if !recurse {
			foundItems += len(objects.Prefixes)
			var object storage.Object
			for _, remote := range objects.Prefixes {
				if !strings.HasSuffix(remote, "/") {
					continue
				}
				remote = f.opt.Enc.ToStandardPath(remote)
				if !strings.HasPrefix(remote, prefix) {
					fs.Logf(f, "Odd name received %q", remote)
					continue
				}
				remote = remote[len(prefix) : len(remote)-1]
				if addBucket {
					remote = path.Join(bucket, remote)
				}
				err = fn(remote, &object, true)
				if err != nil {
					return err
				}
			}
		}
		foundItems += len(objects.Items)
		for _, object := range objects.Items {
			remote := f.opt.Enc.ToStandardPath(object.Name)
			if !strings.HasPrefix(remote, prefix) {
				fs.Logf(f, "Odd name received %q", object.Name)
				continue
			}
			isDirectory := remote == "" || strings.HasSuffix(remote, "/")
			// is this a directory marker?
			if isDirectory {
				// Don't insert the root directory
				if remote == directory {
					continue
				}
				// process directory markers as directories
				remote = strings.TrimRight(remote, "/")
			}
			remote = remote[len(prefix):]
			if addBucket {
				remote = path.Join(bucket, remote)
			}

			err = fn(remote, object, isDirectory)
			if err != nil {
				return err
			}
		}
		if objects.NextPageToken == "" {
			break
		}
		list.PageToken(objects.NextPageToken)
	}
	if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" {
		// Determine whether the directory exists or not by whether it has a marker
		_, err := f.readObjectInfo(ctx, bucket, directory)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				return fs.ErrorDirNotFound
			}
			return err
		}
	}

	return nil
}
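
// As an illustration of the delimiter handling in list above
// (hypothetical bucket contents): listing directory "a" with
// recurse=false returns objects directly under "a/" in objects.Items,
// while deeper paths such as "a/b/..." are collapsed into the single
// entry "a/b/" in objects.Prefixes.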

// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
	if isDirectory {
		d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
		return d, nil
	}
	o, err := f.newObjectWithInfo(ctx, remote, object)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
	// List the objects
	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
		if err != nil {
			return err
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.cache.MarkOK(bucket)
	return entries, err
}

// listBuckets lists the buckets
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
	if f.opt.ProjectNumber == "" {
		return nil, errors.New("can't list buckets without project number")
	}
	listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
	if f.opt.UserProject != "" {
		listBuckets = listBuckets.UserProject(f.opt.UserProject)
	}
	for {
		var buckets *storage.Buckets
		err = f.pacer.Call(func() (bool, error) {
			buckets, err = listBuckets.Context(ctx).Do()
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return nil, err
		}
		for _, bucket := range buckets.Items {
			d := fs.NewDir(f.opt.Enc.ToStandardName(bucket.Name), time.Time{})
			entries = append(entries, d)
		}
		if buckets.NextPageToken == "" {
			break
		}
		listBuckets.PageToken(buckets.NextPageToken)
	}
	return entries, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	bucket, directory := f.split(dir)
	if bucket == "" {
		if directory != "" {
			return nil, fs.ErrorListBucketRequired
		}
		return f.listBuckets(ctx)
	}
	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}
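
// Illustrative use of List (assuming f was opened on "gcs:mybucket"):
//
//	entries, err := f.List(ctx, "photos")
//	if err == nil {
//		for _, entry := range entries {
//			fmt.Println(entry.Remote())
//		}
//	}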

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
			if err != nil {
				return err
			}
			return list.Add(entry)
		})
	}
	if bucket == "" {
		entries, err := f.listBuckets(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			bucket := entry.Remote()
			err = listR(bucket, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// bucket must be present if listing succeeded
			f.cache.MarkOK(bucket)
		}
	} else {
		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
		if err != nil {
			return err
		}
		// bucket must be present if listing succeeded
		f.cache.MarkOK(bucket)
	}
	return list.Flush()
}
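
// Note on the helper above: walk.NewListRHelper buffers entries passed
// to Add and sends them to the callback in batches, so the final Flush
// is what delivers any remaining entries - without it the tail of the
// listing would be lost.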

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return o, o.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// Create directory marker file and parents
func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
	if !f.opt.DirectoryMarkers || bucket == "" {
		return nil
	}

	// Object to be uploaded
	o := &Object{
		fs:      f,
		modTime: time.Now(),
	}

	for {
		_, bucketPath := f.split(dir)
		// Don't create the directory marker if it is the bucket or at the very root
		if bucketPath == "" {
			break
		}
		o.remote = dir + "/"

		// Check to see if object already exists
		_, err := o.readObjectInfo(ctx)
		if err == nil {
			return nil
		}

		// Upload it if not
		fs.Debugf(o, "Creating directory marker")
		content := io.Reader(strings.NewReader(""))
		err = o.Update(ctx, content, o)
		if err != nil {
			return fmt.Errorf("creating directory marker failed: %w", err)
		}

		// Now check parent directory exists
		dir = path.Dir(dir)
		if dir == "/" || dir == "." {
			break
		}
	}

	return nil
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	bucket, _ := f.split(dir)
	e := f.checkBucket(ctx, bucket)
	if e != nil {
		return e
	}
	return f.createDirectoryMarker(ctx, bucket, dir)
}

// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
	remote = strings.TrimRight(remote, "/")
	dir := path.Dir(remote)
	if dir == "/" || dir == "." {
		dir = ""
	}
	return f.Mkdir(ctx, dir)
}

// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
	return f.cache.Create(bucket, func() error {
		// List something from the bucket to see if it exists. Doing it like this enables the use of a
		// service account that only has the "Storage Object Admin" role. See #2193 for details.
		err = f.pacer.Call(func() (bool, error) {
			list := f.svc.Objects.List(bucket).MaxResults(1).Context(ctx)
			if f.opt.UserProject != "" {
				list = list.UserProject(f.opt.UserProject)
			}
			_, err = list.Do()
			return shouldRetry(ctx, err)
		})
		if err == nil {
			// Bucket already exists
			return nil
		} else if gErr, ok := err.(*googleapi.Error); ok {
			if gErr.Code != http.StatusNotFound {
				return fmt.Errorf("failed to get bucket: %w", err)
			}
		} else {
			return fmt.Errorf("failed to get bucket: %w", err)
		}

		if f.opt.ProjectNumber == "" {
			return errors.New("can't make bucket without project number")
		}

		bucket := storage.Bucket{
			Name:         bucket,
			Location:     f.opt.Location,
			StorageClass: f.opt.StorageClass,
		}
		if f.opt.BucketPolicyOnly {
			bucket.IamConfiguration = &storage.BucketIamConfiguration{
				BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
					Enabled: true,
				},
			}
		}
		return f.pacer.Call(func() (bool, error) {
			insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
			if !f.opt.BucketPolicyOnly {
				insertBucket.PredefinedAcl(f.opt.BucketACL)
			}
			insertBucket = insertBucket.Context(ctx)
			if f.opt.UserProject != "" {
				insertBucket = insertBucket.UserProject(f.opt.UserProject)
			}
			_, err = insertBucket.Do()
			return shouldRetry(ctx, err)
		})
	}, nil)
}

// checkBucket creates the bucket if it doesn't exist unless NoCheckBucket is true
func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
	if f.opt.NoCheckBucket {
		return nil
	}
	return f.makeBucket(ctx, bucket)
}
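
// For example (illustrative): with directory_markers enabled, Mkdir on
// "a/b" uploads the zero length objects "a/b/" and then "a/", walking
// up the path so that each level of the otherwise empty directory tree
// is persisted on the bucket based remote.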

// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
	bucket, directory := f.split(dir)
	// Remove directory marker file
	if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
		o := &Object{
			fs:     f,
			remote: dir + "/",
		}
		fs.Debugf(o, "Removing directory marker")
		err := o.Remove(ctx)
		if err != nil {
			return fmt.Errorf("removing directory marker failed: %w", err)
		}
	}
	if bucket == "" || directory != "" {
		return nil
	}
	return f.cache.Remove(bucket, func() error {
		return f.pacer.Call(func() (bool, error) {
			deleteBucket := f.svc.Buckets.Delete(bucket).Context(ctx)
			if f.opt.UserProject != "" {
				deleteBucket = deleteBucket.UserProject(f.opt.UserProject)
			}
			err = deleteBucket.Do()
			return shouldRetry(ctx, err)
		})
	})
}

// Precision returns the precision
func (f *Fs) Precision() time.Duration {
	return time.Nanosecond
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	dstBucket, dstPath := f.split(remote)
	err := f.mkdirParent(ctx, remote)
	if err != nil {
		return nil, err
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	srcBucket, srcPath := srcObj.split()

	// Temporary Object under construction
	dstObj := &Object{
		fs:     f,
		remote: remote,
	}

	rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
	if !f.opt.BucketPolicyOnly {
		rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
	}
	var rewriteResponse *storage.RewriteResponse
	for {
		err = f.pacer.Call(func() (bool, error) {
			rewriteRequest = rewriteRequest.Context(ctx)
			if f.opt.UserProject != "" {
				rewriteRequest.UserProject(f.opt.UserProject)
			}
			rewriteResponse, err = rewriteRequest.Do()
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return nil, err
		}
		if rewriteResponse.Done {
			break
		}
		rewriteRequest.RewriteToken(rewriteResponse.RewriteToken)
		fs.Debugf(dstObj, "Continuing rewrite %d bytes done", rewriteResponse.TotalBytesRewritten)
	}
	// Set the metadata for the new object while we have it
	dstObj.setMetaData(rewriteResponse.Resource)
	return dstObj, nil
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}
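
// Illustrative flow for the Rewrite loop in Copy above: a large cross
// location copy may return Done=false part way through, e.g.
// (hypothetical values) TotalBytesRewritten=1073741824 plus a
// RewriteToken; resubmitting the request with that token resumes the
// server-side copy until Done is true.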

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the MD5 sum of an object as a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return o.md5sum, nil
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.bytes
}

// setMetaData sets the fs data from a storage.Object
func (o *Object) setMetaData(info *storage.Object) {
	o.url = info.MediaLink
	o.bytes = int64(info.Size)
	o.mimeType = info.ContentType
	o.gzipped = info.ContentEncoding == "gzip"

	// Read md5sum
	md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
	if err != nil {
		fs.Logf(o, "Bad MD5 decode: %v", err)
	} else {
		o.md5sum = hex.EncodeToString(md5sumData)
	}

	// read mtime out of metadata if available
	mtimeString, ok := info.Metadata[metaMtime]
	if ok {
		modTime, err := time.Parse(timeFormat, mtimeString)
		if err == nil {
			o.modTime = modTime
			return
		}
		fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
	}

	// Fallback to GSUtil mtime
	mtimeGsutilString, ok := info.Metadata[metaMtimeGsutil]
	if ok {
		unixTimeSec, err := strconv.ParseInt(mtimeGsutilString, 10, 64)
		if err == nil {
			o.modTime = time.Unix(unixTimeSec, 0)
			return
		}
		fs.Debugf(o, "Failed to read GSUtil mtime from metadata: %s", err)
	}

	// Fallback to the Updated time
	modTime, err := time.Parse(timeFormat, info.Updated)
	if err != nil {
		fs.Logf(o, "Bad time decode: %v", err)
	} else {
		o.modTime = modTime
	}

	// If gunzipping then size and md5sum are unknown
	if o.gzipped && o.fs.opt.Decompress {
		o.bytes = -1
		o.md5sum = ""
	}
}

// readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
	bucket, bucketPath := o.split()
	return o.fs.readObjectInfo(ctx, bucket, bucketPath)
}

// readObjectInfo reads the definition for an object
func (f *Fs) readObjectInfo(ctx context.Context, bucket, bucketPath string) (object *storage.Object, err error) {
	err = f.pacer.Call(func() (bool, error) {
		get := f.svc.Objects.Get(bucket, bucketPath).Context(ctx)
		if f.opt.UserProject != "" {
			get = get.UserProject(f.opt.UserProject)
		}
		object, err = get.Do()
		return shouldRetry(ctx, err)
	})
	if err != nil {
		if gErr, ok := err.(*googleapi.Error); ok {
			if gErr.Code == http.StatusNotFound {
				return nil, fs.ErrorObjectNotFound
			}
		}
		return nil, err
	}
	return object, nil
}
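
// As an example of the metadata formats read by setMetaData above, an
// object's custom metadata might carry (hypothetical values):
//
//	"mtime": "2017-05-31T16:15:57.034468261Z"  // timeFormat (RFC3339 with nanoseconds)
//	"goog-reserved-file-mtime": "1496247357"   // Unix seconds, as written by gsutil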

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
	if !o.modTime.IsZero() {
		return nil
	}
	object, err := o.readObjectInfo(ctx)
	if err != nil {
		return err
	}
	o.setMetaData(object)
	return nil
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	err := o.readMetaData(ctx)
	if err != nil {
		// fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()
	}
	return o.modTime
}

// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
	metadata := make(map[string]string, 1)
	metadata[metaMtime] = modTime.Format(timeFormat)
	metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
	return metadata
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	// read the complete existing object first
	object, err := o.readObjectInfo(ctx)
	if err != nil {
		return err
	}
	// Add the mtime to the existing metadata
	if object.Metadata == nil {
		object.Metadata = make(map[string]string, 1)
	}
	object.Metadata[metaMtime] = modTime.Format(timeFormat)
	object.Metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
	// Copy the object to itself to update the metadata
	// Using PATCH requires too many permissions
	bucket, bucketPath := o.split()
	var newObject *storage.Object
	err = o.fs.pacer.Call(func() (bool, error) {
		copyObject := o.fs.svc.Objects.Copy(bucket, bucketPath, bucket, bucketPath, object)
		if !o.fs.opt.BucketPolicyOnly {
			copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
		}
		copyObject = copyObject.Context(ctx)
		if o.fs.opt.UserProject != "" {
			copyObject = copyObject.UserProject(o.fs.opt.UserProject)
		}
		newObject, err = copyObject.Do()
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return err
	}
	o.setMetaData(newObject)
	return nil
}

// Storable returns a boolean as to whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	url := o.url
	if o.fs.opt.UserProject != "" {
		url += "&userProject=" + o.fs.opt.UserProject
	}
	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return nil, err
	}
	fs.FixRangeOption(options, o.bytes)
	if o.gzipped && !o.fs.opt.Decompress {
		// Allow files which are stored on the cloud storage system
		// compressed to be downloaded without being decompressed. Note
		// that setting this here overrides the automatic decompression
		// in the Transport.
		//
		// See: https://cloud.google.com/storage/docs/transcoding
		req.Header.Set("Accept-Encoding", "gzip")
		o.fs.warnCompressed.Do(func() {
			fs.Logf(o, "Not decompressing 'Content-Encoding: gzip' compressed file. Use --gcs-decompress to override")
		})
	}
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	var res *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.client.Do(req)
		if err == nil {
			err = googleapi.CheckResponse(res)
			if err != nil {
				_ = res.Body.Close() // ignore error
			}
		}
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}
	_, isRanging := req.Header["Range"]
	if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
		_ = res.Body.Close() // ignore error
		return nil, fmt.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
	}
	return res.Body, nil
}
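
// Illustrative caller code for a ranged read via Open (hypothetical):
//
//	in, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
//
// which fetches just the first 1 KiB of the object via the Range header
// added by fs.OpenOptionAddHTTPHeaders.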

// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	bucket, bucketPath := o.split()
	// Create parent dir/bucket if not saving directory marker
	if !strings.HasSuffix(o.remote, "/") {
		err = o.fs.mkdirParent(ctx, o.remote)
		if err != nil {
			return err
		}
	}
	modTime := src.ModTime(ctx)

	object := storage.Object{
		Bucket:      bucket,
		Name:        bucketPath,
		ContentType: fs.MimeType(ctx, src),
		Metadata:    metadataFromModTime(modTime),
	}
	// Apply upload options
	for _, option := range options {
		key, value := option.Header()
		lowerKey := strings.ToLower(key)
		switch lowerKey {
		case "":
			// ignore
		case "cache-control":
			object.CacheControl = value
		case "content-disposition":
			object.ContentDisposition = value
		case "content-encoding":
			object.ContentEncoding = value
		case "content-language":
			object.ContentLanguage = value
		case "content-type":
			object.ContentType = value
		case "x-goog-storage-class":
			object.StorageClass = value
		default:
			const googMetaPrefix = "x-goog-meta-"
			if strings.HasPrefix(lowerKey, googMetaPrefix) {
				metaKey := lowerKey[len(googMetaPrefix):]
				object.Metadata[metaKey] = value
			} else {
				fs.Errorf(o, "Don't know how to set key %q on upload", key)
			}
		}
	}
	var newObject *storage.Object
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
		if !o.fs.opt.BucketPolicyOnly {
			insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
		}
		insertObject = insertObject.Context(ctx)
		if o.fs.opt.UserProject != "" {
			insertObject = insertObject.UserProject(o.fs.opt.UserProject)
		}
		newObject, err = insertObject.Do()
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return err
	}
	// Set the metadata for the new object while we have it
	o.setMetaData(newObject)
	return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
	bucket, bucketPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
		deleteObject := o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx)
		if o.fs.opt.UserProject != "" {
			deleteObject = deleteObject.UserProject(o.fs.opt.UserProject)
		}
		err = deleteObject.Do()
		return shouldRetry(ctx, err)
	})
	return err
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}

// Check the interfaces are satisfied
var (
	_ fs.Fs          = &Fs{}
	_ fs.Copier      = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.Object      = &Object{}
	_ fs.MimeTyper   = &Object{}
)