// Package googlecloudstorage provides an interface to Google Cloud Storage
package googlecloudstorage

/*
Notes

Can't set Updated but can set Metadata on object creation

Patch needs full_control not just read_write

FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 error
- https://code.google.com/p/google-api-go-client/issues/detail?id=64
*/

import (
	"context"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/googleapi"

	// NOTE: This API is deprecated
	storage "google.golang.org/api/storage/v1"
)

const (
	rcloneClientID              = "202264815644.apps.googleusercontent.com"
	rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
	timeFormatIn                = time.RFC3339
	timeFormatOut               = "2006-01-02T15:04:05.000000000Z07:00"
	metaMtime                   = "mtime" // key to store mtime under in metadata
	listChunks                  = 1000    // chunk size to read directory listings
	minSleep                    = 10 * time.Millisecond
)
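
// A hedged aside on the asymmetric time formats above: reads accept any
// RFC3339 timestamp, while writes always emit nine fractional digits so the
// metadata value is byte-for-byte stable. The timestamp below is
// illustrative only:
//
//	t := time.Date(2020, 6, 26, 22, 0, 27, 0, time.UTC)
//	t.Format(timeFormatOut) // "2020-06-26T22:00:27.000000000Z"
//	t.Format(timeFormatIn)  // "2020-06-26T22:00:27Z"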

var (
	// Description of how to auth for this app
	storageConfig = &oauth2.Config{
		Scopes:       []string{storage.DevstorageReadWriteScope},
		Endpoint:     google.Endpoint,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.TitleBarRedirectURL,
	}
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "google cloud storage",
		Prefix:      "gcs",
		Description: "Google Cloud Storage (this is not Google Drive)",
		NewFs:       NewFs,
		Config: func(name string, m configmap.Mapper) {
			saFile, _ := m.Get("service_account_file")
			saCreds, _ := m.Get("service_account_credentials")
			if saFile != "" || saCreds != "" {
				return
			}
			err := oauthutil.Config("google cloud storage", name, m, storageConfig, nil)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Google Application Client ID\nLeave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Google Application Client Secret\nLeave blank normally.",
		}, {
			Name: "project_number",
			Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
		}, {
			Name: "service_account_file",
			Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want to use a service account instead of interactive login.",
		}, {
			Name: "service_account_credentials",
			Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want to use a service account instead of interactive login.",
			Hide: fs.OptionHideBoth,
		}, {
			Name: "object_acl",
			Help: "Access Control List for new objects.",
			Examples: []fs.OptionExample{{
				Value: "authenticatedRead",
				Help:  "Object owner gets OWNER access, and all Authenticated Users get READER access.",
			}, {
				Value: "bucketOwnerFullControl",
				Help:  "Object owner gets OWNER access, and project team owners get OWNER access.",
			}, {
				Value: "bucketOwnerRead",
				Help:  "Object owner gets OWNER access, and project team owners get READER access.",
			}, {
				Value: "private",
				Help:  "Object owner gets OWNER access [default if left blank].",
			}, {
				Value: "projectPrivate",
				Help:  "Object owner gets OWNER access, and project team members get access according to their roles.",
			}, {
				Value: "publicRead",
				Help:  "Object owner gets OWNER access, and all Users get READER access.",
			}},
		}, {
			Name: "bucket_acl",
			Help: "Access Control List for new buckets.",
			Examples: []fs.OptionExample{{
				Value: "authenticatedRead",
				Help:  "Project team owners get OWNER access, and all Authenticated Users get READER access.",
			}, {
				Value: "private",
				Help:  "Project team owners get OWNER access [default if left blank].",
			}, {
				Value: "projectPrivate",
				Help:  "Project team members get access according to their roles.",
			}, {
				Value: "publicRead",
				Help:  "Project team owners get OWNER access, and all Users get READER access.",
			}, {
				Value: "publicReadWrite",
				Help:  "Project team owners get OWNER access, and all Users get WRITER access.",
			}},
		}, {
			Name: "bucket_policy_only",
			Help: `Access checks should use bucket-level IAM policies.

If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.

When it is set, rclone:

- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set

Docs: https://cloud.google.com/storage/docs/bucket-policy-only
`,
			Default: false,
		}, {
			Name: "location",
			Help: "Location for the newly created buckets.",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Empty for default location (US).",
			}, {
				Value: "asia",
				Help:  "Multi-regional location for Asia.",
			}, {
				Value: "eu",
				Help:  "Multi-regional location for Europe.",
			}, {
				Value: "us",
				Help:  "Multi-regional location for United States.",
			}, {
				Value: "asia-east1",
				Help:  "Taiwan.",
			}, {
				Value: "asia-east2",
				Help:  "Hong Kong.",
			}, {
				Value: "asia-northeast1",
				Help:  "Tokyo.",
			}, {
				Value: "asia-south1",
				Help:  "Mumbai.",
			}, {
				Value: "asia-southeast1",
				Help:  "Singapore.",
			}, {
				Value: "australia-southeast1",
				Help:  "Sydney.",
			}, {
				Value: "europe-north1",
				Help:  "Finland.",
			}, {
				Value: "europe-west1",
				Help:  "Belgium.",
			}, {
				Value: "europe-west2",
				Help:  "London.",
			}, {
				Value: "europe-west3",
				Help:  "Frankfurt.",
			}, {
				Value: "europe-west4",
				Help:  "Netherlands.",
			}, {
				Value: "us-central1",
				Help:  "Iowa.",
			}, {
				Value: "us-east1",
				Help:  "South Carolina.",
			}, {
				Value: "us-east4",
				Help:  "Northern Virginia.",
			}, {
				Value: "us-west1",
				Help:  "Oregon.",
			}, {
				Value: "us-west2",
				Help:  "California.",
			}},
		}, {
			Name: "storage_class",
			Help: "The storage class to use when storing objects in Google Cloud Storage.",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Default",
			}, {
				Value: "MULTI_REGIONAL",
				Help:  "Multi-regional storage class",
			}, {
				Value: "REGIONAL",
				Help:  "Regional storage class",
			}, {
				Value: "NEARLINE",
				Help:  "Nearline storage class",
			}, {
				Value: "COLDLINE",
				Help:  "Coldline storage class",
			}, {
				Value: "ARCHIVE",
				Help:  "Archive storage class",
			}, {
				Value: "DURABLE_REDUCED_AVAILABILITY",
				Help:  "Durable reduced availability storage class",
			}},
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			Default: (encoder.Base |
				encoder.EncodeCrLf |
				encoder.EncodeInvalidUtf8),
		}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	ProjectNumber             string               `config:"project_number"`
	ServiceAccountFile        string               `config:"service_account_file"`
	ServiceAccountCredentials string               `config:"service_account_credentials"`
	ObjectACL                 string               `config:"object_acl"`
	BucketACL                 string               `config:"bucket_acl"`
	BucketPolicyOnly          bool                 `config:"bucket_policy_only"`
	Location                  string               `config:"location"`
	StorageClass              string               `config:"storage_class"`
	Enc                       encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote storage server
type Fs struct {
	name          string           // name of this remote
	root          string           // the path we are working on if any
	opt           Options          // parsed options
	features      *fs.Features     // optional features
	svc           *storage.Service // the connection to the storage server
	client        *http.Client     // authorized client
	rootBucket    string           // bucket part of root (if any)
	rootDirectory string           // directory part of root (if any)
	cache         *bucket.Cache    // cache of bucket status
	pacer         *fs.Pacer        // To pace the API calls
}

// Object describes a storage object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs       *Fs       // what this object is part of
	remote   string    // The remote path
	url      string    // download path
	md5sum   string    // The MD5Sum of the object
	bytes    int64     // Bytes in the object
	modTime  time.Time // Modified time of the object
	mimeType string
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.rootBucket == "" {
		return "GCS root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("GCS bucket %s", f.rootBucket)
	}
	return fmt.Sprintf("GCS bucket %s path %s", f.rootBucket, f.rootDirectory)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// shouldRetry determines whether a given err rates being retried
func shouldRetry(err error) (again bool, errOut error) {
	again = false
	if err != nil {
		if fserrors.ShouldRetry(err) {
			again = true
		} else {
			switch gerr := err.(type) {
			case *googleapi.Error:
				if gerr.Code >= 500 && gerr.Code < 600 {
					// All 5xx errors should be retried
					again = true
				} else if len(gerr.Errors) > 0 {
					reason := gerr.Errors[0].Reason
					if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
						again = true
					}
				}
			}
		}
	}
	return again, err
}

// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
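
// A quick sketch of the path arithmetic, assuming f.root is
// "mybucket/backups" (hypothetical value):
//
//	bucketName, bucketPath := f.split("2020/june.tar")
//	// bucketName == "mybucket", bucketPath == "backups/2020/june.tar"
//	// (both after being run through the configured name encoder)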

// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
	return o.fs.split(o.remote)
}

func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
	conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
	if err != nil {
		return nil, errors.Wrap(err, "error processing credentials")
	}
	ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	ctx := context.TODO()
	var oAuthClient *http.Client

	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.ObjectACL == "" {
		opt.ObjectACL = "private"
	}
	if opt.BucketACL == "" {
		opt.BucketACL = "private"
	}

	// try loading service account credentials from env variable, then from a file
	if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
		loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
		if err != nil {
			return nil, errors.Wrap(err, "error opening service account credentials file")
		}
		opt.ServiceAccountCredentials = string(loadedCreds)
	}
	if opt.ServiceAccountCredentials != "" {
		oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
		if err != nil {
			return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
		}
	} else {
		oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
		if err != nil {
			ctx := context.Background()
			oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
			if err != nil {
				return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
			}
		}
	}

	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
		cache: bucket.NewCache(),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
	}).Fill(f)

	// Create a new authorized Storage client.
	f.client = oAuthClient
	f.svc, err = storage.New(f.client)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
	}

	if f.rootBucket != "" && f.rootDirectory != "" {
		// Check to see if the object exists
		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
		err = f.pacer.Call(func() (bool, error) {
			_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
			return shouldRetry(err)
		})
		if err == nil {
			newRoot := path.Dir(f.root)
			if newRoot == "." {
				newRoot = ""
			}
			f.setRoot(newRoot)
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	return f, nil
}
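
// A worked example of the tail of NewFs above (remote and path are
// hypothetical): given an existing object at "dir/file.txt",
//
//	f, err := NewFs("gcs", "mybucket/dir/file.txt", m)
//	// err == fs.ErrorIsFile, f is rooted at "mybucket/dir"
//
// so callers can treat the path as a single file rather than a directory.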

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage.Object) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		o.setMetaData(info)
	} else {
		err := o.readMetaData(ctx) // reads info and meta, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// listFn is called from list to handle an object.
type listFn func(remote string, object *storage.Object, isDirectory bool) error

// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
//
// The remote has prefix removed from it and if addBucket is set
// then it adds the bucket to the start.
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) {
	if prefix != "" {
		prefix += "/"
	}
	if directory != "" {
		directory += "/"
	}
	list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
	if !recurse {
		list = list.Delimiter("/")
	}
	for {
		var objects *storage.Objects
		err = f.pacer.Call(func() (bool, error) {
			objects, err = list.Context(ctx).Do()
			return shouldRetry(err)
		})
		if err != nil {
			if gErr, ok := err.(*googleapi.Error); ok {
				if gErr.Code == http.StatusNotFound {
					err = fs.ErrorDirNotFound
				}
			}
			return err
		}
		if !recurse {
			var object storage.Object
			for _, remote := range objects.Prefixes {
				if !strings.HasSuffix(remote, "/") {
					continue
				}
				remote = f.opt.Enc.ToStandardPath(remote)
				if !strings.HasPrefix(remote, prefix) {
					fs.Logf(f, "Odd name received %q", remote)
					continue
				}
				remote = remote[len(prefix) : len(remote)-1]
				if addBucket {
					remote = path.Join(bucket, remote)
				}
				err = fn(remote, &object, true)
				if err != nil {
					return err
				}
			}
		}
		for _, object := range objects.Items {
			remote := f.opt.Enc.ToStandardPath(object.Name)
			if !strings.HasPrefix(remote, prefix) {
				fs.Logf(f, "Odd name received %q", object.Name)
				continue
			}
			remote = remote[len(prefix):]
			isDirectory := remote == "" || strings.HasSuffix(remote, "/")
			if addBucket {
				remote = path.Join(bucket, remote)
			}
			// is this a directory marker?
			if isDirectory && object.Size == 0 {
				continue // skip directory marker
			}
			err = fn(remote, object, false)
			if err != nil {
				return err
			}
		}
		if objects.NextPageToken == "" {
			break
		}
		list.PageToken(objects.NextPageToken)
	}
	return nil
}
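
// A sketch of what the delimiter listing above yields for a bucket holding
// "dir/a" and "dir/sub/b" (hypothetical names), listed non-recursively at
// "dir":
//
//	objects.Items    -> "dir/a"    (passed to fn as file "a")
//	objects.Prefixes -> "dir/sub/" (passed to fn as directory "sub")
//
// Zero-length objects whose names end in "/" are treated as directory
// markers and skipped.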

// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
	if isDirectory {
		d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
		return d, nil
	}
	o, err := f.newObjectWithInfo(ctx, remote, object)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
	// List the objects
	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
		if err != nil {
			return err
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.cache.MarkOK(bucket)
	return entries, err
}

// listBuckets lists the buckets
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
	if f.opt.ProjectNumber == "" {
		return nil, errors.New("can't list buckets without project number")
	}
	listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
	for {
		var buckets *storage.Buckets
		err = f.pacer.Call(func() (bool, error) {
			buckets, err = listBuckets.Context(ctx).Do()
			return shouldRetry(err)
		})
		if err != nil {
			return nil, err
		}
		for _, bucket := range buckets.Items {
			d := fs.NewDir(f.opt.Enc.ToStandardName(bucket.Name), time.Time{})
			entries = append(entries, d)
		}
		if buckets.NextPageToken == "" {
			break
		}
		listBuckets.PageToken(buckets.NextPageToken)
	}
	return entries, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	bucket, directory := f.split(dir)
	if bucket == "" {
		if directory != "" {
			return nil, fs.ErrorListBucketRequired
		}
		return f.listBuckets(ctx)
	}
	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}
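
// Concretely (hypothetical remotes): for "gcs:" (no bucket in the root)
// List(ctx, "") returns the buckets, while for "gcs:mybucket" it lists the
// top of that bucket; the addBucket flag controls whether entries come back
// as "mybucket/file" or just "file".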

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
			if err != nil {
				return err
			}
			return list.Add(entry)
		})
	}
	if bucket == "" {
		entries, err := f.listBuckets(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			bucket := entry.Remote()
			err = listR(bucket, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// bucket must be present if listing succeeded
			f.cache.MarkOK(bucket)
		}
	} else {
		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
		if err != nil {
			return err
		}
		// bucket must be present if listing succeeded
		f.cache.MarkOK(bucket)
	}
	return list.Flush()
}

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return o, o.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	bucket, _ := f.split(dir)
	return f.makeBucket(ctx, bucket)
}

// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
	return f.cache.Create(bucket, func() error {
		// List something from the bucket to see if it exists. Doing it like this enables the use of a
		// service account that only has the "Storage Object Admin" role. See #2193 for details.
		err = f.pacer.Call(func() (bool, error) {
			_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
			return shouldRetry(err)
		})
		if err == nil {
			// Bucket already exists
			return nil
		} else if gErr, ok := err.(*googleapi.Error); ok {
			if gErr.Code != http.StatusNotFound {
				return errors.Wrap(err, "failed to get bucket")
			}
		} else {
			return errors.Wrap(err, "failed to get bucket")
		}

		if f.opt.ProjectNumber == "" {
			return errors.New("can't make bucket without project number")
		}

		bucket := storage.Bucket{
			Name:         bucket,
			Location:     f.opt.Location,
			StorageClass: f.opt.StorageClass,
		}
		if f.opt.BucketPolicyOnly {
			bucket.IamConfiguration = &storage.BucketIamConfiguration{
				BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
					Enabled: true,
				},
			}
		}
		return f.pacer.Call(func() (bool, error) {
			insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
			if !f.opt.BucketPolicyOnly {
				insertBucket.PredefinedAcl(f.opt.BucketACL)
			}
			_, err = insertBucket.Context(ctx).Do()
			return shouldRetry(err)
		})
	}, nil)
}

// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
	bucket, directory := f.split(dir)
	if bucket == "" || directory != "" {
		return nil
	}
	return f.cache.Remove(bucket, func() error {
		return f.pacer.Call(func() (bool, error) {
			err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
			return shouldRetry(err)
		})
	})
}

// Precision returns the precision
func (f *Fs) Precision() time.Duration {
	return time.Nanosecond
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	dstBucket, dstPath := f.split(remote)
	err := f.makeBucket(ctx, dstBucket)
	if err != nil {
		return nil, err
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	srcBucket, srcPath := srcObj.split()

	// Temporary Object under construction
	dstObj := &Object{
		fs:     f,
		remote: remote,
	}

	var newObject *storage.Object
	err = f.pacer.Call(func() (bool, error) {
		copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
		if !f.opt.BucketPolicyOnly {
			copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
		}
		newObject, err = copyObject.Context(ctx).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	// Set the metadata for the new object while we have it
	dstObj.setMetaData(newObject)
	return dstObj, nil
}
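
// Sketch of a server-side copy (names hypothetical): the whole operation is
// a single Objects.Copy call, so no object data flows through rclone:
//
//	dst, err := f.Copy(ctx, srcObj, "dir/copy.bin")
//	// dst has its metadata set from the API response; nothing is
//	// downloaded or re-uploaded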

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the MD5 checksum of an object as a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return o.md5sum, nil
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.bytes
}

// setMetaData sets the fs data from a storage.Object
func (o *Object) setMetaData(info *storage.Object) {
	o.url = info.MediaLink
	o.bytes = int64(info.Size)
	o.mimeType = info.ContentType

	// Read md5sum
	md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
	if err != nil {
		fs.Logf(o, "Bad MD5 decode: %v", err)
	} else {
		o.md5sum = hex.EncodeToString(md5sumData)
	}

	// read mtime out of metadata if available
	mtimeString, ok := info.Metadata[metaMtime]
	if ok {
		modTime, err := time.Parse(timeFormatIn, mtimeString)
		if err == nil {
			o.modTime = modTime
			return
		}
		fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
	}

	// Fallback to the Updated time
	modTime, err := time.Parse(timeFormatIn, info.Updated)
	if err != nil {
		fs.Logf(o, "Bad time decode: %v", err)
	} else {
		o.modTime = modTime
	}
}

// readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
	bucket, bucketPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
		object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
		return shouldRetry(err)
	})
	if err != nil {
		if gErr, ok := err.(*googleapi.Error); ok {
			if gErr.Code == http.StatusNotFound {
				return nil, fs.ErrorObjectNotFound
			}
		}
		return nil, err
	}
	return object, nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
	if !o.modTime.IsZero() {
		return nil
	}
	object, err := o.readObjectInfo(ctx)
	if err != nil {
		return err
	}
	o.setMetaData(object)
	return nil
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	err := o.readMetaData(ctx)
	if err != nil {
		// fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()
	}
	return o.modTime
}

// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
	metadata := make(map[string]string, 1)
	metadata[metaMtime] = modTime.Format(timeFormatOut)
	return metadata
}
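
// For instance (illustrative time value only):
//
//	metadataFromModTime(time.Date(2020, 6, 26, 22, 0, 27, 123456789, time.UTC))
//	// map[string]string{"mtime": "2020-06-26T22:00:27.123456789Z"}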

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	// read the complete existing object first
	object, err := o.readObjectInfo(ctx)
	if err != nil {
		return err
	}
	// Add the mtime to the existing metadata
	mtime := modTime.Format(timeFormatOut)
	if object.Metadata == nil {
		object.Metadata = make(map[string]string, 1)
	}
	object.Metadata[metaMtime] = mtime
	// Copy the object to itself to update the metadata
	// Using PATCH requires too many permissions
	bucket, bucketPath := o.split()
	var newObject *storage.Object
	err = o.fs.pacer.Call(func() (bool, error) {
		copyObject := o.fs.svc.Objects.Copy(bucket, bucketPath, bucket, bucketPath, object)
		if !o.fs.opt.BucketPolicyOnly {
			copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
		}
		newObject, err = copyObject.Context(ctx).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return err
	}
	o.setMetaData(newObject)
	return nil
}

// Storable returns a boolean as to whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	req, err := http.NewRequest("GET", o.url, nil)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
	fs.FixRangeOption(options, o.bytes)
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	var res *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.client.Do(req)
		if err == nil {
			err = googleapi.CheckResponse(res)
			if err != nil {
				_ = res.Body.Close() // ignore error
			}
		}
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	_, isRanging := req.Header["Range"]
	if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
		_ = res.Body.Close() // ignore error
		return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
	}
	return res.Body, nil
}

// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	bucket, bucketPath := o.split()
	err := o.fs.makeBucket(ctx, bucket)
	if err != nil {
		return err
	}
	modTime := src.ModTime(ctx)

	object := storage.Object{
		Bucket:      bucket,
		Name:        bucketPath,
		ContentType: fs.MimeType(ctx, src),
		Metadata:    metadataFromModTime(modTime),
	}
	// Apply upload options
	for _, option := range options {
		key, value := option.Header()
		lowerKey := strings.ToLower(key)
		switch lowerKey {
		case "":
			// ignore
		case "cache-control":
			object.CacheControl = value
		case "content-disposition":
			object.ContentDisposition = value
		case "content-encoding":
			object.ContentEncoding = value
		case "content-language":
			object.ContentLanguage = value
		case "content-type":
			object.ContentType = value
		default:
			const googMetaPrefix = "x-goog-meta-"
			if strings.HasPrefix(lowerKey, googMetaPrefix) {
				metaKey := lowerKey[len(googMetaPrefix):]
				object.Metadata[metaKey] = value
			} else {
				fs.Errorf(o, "Don't know how to set key %q on upload", key)
			}
		}
	}
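	// For example (hypothetical option values): an fs.OpenOption whose header
	// is "X-Goog-Meta-Color: red" lands in object.Metadata["color"] = "red",
	// while "Content-Type: text/plain" overrides the MIME type detected above.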
	var newObject *storage.Object
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
		if !o.fs.opt.BucketPolicyOnly {
			insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
		}
		newObject, err = insertObject.Context(ctx).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return err
	}
	// Set the metadata for the new object while we have it
	o.setMetaData(newObject)
	return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
	bucket, bucketPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
		return shouldRetry(err)
	})
	return err
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}

// Check the interfaces are satisfied
var (
	_ fs.Fs          = &Fs{}
	_ fs.Copier      = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.Object      = &Object{}
	_ fs.MimeTyper   = &Object{}
)
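
// A minimal usage sketch through rclone's generic layer (remote name and
// path are hypothetical, and this assumes fs.NewFs in this rclone version
// takes just the remote string):
//
//	f, err := fs.NewFs("gcs:mybucket/dir")
//	if err != nil {
//		log.Fatal(err)
//	}
//	entries, err := f.List(context.Background(), "")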