github.com/xhghs/rclone@v1.51.1-0.20200430155106-e186a28cced8/backend/googlecloudstorage/googlecloudstorage.go (about)

     1  // Package googlecloudstorage provides an interface to Google Cloud Storage
     2  package googlecloudstorage
     3  
     4  /*
     5  Notes
     6  
     7  Can't set Updated but can set Metadata on object creation
     8  
     9  Patch needs full_control not just read_write
    10  
    11  FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 error
    12  - https://code.google.com/p/google-api-go-client/issues/detail?id=64
    13  */
    14  
    15  import (
    16  	"context"
    17  	"encoding/base64"
    18  	"encoding/hex"
    19  	"fmt"
    20  	"io"
    21  	"io/ioutil"
    22  	"log"
    23  	"net/http"
    24  	"os"
    25  	"path"
    26  	"strings"
    27  	"time"
    28  
    29  	"github.com/pkg/errors"
    30  	"github.com/rclone/rclone/fs"
    31  	"github.com/rclone/rclone/fs/config"
    32  	"github.com/rclone/rclone/fs/config/configmap"
    33  	"github.com/rclone/rclone/fs/config/configstruct"
    34  	"github.com/rclone/rclone/fs/config/obscure"
    35  	"github.com/rclone/rclone/fs/fserrors"
    36  	"github.com/rclone/rclone/fs/fshttp"
    37  	"github.com/rclone/rclone/fs/hash"
    38  	"github.com/rclone/rclone/fs/walk"
    39  	"github.com/rclone/rclone/lib/bucket"
    40  	"github.com/rclone/rclone/lib/encoder"
    41  	"github.com/rclone/rclone/lib/oauthutil"
    42  	"github.com/rclone/rclone/lib/pacer"
    43  	"golang.org/x/oauth2"
    44  	"golang.org/x/oauth2/google"
    45  	"google.golang.org/api/googleapi"
    46  
    47  	// NOTE: This API is deprecated
    48  	storage "google.golang.org/api/storage/v1"
    49  )
    50  
const (
	// OAuth client credentials registered for rclone; the secret is
	// stored obscured and revealed at init time.
	rcloneClientID              = "202264815644.apps.googleusercontent.com"
	rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
	timeFormatIn                = time.RFC3339                          // format for parsing times read from object metadata
	timeFormatOut               = "2006-01-02T15:04:05.000000000Z07:00" // format for writing times - nanosecond precision
	metaMtime                   = "mtime" // key to store mtime under in metadata
	listChunks                  = 1000    // chunk size to read directory listings
	minSleep                    = 10 * time.Millisecond // minimum sleep between retried API calls (see pacer in NewFs)
)
    60  
var (
	// Description of how to auth for this app
	//
	// Requests read/write scope; note that Patch needs full_control
	// (see the package comment above).
	storageConfig = &oauth2.Config{
		Scopes:       []string{storage.DevstorageReadWriteScope},
		Endpoint:     google.Endpoint,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.TitleBarRedirectURL,
	}
)
    71  
// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "google cloud storage",
		Prefix:      "gcs",
		Description: "Google Cloud Storage (this is not Google Drive)",
		NewFs:       NewFs,
		Config: func(name string, m configmap.Mapper) {
			// If a service account is configured, no interactive OAuth
			// token is needed, so skip the token flow entirely.
			saFile, _ := m.Get("service_account_file")
			saCreds, _ := m.Get("service_account_credentials")
			if saFile != "" || saCreds != "" {
				return
			}
			err := oauthutil.Config("google cloud storage", name, m, storageConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Google Application Client Id\nLeave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Google Application Client Secret\nLeave blank normally.",
		}, {
			Name: "project_number",
			Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
		}, {
			Name: "service_account_file",
			Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
		}, {
			Name: "service_account_credentials",
			Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
			Hide: fs.OptionHideBoth,
		}, {
			// Predefined ACL applied to newly created objects (ignored
			// when bucket_policy_only is set - see Copy/Object upload).
			Name: "object_acl",
			Help: "Access Control List for new objects.",
			Examples: []fs.OptionExample{{
				Value: "authenticatedRead",
				Help:  "Object owner gets OWNER access, and all Authenticated Users get READER access.",
			}, {
				Value: "bucketOwnerFullControl",
				Help:  "Object owner gets OWNER access, and project team owners get OWNER access.",
			}, {
				Value: "bucketOwnerRead",
				Help:  "Object owner gets OWNER access, and project team owners get READER access.",
			}, {
				Value: "private",
				Help:  "Object owner gets OWNER access [default if left blank].",
			}, {
				Value: "projectPrivate",
				Help:  "Object owner gets OWNER access, and project team members get access according to their roles.",
			}, {
				Value: "publicRead",
				Help:  "Object owner gets OWNER access, and all Users get READER access.",
			}},
		}, {
			// Predefined ACL applied to newly created buckets (ignored
			// when bucket_policy_only is set - see makeBucket).
			Name: "bucket_acl",
			Help: "Access Control List for new buckets.",
			Examples: []fs.OptionExample{{
				Value: "authenticatedRead",
				Help:  "Project team owners get OWNER access, and all Authenticated Users get READER access.",
			}, {
				Value: "private",
				Help:  "Project team owners get OWNER access [default if left blank].",
			}, {
				Value: "projectPrivate",
				Help:  "Project team members get access according to their roles.",
			}, {
				Value: "publicRead",
				Help:  "Project team owners get OWNER access, and all Users get READER access.",
			}, {
				Value: "publicReadWrite",
				Help:  "Project team owners get OWNER access, and all Users get WRITER access.",
			}},
		}, {
			Name: "bucket_policy_only",
			Help: `Access checks should use bucket-level IAM policies.

If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.

When it is set, rclone:

- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set

Docs: https://cloud.google.com/storage/docs/bucket-policy-only
`,
			Default: false,
		}, {
			Name: "location",
			Help: "Location for the newly created buckets.",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Empty for default location (US).",
			}, {
				Value: "asia",
				Help:  "Multi-regional location for Asia.",
			}, {
				Value: "eu",
				Help:  "Multi-regional location for Europe.",
			}, {
				Value: "us",
				Help:  "Multi-regional location for United States.",
			}, {
				Value: "asia-east1",
				Help:  "Taiwan.",
			}, {
				Value: "asia-east2",
				Help:  "Hong Kong.",
			}, {
				Value: "asia-northeast1",
				Help:  "Tokyo.",
			}, {
				Value: "asia-south1",
				Help:  "Mumbai.",
			}, {
				Value: "asia-southeast1",
				Help:  "Singapore.",
			}, {
				Value: "australia-southeast1",
				Help:  "Sydney.",
			}, {
				Value: "europe-north1",
				Help:  "Finland.",
			}, {
				Value: "europe-west1",
				Help:  "Belgium.",
			}, {
				Value: "europe-west2",
				Help:  "London.",
			}, {
				Value: "europe-west3",
				Help:  "Frankfurt.",
			}, {
				Value: "europe-west4",
				Help:  "Netherlands.",
			}, {
				Value: "us-central1",
				Help:  "Iowa.",
			}, {
				Value: "us-east1",
				Help:  "South Carolina.",
			}, {
				Value: "us-east4",
				Help:  "Northern Virginia.",
			}, {
				Value: "us-west1",
				Help:  "Oregon.",
			}, {
				Value: "us-west2",
				Help:  "California.",
			}},
		}, {
			Name: "storage_class",
			Help: "The storage class to use when storing objects in Google Cloud Storage.",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Default",
			}, {
				Value: "MULTI_REGIONAL",
				Help:  "Multi-regional storage class",
			}, {
				Value: "REGIONAL",
				Help:  "Regional storage class",
			}, {
				Value: "NEARLINE",
				Help:  "Nearline storage class",
			}, {
				Value: "COLDLINE",
				Help:  "Coldline storage class",
			}, {
				Value: "DURABLE_REDUCED_AVAILABILITY",
				Help:  "Durable reduced availability storage class",
			}},
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			Default: (encoder.Base |
				encoder.EncodeCrLf |
				encoder.EncodeInvalidUtf8),
		}},
	})
}
   259  
// Options defines the configuration for this backend
type Options struct {
	ProjectNumber             string               `config:"project_number"`              // needed only for bucket list/create/delete
	ServiceAccountFile        string               `config:"service_account_file"`        // path to SA JSON; env vars in the path are expanded
	ServiceAccountCredentials string               `config:"service_account_credentials"` // SA JSON blob; takes precedence over the file
	ObjectACL                 string               `config:"object_acl"`                  // predefined ACL for new objects, "private" if unset
	BucketACL                 string               `config:"bucket_acl"`                  // predefined ACL for new buckets, "private" if unset
	BucketPolicyOnly          bool                 `config:"bucket_policy_only"`          // if set, don't send ACLs and create BPO buckets
	Location                  string               `config:"location"`                    // location for newly created buckets
	StorageClass              string               `config:"storage_class"`               // storage class for newly created buckets
	Enc                       encoder.MultiEncoder `config:"encoding"`                    // filename encoding applied to bucket/object names
}
   272  
// Fs represents a remote storage server
//
// The root may be "" (all buckets), "bucket" or "bucket/path" - it is
// split into rootBucket/rootDirectory by setRoot.
type Fs struct {
	name          string           // name of this remote
	root          string           // the path we are working on if any
	opt           Options          // parsed options
	features      *fs.Features     // optional features
	svc           *storage.Service // the connection to the storage server
	client        *http.Client     // authorized client
	rootBucket    string           // bucket part of root (if any)
	rootDirectory string           // directory part of root (if any)
	cache         *bucket.Cache    // cache of bucket status
	pacer         *fs.Pacer        // To pace the API calls
}
   286  
// Object describes a storage object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs       *Fs       // what this object is part of
	remote   string    // The remote path
	url      string    // download path (MediaLink from the API)
	md5sum   string    // The MD5Sum of the object as lowercase hex
	bytes    int64     // Bytes in the object
	modTime  time.Time // Modified time of the object
	mimeType string    // Content-Type of the object
}
   299  
   300  // ------------------------------------------------------------
   301  
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
   306  
// Root of the remote (as passed into NewFs)
//
// This is the normalised form produced by setRoot - leading and
// trailing slashes stripped.
func (f *Fs) Root() string {
	return f.root
}
   311  
   312  // String converts this Fs to a string
   313  func (f *Fs) String() string {
   314  	if f.rootBucket == "" {
   315  		return fmt.Sprintf("GCS root")
   316  	}
   317  	if f.rootDirectory == "" {
   318  		return fmt.Sprintf("GCS bucket %s", f.rootBucket)
   319  	}
   320  	return fmt.Sprintf("GCS bucket %s path %s", f.rootBucket, f.rootDirectory)
   321  }
   322  
// Features returns the optional features of this Fs
//
// The features are filled in by NewFs.
func (f *Fs) Features() *fs.Features {
	return f.features
}
   327  
   328  // shouldRetry determines whether a given err rates being retried
   329  func shouldRetry(err error) (again bool, errOut error) {
   330  	again = false
   331  	if err != nil {
   332  		if fserrors.ShouldRetry(err) {
   333  			again = true
   334  		} else {
   335  			switch gerr := err.(type) {
   336  			case *googleapi.Error:
   337  				if gerr.Code >= 500 && gerr.Code < 600 {
   338  					// All 5xx errors should be retried
   339  					again = true
   340  				} else if len(gerr.Errors) > 0 {
   341  					reason := gerr.Errors[0].Reason
   342  					if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
   343  						again = true
   344  					}
   345  				}
   346  			}
   347  		}
   348  	}
   349  	return again, err
   350  }
   351  
   352  // parsePath parses a remote 'url'
   353  func parsePath(path string) (root string) {
   354  	root = strings.Trim(path, "/")
   355  	return
   356  }
   357  
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
//
// The returned names are passed through the configured encoder so
// they are ready for use in API calls.
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
   364  
// split returns bucket and bucketPath from the object
//
// Convenience wrapper around Fs.split using the object's remote path.
func (o *Object) split() (bucket, bucketPath string) {
	return o.fs.split(o.remote)
}
   369  
   370  func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
   371  	conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
   372  	if err != nil {
   373  		return nil, errors.Wrap(err, "error processing credentials")
   374  	}
   375  	ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
   376  	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
   377  }
   378  
// setRoot changes the root of the Fs
//
// It normalises the path (stripping surrounding slashes) and splits it
// into the bucket and directory parts.
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
   384  
// NewFs constructs an Fs from the path, bucket:path
//
// Authentication is tried in order: service account credentials
// (inline blob, then file), configured OAuth token, and finally
// Google application default credentials.
//
// If the path points at an existing object rather than a directory,
// the returned Fs points at the parent and fs.ErrorIsFile is returned.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	ctx := context.TODO()
	var oAuthClient *http.Client

	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	// Default both ACLs to "private" when not configured
	if opt.ObjectACL == "" {
		opt.ObjectACL = "private"
	}
	if opt.BucketACL == "" {
		opt.BucketACL = "private"
	}

	// try loading service account credentials from env variable, then from a file
	if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
		// Expand env vars (e.g. $HOME) in the configured path
		loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
		if err != nil {
			return nil, errors.Wrap(err, "error opening service account credentials file")
		}
		opt.ServiceAccountCredentials = string(loadedCreds)
	}
	if opt.ServiceAccountCredentials != "" {
		oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
		if err != nil {
			return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
		}
	} else {
		oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
		if err != nil {
			// No usable token - fall back to application default
			// credentials (e.g. GCE instance metadata).
			ctx := context.Background()
			oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
			if err != nil {
				return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
			}
		}
	}

	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
		cache: bucket.NewCache(),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
	}).Fill(f)

	// Create a new authorized Drive client.
	f.client = oAuthClient
	f.svc, err = storage.New(f.client)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
	}

	if f.rootBucket != "" && f.rootDirectory != "" {
		// Check to see if the object exists
		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
		err = f.pacer.Call(func() (bool, error) {
			_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
			return shouldRetry(err)
		})
		if err == nil {
			// The root is an existing file - point the Fs at its parent
			newRoot := path.Dir(f.root)
			if newRoot == "." {
				newRoot = ""
			}
			f.setRoot(newRoot)
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	return f, nil
}
   468  
   469  // Return an Object from a path
   470  //
   471  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   472  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage.Object) (fs.Object, error) {
   473  	o := &Object{
   474  		fs:     f,
   475  		remote: remote,
   476  	}
   477  	if info != nil {
   478  		o.setMetaData(info)
   479  	} else {
   480  		err := o.readMetaData(ctx) // reads info and meta, returning an error
   481  		if err != nil {
   482  			return nil, err
   483  		}
   484  	}
   485  	return o, nil
   486  }
   487  
// NewObject finds the Object at remote.  If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	// nil info forces a metadata read from the remote
	return f.newObjectWithInfo(ctx, remote, nil)
}
   493  
// listFn is called from list to handle an object.
//
// remote is the path with prefix stripped (and bucket prepended if
// requested); isDirectory is true for synthesized directory entries.
type listFn func(remote string, object *storage.Object, isDirectory bool) error
   496  
// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
//
// The remote has prefix removed from it and if addBucket is set
// then it adds the bucket to the start.
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) {
	// Both prefix and directory are object-name prefixes, so they need
	// a trailing "/" to act as directory delimiters.
	if prefix != "" {
		prefix += "/"
	}
	if directory != "" {
		directory += "/"
	}
	list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
	if !recurse {
		// With a delimiter the API returns common prefixes (synthetic
		// directories) instead of descending into them.
		list = list.Delimiter("/")
	}
	// Page through the results until NextPageToken is empty
	for {
		var objects *storage.Objects
		err = f.pacer.Call(func() (bool, error) {
			objects, err = list.Context(ctx).Do()
			return shouldRetry(err)
		})
		if err != nil {
			// Translate 404 on the bucket into directory not found
			if gErr, ok := err.(*googleapi.Error); ok {
				if gErr.Code == http.StatusNotFound {
					err = fs.ErrorDirNotFound
				}
			}
			return err
		}
		if !recurse {
			// Emit directory entries from the common prefixes; a single
			// zero-value object is shared by all of them.
			var object storage.Object
			for _, remote := range objects.Prefixes {
				if !strings.HasSuffix(remote, "/") {
					continue
				}
				remote = f.opt.Enc.ToStandardPath(remote)
				if !strings.HasPrefix(remote, prefix) {
					fs.Logf(f, "Odd name received %q", remote)
					continue
				}
				// Strip the prefix and the trailing "/"
				remote = remote[len(prefix) : len(remote)-1]
				if addBucket {
					remote = path.Join(bucket, remote)
				}
				err = fn(remote, &object, true)
				if err != nil {
					return err
				}
			}
		}
		for _, object := range objects.Items {
			remote := f.opt.Enc.ToStandardPath(object.Name)
			if !strings.HasPrefix(remote, prefix) {
				fs.Logf(f, "Odd name received %q", object.Name)
				continue
			}
			remote = remote[len(prefix):]
			isDirectory := strings.HasSuffix(remote, "/")
			if addBucket {
				remote = path.Join(bucket, remote)
			}
			// is this a directory marker?
			if isDirectory && object.Size == 0 {
				continue // skip directory marker
			}
			err = fn(remote, object, false)
			if err != nil {
				return err
			}
		}
		if objects.NextPageToken == "" {
			break
		}
		list.PageToken(objects.NextPageToken)
	}
	return nil
}
   578  
   579  // Convert a list item into a DirEntry
   580  func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
   581  	if isDirectory {
   582  		d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
   583  		return d, nil
   584  	}
   585  	o, err := f.newObjectWithInfo(ctx, remote, object)
   586  	if err != nil {
   587  		return nil, err
   588  	}
   589  	return o, nil
   590  }
   591  
   592  // listDir lists a single directory
   593  func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
   594  	// List the objects
   595  	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
   596  		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
   597  		if err != nil {
   598  			return err
   599  		}
   600  		if entry != nil {
   601  			entries = append(entries, entry)
   602  		}
   603  		return nil
   604  	})
   605  	if err != nil {
   606  		return nil, err
   607  	}
   608  	// bucket must be present if listing succeeded
   609  	f.cache.MarkOK(bucket)
   610  	return entries, err
   611  }
   612  
   613  // listBuckets lists the buckets
   614  func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
   615  	if f.opt.ProjectNumber == "" {
   616  		return nil, errors.New("can't list buckets without project number")
   617  	}
   618  	listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
   619  	for {
   620  		var buckets *storage.Buckets
   621  		err = f.pacer.Call(func() (bool, error) {
   622  			buckets, err = listBuckets.Context(ctx).Do()
   623  			return shouldRetry(err)
   624  		})
   625  		if err != nil {
   626  			return nil, err
   627  		}
   628  		for _, bucket := range buckets.Items {
   629  			d := fs.NewDir(f.opt.Enc.ToStandardName(bucket.Name), time.Time{})
   630  			entries = append(entries, d)
   631  		}
   632  		if buckets.NextPageToken == "" {
   633  			break
   634  		}
   635  		listBuckets.PageToken(buckets.NextPageToken)
   636  	}
   637  	return entries, nil
   638  }
   639  
   640  // List the objects and directories in dir into entries.  The
   641  // entries can be returned in any order but should be for a
   642  // complete directory.
   643  //
   644  // dir should be "" to list the root, and should not have
   645  // trailing slashes.
   646  //
   647  // This should return ErrDirNotFound if the directory isn't
   648  // found.
   649  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
   650  	bucket, directory := f.split(dir)
   651  	if bucket == "" {
   652  		if directory != "" {
   653  			return nil, fs.ErrorListBucketRequired
   654  		}
   655  		return f.listBuckets(ctx)
   656  	}
   657  	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
   658  }
   659  
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order.  If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	// listR recursively lists one bucket, feeding entries to the helper
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
			if err != nil {
				return err
			}
			return list.Add(entry)
		})
	}
	if bucket == "" {
		// No bucket in the root - list all buckets and recurse into each
		entries, err := f.listBuckets(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			bucket := entry.Remote()
			err = listR(bucket, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// bucket must be present if listing succeeded
			f.cache.MarkOK(bucket)
		}
	} else {
		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
		if err != nil {
			return err
		}
		// bucket must be present if listing succeeded
		f.cache.MarkOK(bucket)
	}
	// Send any remaining buffered entries to the callback
	return list.Flush()
}
   716  
   717  // Put the object into the bucket
   718  //
   719  // Copy the reader in to the new object which is returned
   720  //
   721  // The new object may have been created if an error is returned
   722  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   723  	// Temporary Object under construction
   724  	o := &Object{
   725  		fs:     f,
   726  		remote: src.Remote(),
   727  	}
   728  	return o, o.Update(ctx, in, src, options...)
   729  }
   730  
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// Delegates directly to Put.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}
   735  
// Mkdir creates the bucket if it doesn't exist
//
// Only the bucket part of dir is used - no directory objects are
// created below the bucket level.
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	bucket, _ := f.split(dir)
	return f.makeBucket(ctx, bucket)
}
   741  
// makeBucket creates the bucket if it doesn't exist
//
// The bucket cache deduplicates concurrent/repeated creation attempts.
// Creating a bucket requires project_number to be configured.
func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
	return f.cache.Create(bucket, func() error {
		// List something from the bucket to see if it exists.  Doing it like this enables the use of a
		// service account that only has the "Storage Object Admin" role.  See #2193 for details.
		err = f.pacer.Call(func() (bool, error) {
			_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
			return shouldRetry(err)
		})
		if err == nil {
			// Bucket already exists
			return nil
		} else if gErr, ok := err.(*googleapi.Error); ok {
			// Only a 404 means "bucket doesn't exist, go create it";
			// any other API error is fatal.
			if gErr.Code != http.StatusNotFound {
				return errors.Wrap(err, "failed to get bucket")
			}
		} else {
			return errors.Wrap(err, "failed to get bucket")
		}

		if f.opt.ProjectNumber == "" {
			return errors.New("can't make bucket without project number")
		}

		// NB this shadows the string parameter with the API struct
		bucket := storage.Bucket{
			Name:         bucket,
			Location:     f.opt.Location,
			StorageClass: f.opt.StorageClass,
		}
		if f.opt.BucketPolicyOnly {
			bucket.IamConfiguration = &storage.BucketIamConfiguration{
				BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
					Enabled: true,
				},
			}
		}
		return f.pacer.Call(func() (bool, error) {
			insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
			// Predefined ACLs can't be combined with Bucket Policy Only
			if !f.opt.BucketPolicyOnly {
				insertBucket.PredefinedAcl(f.opt.BucketACL)
			}
			_, err = insertBucket.Context(ctx).Do()
			return shouldRetry(err)
		})
	}, nil)
}
   788  
   789  // Rmdir deletes the bucket if the fs is at the root
   790  //
   791  // Returns an error if it isn't empty: Error 409: The bucket you tried
   792  // to delete was not empty.
   793  func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
   794  	bucket, directory := f.split(dir)
   795  	if bucket == "" || directory != "" {
   796  		return nil
   797  	}
   798  	return f.cache.Remove(bucket, func() error {
   799  		return f.pacer.Call(func() (bool, error) {
   800  			err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
   801  			return shouldRetry(err)
   802  		})
   803  	})
   804  }
   805  
// Precision returns the precision of mod times that this backend
// supports - nanoseconds, matching the timeFormatOut used to store
// mtime in object metadata.
func (f *Fs) Precision() time.Duration {
	return time.Nanosecond
}
   810  
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	dstBucket, dstPath := f.split(remote)
	// Make sure the destination bucket exists first
	err := f.makeBucket(ctx, dstBucket)
	if err != nil {
		return nil, err
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	srcBucket, srcPath := srcObj.split()

	// Temporary Object under construction
	dstObj := &Object{
		fs:     f,
		remote: remote,
	}

	var newObject *storage.Object
	err = f.pacer.Call(func() (bool, error) {
		copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
		// Predefined ACLs can't be combined with Bucket Policy Only
		if !f.opt.BucketPolicyOnly {
			copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
		}
		newObject, err = copyObject.Context(ctx).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	// Set the metadata for the new object while we have it
	dstObj.setMetaData(newObject)
	return dstObj, nil
}
   855  
   856  // Hashes returns the supported hash sets.
   857  func (f *Fs) Hashes() hash.Set {
   858  	return hash.Set(hash.MD5)
   859  }
   860  
   861  // ------------------------------------------------------------
   862  
   863  // Fs returns the parent Fs
   864  func (o *Object) Fs() fs.Info {
   865  	return o.fs
   866  }
   867  
   868  // Return a string version
   869  func (o *Object) String() string {
   870  	if o == nil {
   871  		return "<nil>"
   872  	}
   873  	return o.remote
   874  }
   875  
   876  // Remote returns the remote path
   877  func (o *Object) Remote() string {
   878  	return o.remote
   879  }
   880  
   881  // Hash returns the Md5sum of an object returning a lowercase hex string
   882  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
   883  	if t != hash.MD5 {
   884  		return "", hash.ErrUnsupported
   885  	}
   886  	return o.md5sum, nil
   887  }
   888  
   889  // Size returns the size of an object in bytes
   890  func (o *Object) Size() int64 {
   891  	return o.bytes
   892  }
   893  
   894  // setMetaData sets the fs data from a storage.Object
   895  func (o *Object) setMetaData(info *storage.Object) {
   896  	o.url = info.MediaLink
   897  	o.bytes = int64(info.Size)
   898  	o.mimeType = info.ContentType
   899  
   900  	// Read md5sum
   901  	md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
   902  	if err != nil {
   903  		fs.Logf(o, "Bad MD5 decode: %v", err)
   904  	} else {
   905  		o.md5sum = hex.EncodeToString(md5sumData)
   906  	}
   907  
   908  	// read mtime out of metadata if available
   909  	mtimeString, ok := info.Metadata[metaMtime]
   910  	if ok {
   911  		modTime, err := time.Parse(timeFormatIn, mtimeString)
   912  		if err == nil {
   913  			o.modTime = modTime
   914  			return
   915  		}
   916  		fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
   917  	}
   918  
   919  	// Fallback to the Updated time
   920  	modTime, err := time.Parse(timeFormatIn, info.Updated)
   921  	if err != nil {
   922  		fs.Logf(o, "Bad time decode: %v", err)
   923  	} else {
   924  		o.modTime = modTime
   925  	}
   926  }
   927  
   928  // readObjectInfo reads the definition for an object
   929  func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
   930  	bucket, bucketPath := o.split()
   931  	err = o.fs.pacer.Call(func() (bool, error) {
   932  		object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
   933  		return shouldRetry(err)
   934  	})
   935  	if err != nil {
   936  		if gErr, ok := err.(*googleapi.Error); ok {
   937  			if gErr.Code == http.StatusNotFound {
   938  				return nil, fs.ErrorObjectNotFound
   939  			}
   940  		}
   941  		return nil, err
   942  	}
   943  	return object, nil
   944  }
   945  
   946  // readMetaData gets the metadata if it hasn't already been fetched
   947  //
   948  // it also sets the info
   949  func (o *Object) readMetaData(ctx context.Context) (err error) {
   950  	if !o.modTime.IsZero() {
   951  		return nil
   952  	}
   953  	object, err := o.readObjectInfo(ctx)
   954  	if err != nil {
   955  		return err
   956  	}
   957  	o.setMetaData(object)
   958  	return nil
   959  }
   960  
   961  // ModTime returns the modification time of the object
   962  //
   963  // It attempts to read the objects mtime and if that isn't present the
   964  // LastModified returned in the http headers
   965  func (o *Object) ModTime(ctx context.Context) time.Time {
   966  	err := o.readMetaData(ctx)
   967  	if err != nil {
   968  		// fs.Logf(o, "Failed to read metadata: %v", err)
   969  		return time.Now()
   970  	}
   971  	return o.modTime
   972  }
   973  
   974  // Returns metadata for an object
   975  func metadataFromModTime(modTime time.Time) map[string]string {
   976  	metadata := make(map[string]string, 1)
   977  	metadata[metaMtime] = modTime.Format(timeFormatOut)
   978  	return metadata
   979  }
   980  
   981  // SetModTime sets the modification time of the local fs object
   982  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
   983  	// read the complete existing object first
   984  	object, err := o.readObjectInfo(ctx)
   985  	if err != nil {
   986  		return err
   987  	}
   988  	// Add the mtime to the existing metadata
   989  	mtime := modTime.Format(timeFormatOut)
   990  	if object.Metadata == nil {
   991  		object.Metadata = make(map[string]string, 1)
   992  	}
   993  	object.Metadata[metaMtime] = mtime
   994  	// Copy the object to itself to update the metadata
   995  	// Using PATCH requires too many permissions
   996  	bucket, bucketPath := o.split()
   997  	var newObject *storage.Object
   998  	err = o.fs.pacer.Call(func() (bool, error) {
   999  		copyObject := o.fs.svc.Objects.Copy(bucket, bucketPath, bucket, bucketPath, object)
  1000  		if !o.fs.opt.BucketPolicyOnly {
  1001  			copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
  1002  		}
  1003  		newObject, err = copyObject.Context(ctx).Do()
  1004  		return shouldRetry(err)
  1005  	})
  1006  	if err != nil {
  1007  		return err
  1008  	}
  1009  	o.setMetaData(newObject)
  1010  	return nil
  1011  }
  1012  
  1013  // Storable returns a boolean as to whether this object is storable
  1014  func (o *Object) Storable() bool {
  1015  	return true
  1016  }
  1017  
  1018  // Open an object for read
  1019  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  1020  	req, err := http.NewRequest("GET", o.url, nil)
  1021  	if err != nil {
  1022  		return nil, err
  1023  	}
  1024  	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
  1025  	fs.FixRangeOption(options, o.bytes)
  1026  	fs.OpenOptionAddHTTPHeaders(req.Header, options)
  1027  	var res *http.Response
  1028  	err = o.fs.pacer.Call(func() (bool, error) {
  1029  		res, err = o.fs.client.Do(req)
  1030  		if err == nil {
  1031  			err = googleapi.CheckResponse(res)
  1032  			if err != nil {
  1033  				_ = res.Body.Close() // ignore error
  1034  			}
  1035  		}
  1036  		return shouldRetry(err)
  1037  	})
  1038  	if err != nil {
  1039  		return nil, err
  1040  	}
  1041  	_, isRanging := req.Header["Range"]
  1042  	if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
  1043  		_ = res.Body.Close() // ignore error
  1044  		return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
  1045  	}
  1046  	return res.Body, nil
  1047  }
  1048  
  1049  // Update the object with the contents of the io.Reader, modTime and size
  1050  //
  1051  // The new object may have been created if an error is returned
  1052  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  1053  	bucket, bucketPath := o.split()
  1054  	err := o.fs.makeBucket(ctx, bucket)
  1055  	if err != nil {
  1056  		return err
  1057  	}
  1058  	modTime := src.ModTime(ctx)
  1059  
  1060  	object := storage.Object{
  1061  		Bucket:      bucket,
  1062  		Name:        bucketPath,
  1063  		ContentType: fs.MimeType(ctx, src),
  1064  		Metadata:    metadataFromModTime(modTime),
  1065  	}
  1066  	var newObject *storage.Object
  1067  	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  1068  		insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
  1069  		if !o.fs.opt.BucketPolicyOnly {
  1070  			insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
  1071  		}
  1072  		newObject, err = insertObject.Context(ctx).Do()
  1073  		return shouldRetry(err)
  1074  	})
  1075  	if err != nil {
  1076  		return err
  1077  	}
  1078  	// Set the metadata for the new object while we have it
  1079  	o.setMetaData(newObject)
  1080  	return nil
  1081  }
  1082  
  1083  // Remove an object
  1084  func (o *Object) Remove(ctx context.Context) (err error) {
  1085  	bucket, bucketPath := o.split()
  1086  	err = o.fs.pacer.Call(func() (bool, error) {
  1087  		err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
  1088  		return shouldRetry(err)
  1089  	})
  1090  	return err
  1091  }
  1092  
  1093  // MimeType of an Object if known, "" otherwise
  1094  func (o *Object) MimeType(ctx context.Context) string {
  1095  	return o.mimeType
  1096  }
  1097  
  1098  // Check the interfaces are satisfied
  1099  var (
  1100  	_ fs.Fs          = &Fs{}
  1101  	_ fs.Copier      = &Fs{}
  1102  	_ fs.PutStreamer = &Fs{}
  1103  	_ fs.ListRer     = &Fs{}
  1104  	_ fs.Object      = &Object{}
  1105  	_ fs.MimeTyper   = &Object{}
  1106  )