github.com/ncw/rclone@v1.48.1-0.20190724201158-a35aa1360e3e/backend/googlecloudstorage/googlecloudstorage.go

// Package googlecloudstorage provides an interface to Google Cloud Storage
package googlecloudstorage

/*
Notes

Can't set Updated but can set Metadata on object creation

Patch needs full_control not just read_write

FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 error
- https://code.google.com/p/google-api-go-client/issues/detail?id=64
*/

import (
	"context"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path"
	"regexp"
	"strings"
	"sync"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/config/obscure"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/fshttp"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/fs/walk"
	"github.com/ncw/rclone/lib/oauthutil"
	"github.com/ncw/rclone/lib/pacer"
	"github.com/pkg/errors"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/googleapi"

	// NOTE: This API is deprecated
	storage "google.golang.org/api/storage/v1"
)

const (
	rcloneClientID              = "202264815644.apps.googleusercontent.com"
	rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
	timeFormatIn                = time.RFC3339
	timeFormatOut               = "2006-01-02T15:04:05.000000000Z07:00"
	metaMtime                   = "mtime" // key to store mtime under in metadata
	listChunks                  = 1000    // chunk size to read directory listings
	minSleep                    = 10 * time.Millisecond
)
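
// timeFormatOut pins the fractional second to nine digits so stored mtimes
// round-trip losslessly (time.RFC3339Nano would trim trailing zeros). A
// minimal illustrative round trip:
//
//	t := time.Date(2019, 7, 24, 20, 11, 58, 120000000, time.UTC)
//	s := t.Format(timeFormatOut) // "2019-07-24T20:11:58.120000000Z"
//	back, _ := time.Parse(timeFormatIn, s)
//	// back.Equal(t) == true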

var (
	// Description of how to auth for this app
	storageConfig = &oauth2.Config{
		Scopes:       []string{storage.DevstorageReadWriteScope},
		Endpoint:     google.Endpoint,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.TitleBarRedirectURL,
	}
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "google cloud storage",
		Prefix:      "gcs",
		Description: "Google Cloud Storage (this is not Google Drive)",
		NewFs:       NewFs,
		Config: func(name string, m configmap.Mapper) {
			saFile, _ := m.Get("service_account_file")
			saCreds, _ := m.Get("service_account_credentials")
			if saFile != "" || saCreds != "" {
				return
			}
			err := oauthutil.Config("google cloud storage", name, m, storageConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Google Application Client Id\nLeave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Google Application Client Secret\nLeave blank normally.",
		}, {
			Name: "project_number",
			Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
		}, {
			Name: "service_account_file",
			Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login.",
		}, {
			Name: "service_account_credentials",
			Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login.",
			Hide: fs.OptionHideBoth,
		}, {
			Name: "object_acl",
			Help: "Access Control List for new objects.",
			Examples: []fs.OptionExample{{
				Value: "authenticatedRead",
				Help:  "Object owner gets OWNER access, and all Authenticated Users get READER access.",
			}, {
				Value: "bucketOwnerFullControl",
				Help:  "Object owner gets OWNER access, and project team owners get OWNER access.",
			}, {
				Value: "bucketOwnerRead",
				Help:  "Object owner gets OWNER access, and project team owners get READER access.",
			}, {
				Value: "private",
				Help:  "Object owner gets OWNER access [default if left blank].",
			}, {
				Value: "projectPrivate",
				Help:  "Object owner gets OWNER access, and project team members get access according to their roles.",
			}, {
				Value: "publicRead",
				Help:  "Object owner gets OWNER access, and all Users get READER access.",
			}},
		}, {
			Name: "bucket_acl",
			Help: "Access Control List for new buckets.",
			Examples: []fs.OptionExample{{
				Value: "authenticatedRead",
				Help:  "Project team owners get OWNER access, and all Authenticated Users get READER access.",
			}, {
				Value: "private",
				Help:  "Project team owners get OWNER access [default if left blank].",
			}, {
				Value: "projectPrivate",
				Help:  "Project team members get access according to their roles.",
			}, {
				Value: "publicRead",
				Help:  "Project team owners get OWNER access, and all Users get READER access.",
			}, {
				Value: "publicReadWrite",
				Help:  "Project team owners get OWNER access, and all Users get WRITER access.",
			}},
		}, {
			Name: "bucket_policy_only",
			Help: `Access checks should use bucket-level IAM policies.

If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.

When it is set, rclone:

- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set

Docs: https://cloud.google.com/storage/docs/bucket-policy-only
`,
			Default: false,
		}, {
			Name: "location",
			Help: "Location for the newly created buckets.",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Empty for default location (US).",
			}, {
				Value: "asia",
				Help:  "Multi-regional location for Asia.",
			}, {
				Value: "eu",
				Help:  "Multi-regional location for Europe.",
			}, {
				Value: "us",
				Help:  "Multi-regional location for United States.",
			}, {
				Value: "asia-east1",
				Help:  "Taiwan.",
			}, {
				Value: "asia-east2",
				Help:  "Hong Kong.",
			}, {
				Value: "asia-northeast1",
				Help:  "Tokyo.",
			}, {
				Value: "asia-south1",
				Help:  "Mumbai.",
			}, {
				Value: "asia-southeast1",
				Help:  "Singapore.",
			}, {
				Value: "australia-southeast1",
				Help:  "Sydney.",
			}, {
				Value: "europe-north1",
				Help:  "Finland.",
			}, {
				Value: "europe-west1",
				Help:  "Belgium.",
			}, {
				Value: "europe-west2",
				Help:  "London.",
			}, {
				Value: "europe-west3",
				Help:  "Frankfurt.",
			}, {
				Value: "europe-west4",
				Help:  "Netherlands.",
			}, {
				Value: "us-central1",
				Help:  "Iowa.",
			}, {
				Value: "us-east1",
				Help:  "South Carolina.",
			}, {
				Value: "us-east4",
				Help:  "Northern Virginia.",
			}, {
				Value: "us-west1",
				Help:  "Oregon.",
			}, {
				Value: "us-west2",
				Help:  "California.",
			}},
		}, {
			Name: "storage_class",
			Help: "The storage class to use when storing objects in Google Cloud Storage.",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Default",
			}, {
				Value: "MULTI_REGIONAL",
				Help:  "Multi-regional storage class",
			}, {
				Value: "REGIONAL",
				Help:  "Regional storage class",
			}, {
				Value: "NEARLINE",
				Help:  "Nearline storage class",
			}, {
				Value: "COLDLINE",
				Help:  "Coldline storage class",
			}, {
				Value: "DURABLE_REDUCED_AVAILABILITY",
				Help:  "Durable reduced availability storage class",
			}},
		}},
	})
}
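
// The options above end up in an rclone.conf section. A hypothetical
// service-account remote might look like this (all values are illustrative
// placeholders, not real credentials):
//
//	[mygcs]
//	type = google cloud storage
//	project_number = 123456789012
//	service_account_file = /path/to/service-account.json
//	object_acl = private
//	bucket_acl = private
//	location = us-east1
//	storage_class = REGIONAL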

// Options defines the configuration for this backend
type Options struct {
	ProjectNumber             string `config:"project_number"`
	ServiceAccountFile        string `config:"service_account_file"`
	ServiceAccountCredentials string `config:"service_account_credentials"`
	ObjectACL                 string `config:"object_acl"`
	BucketACL                 string `config:"bucket_acl"`
	BucketPolicyOnly          bool   `config:"bucket_policy_only"`
	Location                  string `config:"location"`
	StorageClass              string `config:"storage_class"`
}

// Fs represents a remote storage server
type Fs struct {
	name       string           // name of this remote
	root       string           // the path we are working on if any
	opt        Options          // parsed options
	features   *fs.Features     // optional features
	svc        *storage.Service // the connection to the storage server
	client     *http.Client     // authorized client
	bucket     string           // the bucket we are working on
	bucketOKMu sync.Mutex       // mutex to protect bucketOK
	bucketOK   bool             // true if we have created the bucket
	pacer      *fs.Pacer        // To pace the API calls
}

// Object describes a storage object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs       *Fs       // what this object is part of
	remote   string    // The remote path
	url      string    // download path
	md5sum   string    // The MD5Sum of the object
	bytes    int64     // Bytes in the object
	modTime  time.Time // Modified time of the object
	mimeType string
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	if f.root == "" {
		return f.bucket
	}
	return f.bucket + "/" + f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.root == "" {
		return fmt.Sprintf("Storage bucket %s", f.bucket)
	}
	return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// shouldRetry determines whether a given err rates being retried
func shouldRetry(err error) (again bool, errOut error) {
	again = false
	if err != nil {
		if fserrors.ShouldRetry(err) {
			again = true
		} else {
			switch gerr := err.(type) {
			case *googleapi.Error:
				if gerr.Code >= 500 && gerr.Code < 600 {
					// All 5xx errors should be retried
					again = true
				} else if len(gerr.Errors) > 0 {
					reason := gerr.Errors[0].Reason
					if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
						again = true
					}
				}
			}
		}
	}
	return again, err
}
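
// A minimal sketch of how shouldRetry classifies errors (the values are
// illustrative, not from a real API response):
//
//	again, _ := shouldRetry(&googleapi.Error{Code: 503})
//	// again == true: all 5xx responses are retried
//
//	again, _ = shouldRetry(&googleapi.Error{
//		Code:   403,
//		Errors: []googleapi.ErrorItem{{Reason: "rateLimitExceeded"}},
//	})
//	// again == true: rate limit errors are retried despite the 4xx code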

// Pattern to match a storage path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)

// parsePath parses a storage 'url'
func parsePath(path string) (bucket, directory string, err error) {
	parts := matcher.FindStringSubmatch(path)
	if parts == nil {
		err = errors.Errorf("couldn't find bucket in storage path %q", path)
	} else {
		bucket, directory = parts[1], parts[2]
		directory = strings.Trim(directory, "/")
	}
	return
}
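
// A couple of illustrative inputs:
//
//	parsePath("bucket/path/to/dir") // -> "bucket", "path/to/dir", nil
//	parsePath("bucket/dir/")        // -> "bucket", "dir", nil (slashes trimmed)
//	parsePath("bucket")             // -> "bucket", "", nil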

func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
	conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
	if err != nil {
		return nil, errors.Wrap(err, "error processing credentials")
	}
	ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
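
// A minimal sketch of using it directly (the credentials path is
// hypothetical; NewFs below performs essentially this sequence when
// service_account_file or service_account_credentials is set):
//
//	creds, err := ioutil.ReadFile("/path/to/service-account.json")
//	if err != nil {
//		return err
//	}
//	client, err := getServiceAccountClient(creds)
//	if err != nil {
//		return err
//	}
//	svc, err := storage.New(client)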

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	var oAuthClient *http.Client

	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.ObjectACL == "" {
		opt.ObjectACL = "private"
	}
	if opt.BucketACL == "" {
		opt.BucketACL = "private"
	}

	// try loading service account credentials from env variable, then from a file
	if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
		loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
		if err != nil {
			return nil, errors.Wrap(err, "error opening service account credentials file")
		}
		opt.ServiceAccountCredentials = string(loadedCreds)
	}
	if opt.ServiceAccountCredentials != "" {
		oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
		if err != nil {
			return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
		}
	} else {
		oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
		if err != nil {
			ctx := context.Background()
			oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
			if err != nil {
				return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
			}
		}
	}

	bucket, directory, err := parsePath(root)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:   name,
		bucket: bucket,
		root:   directory,
		opt:    *opt,
		pacer:  fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
	}
	f.features = (&fs.Features{
		ReadMimeType:  true,
		WriteMimeType: true,
		BucketBased:   true,
	}).Fill(f)

	// Create a new authorized Storage client.
	f.client = oAuthClient
	f.svc, err = storage.New(f.client)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
	}

	if f.root != "" {
		f.root += "/"
		// Check to see if the object exists
		err = f.pacer.Call(func() (bool, error) {
			_, err = f.svc.Objects.Get(bucket, directory).Do()
			return shouldRetry(err)
		})
		if err == nil {
			f.root = path.Dir(directory)
			if f.root == "." {
				f.root = ""
			} else {
				f.root += "/"
			}
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	return f, nil
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		o.setMetaData(info)
	} else {
		err := o.readMetaData() // reads info and meta, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// NewObject finds the Object at remote.  If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, nil)
}

// listFn is called from list to handle an object.
type listFn func(remote string, object *storage.Object, isDirectory bool) error

// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) (err error) {
	root := f.root
	rootLength := len(root)
	if dir != "" {
		root += dir + "/"
	}
	list := f.svc.Objects.List(f.bucket).Prefix(root).MaxResults(listChunks)
	if !recurse {
		list = list.Delimiter("/")
	}
	for {
		var objects *storage.Objects
		err = f.pacer.Call(func() (bool, error) {
			objects, err = list.Do()
			return shouldRetry(err)
		})
		if err != nil {
			if gErr, ok := err.(*googleapi.Error); ok {
				if gErr.Code == http.StatusNotFound {
					err = fs.ErrorDirNotFound
				}
			}
			return err
		}
		if !recurse {
			var object storage.Object
			for _, prefix := range objects.Prefixes {
				if !strings.HasSuffix(prefix, "/") {
					continue
				}
				err = fn(prefix[rootLength:len(prefix)-1], &object, true)
				if err != nil {
					return err
				}
			}
		}
		for _, object := range objects.Items {
			if !strings.HasPrefix(object.Name, root) {
				fs.Logf(f, "Odd name received %q", object.Name)
				continue
			}
			remote := object.Name[rootLength:]
			// is this a directory marker?
			if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
				if recurse && remote != "" {
					// add a directory in if --fast-list since will have no prefixes
					err = fn(remote[:len(remote)-1], object, true)
					if err != nil {
						return err
					}
				}
				continue // skip directory marker
			}
			err = fn(remote, object, false)
			if err != nil {
				return err
			}
		}
		if objects.NextPageToken == "" {
			break
		}
		list.PageToken(objects.NextPageToken)
	}
	return nil
}
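
// To illustrate the shapes handled above: with Delimiter("/") set, objects
// under sub "directories" are rolled up into Prefixes, while directory
// markers are zero-length objects whose names end in "/". A hypothetical
// page for root "photos/" might contain:
//
//	// with recurse == false (Delimiter "/"):
//	objects.Prefixes = []string{"photos/2019/"} // -> fn("2019", ..., true)
//	objects.Items[0].Name = "photos/a.jpg"      // -> fn("a.jpg", ..., false)
//
//	// with recurse == true (--fast-list) a zero-length marker object
//	// "photos/2019/" may appear in Items instead, and is reported as a
//	// directory rather than as an object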

// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
	if isDirectory {
		d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
		return d, nil
	}
	o, err := f.newObjectWithInfo(remote, object)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// mark the bucket as being OK
func (f *Fs) markBucketOK() {
	if f.bucket != "" {
		f.bucketOKMu.Lock()
		f.bucketOK = true
		f.bucketOKMu.Unlock()
	}
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// List the objects
	err = f.list(ctx, dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.markBucketOK()
	return entries, err
}

// listBuckets lists the buckets
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
	if dir != "" {
		return nil, fs.ErrorListBucketRequired
	}
	if f.opt.ProjectNumber == "" {
		return nil, errors.New("can't list buckets without project number")
	}
	listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
	for {
		var buckets *storage.Buckets
		err = f.pacer.Call(func() (bool, error) {
			buckets, err = listBuckets.Do()
			return shouldRetry(err)
		})
		if err != nil {
			return nil, err
		}
		for _, bucket := range buckets.Items {
			d := fs.NewDir(bucket.Name, time.Time{})
			entries = append(entries, d)
		}
		if buckets.NextPageToken == "" {
			break
		}
		listBuckets.PageToken(buckets.NextPageToken)
	}
	return entries, nil
}

// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	if f.bucket == "" {
		return f.listBuckets(dir)
	}
	return f.listDir(ctx, dir)
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order.  If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	if f.bucket == "" {
		return fs.ErrorListBucketRequired
	}
	list := walk.NewListRHelper(callback)
	err = f.list(ctx, dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
		}
		return list.Add(entry)
	})
	if err != nil {
		return err
	}
	// bucket must be present if listing succeeded
	f.markBucketOK()
	return list.Flush()
}

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return o, o.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	if f.bucketOK {
		return nil
	}
	// List something from the bucket to see if it exists.  Doing it like this enables the use of a
	// service account that only has the "Storage Object Admin" role.  See #2193 for details.

	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
		return shouldRetry(err)
	})
	if err == nil {
		// Bucket already exists
		f.bucketOK = true
		return nil
	} else if gErr, ok := err.(*googleapi.Error); ok {
		if gErr.Code != http.StatusNotFound {
			return errors.Wrap(err, "failed to get bucket")
		}
	} else {
		return errors.Wrap(err, "failed to get bucket")
	}

	if f.opt.ProjectNumber == "" {
		return errors.New("can't make bucket without project number")
	}

	bucket := storage.Bucket{
		Name:         f.bucket,
		Location:     f.opt.Location,
		StorageClass: f.opt.StorageClass,
	}
	if f.opt.BucketPolicyOnly {
		bucket.IamConfiguration = &storage.BucketIamConfiguration{
			BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
				Enabled: true,
			},
		}
	}
	err = f.pacer.Call(func() (bool, error) {
		insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
		if !f.opt.BucketPolicyOnly {
			insertBucket.PredefinedAcl(f.opt.BucketACL)
		}
		_, err = insertBucket.Do()
		return shouldRetry(err)
	})
	if err == nil {
		f.bucketOK = true
	}
	return err
}
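
// A hedged sketch of the request Mkdir ends up issuing for a
// bucket-policy-only remote (project number and names are placeholders):
//
//	bucket := storage.Bucket{
//		Name:     "my-bucket",
//		Location: "us-east1",
//		IamConfiguration: &storage.BucketIamConfiguration{
//			BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{Enabled: true},
//		},
//	}
//	_, err := svc.Buckets.Insert("123456789012", &bucket).Do()
//
// PredefinedAcl is deliberately skipped in that case - the code above only
// applies bucket_acl when bucket_policy_only is unset, since per-bucket ACLs
// and Bucket Policy Only don't mix.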

// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	if f.root != "" || dir != "" {
		return nil
	}
	err = f.pacer.Call(func() (bool, error) {
		err = f.svc.Buckets.Delete(f.bucket).Do()
		return shouldRetry(err)
	})
	if err == nil {
		f.bucketOK = false
	}
	return err
}

// Precision returns the precision
func (f *Fs) Precision() time.Duration {
	return time.Nanosecond
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	err := f.Mkdir(ctx, "")
	if err != nil {
		return nil, err
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	// Temporary Object under construction
	dstObj := &Object{
		fs:     f,
		remote: remote,
	}

	srcBucket := srcObj.fs.bucket
	srcObject := srcObj.fs.root + srcObj.remote
	dstBucket := f.bucket
	dstObject := f.root + remote
	var newObject *storage.Object
	err = f.pacer.Call(func() (bool, error) {
		newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	// Set the metadata for the new object while we have it
	dstObj.setMetaData(newObject)
	return dstObj, nil
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return o.md5sum, nil
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.bytes
}

// setMetaData sets the fs data from a storage.Object
func (o *Object) setMetaData(info *storage.Object) {
	o.url = info.MediaLink
	o.bytes = int64(info.Size)
	o.mimeType = info.ContentType

	// Read md5sum
	md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
	if err != nil {
		fs.Logf(o, "Bad MD5 decode: %v", err)
	} else {
		o.md5sum = hex.EncodeToString(md5sumData)
	}

	// read mtime out of metadata if available
	mtimeString, ok := info.Metadata[metaMtime]
	if ok {
		modTime, err := time.Parse(timeFormatIn, mtimeString)
		if err == nil {
			o.modTime = modTime
			return
		}
		fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
	}

	// Fallback to the Updated time
	modTime, err := time.Parse(timeFormatIn, info.Updated)
	if err != nil {
		fs.Logf(o, "Bad time decode: %v", err)
	} else {
		o.modTime = modTime
	}
}
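
// The API reports MD5s base64-encoded while rclone stores lowercase hex, so
// setMetaData converts. An illustrative round trip (the well-known MD5 of
// the empty string):
//
//	data, _ := base64.StdEncoding.DecodeString("1B2M2Y8AsgTpgAmY7PhCfg==")
//	hex.EncodeToString(data) // "d41d8cd98f00b204e9800998ecf8427e"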

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
	if !o.modTime.IsZero() {
		return nil
	}
	var object *storage.Object
	err = o.fs.pacer.Call(func() (bool, error) {
		object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
		return shouldRetry(err)
	})
	if err != nil {
		if gErr, ok := err.(*googleapi.Error); ok {
			if gErr.Code == http.StatusNotFound {
				return fs.ErrorObjectNotFound
			}
		}
		return err
	}
	o.setMetaData(object)
	return nil
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime from its metadata and, if that
// isn't present, falls back to the Updated time reported by the API
func (o *Object) ModTime(ctx context.Context) time.Time {
	err := o.readMetaData()
	if err != nil {
		// fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()
	}
	return o.modTime
}

// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
	metadata := make(map[string]string, 1)
	metadata[metaMtime] = modTime.Format(timeFormatOut)
	return metadata
}
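
// So an object stamped at 2019-07-24 20:11:58.5 UTC carries metadata like
// this (illustrative):
//
//	map[string]string{"mtime": "2019-07-24T20:11:58.500000000Z"}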

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	// This only adds metadata so will preserve other metadata
	object := storage.Object{
		Bucket:   o.fs.bucket,
		Name:     o.fs.root + o.remote,
		Metadata: metadataFromModTime(modTime),
	}
	var newObject *storage.Object
	err = o.fs.pacer.Call(func() (bool, error) {
		newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return err
	}
	o.setMetaData(newObject)
	return nil
}

// Storable returns a boolean as to whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	req, err := http.NewRequest("GET", o.url, nil)
	if err != nil {
		return nil, err
	}
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	var res *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.client.Do(req)
		if err == nil {
			err = googleapi.CheckResponse(res)
			if err != nil {
				_ = res.Body.Close() // ignore error
			}
		}
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	_, isRanging := req.Header["Range"]
	if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
		_ = res.Body.Close() // ignore error
		return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
	}
	return res.Body, nil
}
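
// A minimal sketch of a ranged read through the options plumbing above (the
// remote name is hypothetical):
//
//	o, err := f.NewObject(ctx, "path/to/file")
//	if err != nil {
//		return err
//	}
//	rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023}) // first 1 KiB
//	// a Range header was sent, so a 206 Partial Content reply is accepted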

// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	err := o.fs.Mkdir(ctx, "")
	if err != nil {
		return err
	}
	modTime := src.ModTime(ctx)

	object := storage.Object{
		Bucket:      o.fs.bucket,
		Name:        o.fs.root + o.remote,
		ContentType: fs.MimeType(ctx, src),
		Metadata:    metadataFromModTime(modTime),
	}
	var newObject *storage.Object
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
		if !o.fs.opt.BucketPolicyOnly {
			insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
		}
		newObject, err = insertObject.Do()
		return shouldRetry(err)
	})
	if err != nil {
		return err
	}
	// Set the metadata for the new object while we have it
	o.setMetaData(newObject)
	return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
	err = o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
		return shouldRetry(err)
	})
	return err
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}

// Check the interfaces are satisfied
var (
	_ fs.Fs          = &Fs{}
	_ fs.Copier      = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.Object      = &Object{}
	_ fs.MimeTyper   = &Object{}
)