github.com/ncw/rclone@v1.48.1-0.20190724201158-a35aa1360e3e/backend/drive/drive.go (about)

     1  // Package drive interfaces with the Google Drive object storage system
     2  package drive
     3  
     4  // FIXME need to deal with some corner cases
     5  // * multiple files with the same name
     6  // * files can be in multiple directories
     7  // * can have directory loops
     8  // * files with / in name
     9  
    10  import (
    11  	"bytes"
    12  	"context"
    13  	"fmt"
    14  	"io"
    15  	"io/ioutil"
    16  	"log"
    17  	"mime"
    18  	"net/http"
    19  	"net/url"
    20  	"os"
    21  	"path"
    22  	"sort"
    23  	"strconv"
    24  	"strings"
    25  	"sync"
    26  	"text/template"
    27  	"time"
    28  
    29  	"github.com/ncw/rclone/fs"
    30  	"github.com/ncw/rclone/fs/config"
    31  	"github.com/ncw/rclone/fs/config/configmap"
    32  	"github.com/ncw/rclone/fs/config/configstruct"
    33  	"github.com/ncw/rclone/fs/config/obscure"
    34  	"github.com/ncw/rclone/fs/fserrors"
    35  	"github.com/ncw/rclone/fs/fshttp"
    36  	"github.com/ncw/rclone/fs/hash"
    37  	"github.com/ncw/rclone/fs/walk"
    38  	"github.com/ncw/rclone/lib/dircache"
    39  	"github.com/ncw/rclone/lib/oauthutil"
    40  	"github.com/ncw/rclone/lib/pacer"
    41  	"github.com/ncw/rclone/lib/readers"
    42  	"github.com/pkg/errors"
    43  	"golang.org/x/oauth2"
    44  	"golang.org/x/oauth2/google"
    45  	drive_v2 "google.golang.org/api/drive/v2"
    46  	drive "google.golang.org/api/drive/v3"
    47  	"google.golang.org/api/googleapi"
    48  )
    49  
// Constants
const (
	rcloneClientID              = "202264815644.apps.googleusercontent.com"
	rcloneEncryptedClientSecret = "eX8GpZTVx3vxMWVkuuBdDWmAUE6rGhTwVrvG9GhllYccSdj2-mvHVg"
	driveFolderType             = "application/vnd.google-apps.folder"    // MIME type Drive uses to mark folders
	timeFormatIn                = time.RFC3339                            // layout for times read from the API
	timeFormatOut               = "2006-01-02T15:04:05.000000000Z07:00"   // layout for times sent to the API
	defaultMinSleep             = fs.Duration(100 * time.Millisecond)     // default pacer minimum sleep
	defaultBurst                = 100                                     // default pacer burst
	defaultExportExtensions     = "docx,xlsx,pptx,svg"                    // default preferred export formats for Google docs
	scopePrefix                 = "https://www.googleapis.com/auth/"      // prefix added to each configured OAuth scope
	defaultScope                = "drive"                                 // scope used when none is configured
	// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
	// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
	minChunkSize     = 256 * fs.KibiByte
	defaultChunkSize = 8 * fs.MebiByte
	// partialFields is the set of per-file fields requested in listings
	partialFields = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink"
)
    68  
// Globals
var (
	// Description of how to auth for this app
	driveConfig = &oauth2.Config{
		Scopes:       []string{scopePrefix + "drive"},
		Endpoint:     google.Endpoint,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.TitleBarRedirectURL,
	}
	// Non-canonical MIME types registered first so mime.ExtensionsByType
	// knows them, while mime.TypeByExtension resolves to the canonical
	// entries in _mimeTypeToExtension (see init below).
	_mimeTypeToExtensionDuplicates = map[string]string{
		"application/x-vnd.oasis.opendocument.presentation": ".odp",
		"application/x-vnd.oasis.opendocument.spreadsheet":  ".ods",
		"application/x-vnd.oasis.opendocument.text":         ".odt",
		"image/jpg":   ".jpg",
		"image/x-bmp": ".bmp",
		"image/x-png": ".png",
		"text/rtf":    ".rtf",
	}
	// Canonical MIME type -> extension mapping used for doc export/import
	_mimeTypeToExtension = map[string]string{
		"application/epub+zip":                            ".epub",
		"application/json":                                ".json",
		"application/msword":                              ".doc",
		"application/pdf":                                 ".pdf",
		"application/rtf":                                 ".rtf",
		"application/vnd.ms-excel":                        ".xls",
		"application/vnd.oasis.opendocument.presentation": ".odp",
		"application/vnd.oasis.opendocument.spreadsheet":  ".ods",
		"application/vnd.oasis.opendocument.text":         ".odt",
		"application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
		"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":         ".xlsx",
		"application/vnd.openxmlformats-officedocument.wordprocessingml.document":   ".docx",
		"application/x-msmetafile":  ".wmf",
		"application/zip":           ".zip",
		"image/bmp":                 ".bmp",
		"image/jpeg":                ".jpg",
		"image/pjpeg":               ".pjpeg",
		"image/png":                 ".png",
		"image/svg+xml":             ".svg",
		"text/csv":                  ".csv",
		"text/html":                 ".html",
		"text/plain":                ".txt",
		"text/tab-separated-values": ".tsv",
	}
	// Pseudo MIME types used for exporting Google links as shortcut files
	_mimeTypeToExtensionLinks = map[string]string{
		"application/x-link-desktop": ".desktop",
		"application/x-link-html":    ".link.html",
		"application/x-link-url":     ".url",
		"application/x-link-webloc":  ".webloc",
	}
	// MIME types rewritten before lookup in the tables above
	_mimeTypeCustomTransform = map[string]string{
		"application/vnd.google-apps.script+json": "application/json",
	}
	fetchFormatsOnce sync.Once                     // make sure we fetch the export/import formats only once
	_exportFormats   map[string][]string           // allowed export MIME type conversions
	_importFormats   map[string][]string           // allowed import MIME type conversions
	templatesOnce    sync.Once                     // parse link templates only once
	_linkTemplates   map[string]*template.Template // available link types
)
   128  
   129  // Parse the scopes option returning a slice of scopes
   130  func driveScopes(scopesString string) (scopes []string) {
   131  	if scopesString == "" {
   132  		scopesString = defaultScope
   133  	}
   134  	for _, scope := range strings.Split(scopesString, ",") {
   135  		scope = strings.TrimSpace(scope)
   136  		scopes = append(scopes, scopePrefix+scope)
   137  	}
   138  	return scopes
   139  }
   140  
   141  // Returns true if one of the scopes was "drive.appfolder"
   142  func driveScopesContainsAppFolder(scopes []string) bool {
   143  	for _, scope := range scopes {
   144  		if scope == scopePrefix+"drive.appfolder" {
   145  			return true
   146  		}
   147  
   148  	}
   149  	return false
   150  }
   151  
   152  // Register with Fs
   153  func init() {
   154  	fs.Register(&fs.RegInfo{
   155  		Name:        "drive",
   156  		Description: "Google Drive",
   157  		NewFs:       NewFs,
   158  		Config: func(name string, m configmap.Mapper) {
   159  			// Parse config into Options struct
   160  			opt := new(Options)
   161  			err := configstruct.Set(m, opt)
   162  			if err != nil {
   163  				fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
   164  				return
   165  			}
   166  
   167  			// Fill in the scopes
   168  			driveConfig.Scopes = driveScopes(opt.Scope)
   169  			// Set the root_folder_id if using drive.appfolder
   170  			if driveScopesContainsAppFolder(driveConfig.Scopes) {
   171  				m.Set("root_folder_id", "appDataFolder")
   172  			}
   173  
   174  			if opt.ServiceAccountFile == "" {
   175  				err = oauthutil.Config("drive", name, m, driveConfig)
   176  				if err != nil {
   177  					log.Fatalf("Failed to configure token: %v", err)
   178  				}
   179  			}
   180  			err = configTeamDrive(opt, m, name)
   181  			if err != nil {
   182  				log.Fatalf("Failed to configure team drive: %v", err)
   183  			}
   184  		},
   185  		Options: []fs.Option{{
   186  			Name: config.ConfigClientID,
   187  			Help: "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance.",
   188  		}, {
   189  			Name: config.ConfigClientSecret,
   190  			Help: "Google Application Client Secret\nSetting your own is recommended.",
   191  		}, {
   192  			Name: "scope",
   193  			Help: "Scope that rclone should use when requesting access from drive.",
   194  			Examples: []fs.OptionExample{{
   195  				Value: "drive",
   196  				Help:  "Full access all files, excluding Application Data Folder.",
   197  			}, {
   198  				Value: "drive.readonly",
   199  				Help:  "Read-only access to file metadata and file contents.",
   200  			}, {
   201  				Value: "drive.file",
   202  				Help:  "Access to files created by rclone only.\nThese are visible in the drive website.\nFile authorization is revoked when the user deauthorizes the app.",
   203  			}, {
   204  				Value: "drive.appfolder",
   205  				Help:  "Allows read and write access to the Application Data folder.\nThis is not visible in the drive website.",
   206  			}, {
   207  				Value: "drive.metadata.readonly",
   208  				Help:  "Allows read-only access to file metadata but\ndoes not allow any access to read or download file content.",
   209  			}},
   210  		}, {
   211  			Name: "root_folder_id",
   212  			Help: "ID of the root folder\nLeave blank normally.\nFill in to access \"Computers\" folders. (see docs).",
   213  		}, {
   214  			Name: "service_account_file",
   215  			Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
   216  		}, {
   217  			Name:     "service_account_credentials",
   218  			Help:     "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
   219  			Hide:     fs.OptionHideConfigurator,
   220  			Advanced: true,
   221  		}, {
   222  			Name:     "team_drive",
   223  			Help:     "ID of the Team Drive",
   224  			Hide:     fs.OptionHideConfigurator,
   225  			Advanced: true,
   226  		}, {
   227  			Name:     "auth_owner_only",
   228  			Default:  false,
   229  			Help:     "Only consider files owned by the authenticated user.",
   230  			Advanced: true,
   231  		}, {
   232  			Name:     "use_trash",
   233  			Default:  true,
   234  			Help:     "Send files to the trash instead of deleting permanently.\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
   235  			Advanced: true,
   236  		}, {
   237  			Name:     "skip_gdocs",
   238  			Default:  false,
   239  			Help:     "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.",
   240  			Advanced: true,
   241  		}, {
   242  			Name:    "skip_checksum_gphotos",
   243  			Default: false,
   244  			Help: `Skip MD5 checksum on Google photos and videos only.
   245  
   246  Use this if you get checksum errors when transferring Google photos or
   247  videos.
   248  
   249  Setting this flag will cause Google photos and videos to return a
   250  blank MD5 checksum.
   251  
   252  Google photos are identifed by being in the "photos" space.
   253  
   254  Corrupted checksums are caused by Google modifying the image/video but
   255  not updating the checksum.`,
   256  			Advanced: true,
   257  		}, {
   258  			Name:    "shared_with_me",
   259  			Default: false,
   260  			Help: `Only show files that are shared with me.
   261  
   262  Instructs rclone to operate on your "Shared with me" folder (where
   263  Google Drive lets you access the files and folders others have shared
   264  with you).
   265  
   266  This works both with the "list" (lsd, lsl, etc) and the "copy"
   267  commands (copy, sync, etc), and with all other commands too.`,
   268  			Advanced: true,
   269  		}, {
   270  			Name:     "trashed_only",
   271  			Default:  false,
   272  			Help:     "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
   273  			Advanced: true,
   274  		}, {
   275  			Name:     "formats",
   276  			Default:  "",
   277  			Help:     "Deprecated: see export_formats",
   278  			Advanced: true,
   279  			Hide:     fs.OptionHideConfigurator,
   280  		}, {
   281  			Name:     "export_formats",
   282  			Default:  defaultExportExtensions,
   283  			Help:     "Comma separated list of preferred formats for downloading Google docs.",
   284  			Advanced: true,
   285  		}, {
   286  			Name:     "import_formats",
   287  			Default:  "",
   288  			Help:     "Comma separated list of preferred formats for uploading Google docs.",
   289  			Advanced: true,
   290  		}, {
   291  			Name:     "allow_import_name_change",
   292  			Default:  false,
   293  			Help:     "Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.",
   294  			Advanced: true,
   295  		}, {
   296  			Name:    "use_created_date",
   297  			Default: false,
   298  			Help: `Use file created date instead of modified date.,
   299  
   300  Useful when downloading data and you want the creation date used in
   301  place of the last modified date.
   302  
   303  **WARNING**: This flag may have some unexpected consequences.
   304  
   305  When uploading to your drive all files will be overwritten unless they
   306  haven't been modified since their creation. And the inverse will occur
   307  while downloading.  This side effect can be avoided by using the
   308  "--checksum" flag.
   309  
   310  This feature was implemented to retain photos capture date as recorded
   311  by google photos. You will first need to check the "Create a Google
   312  Photos folder" option in your google drive settings. You can then copy
   313  or move the photos locally and use the date the image was taken
   314  (created) set as the modification date.`,
   315  			Advanced: true,
   316  		}, {
   317  			Name:     "list_chunk",
   318  			Default:  1000,
   319  			Help:     "Size of listing chunk 100-1000. 0 to disable.",
   320  			Advanced: true,
   321  		}, {
   322  			Name:     "impersonate",
   323  			Default:  "",
   324  			Help:     "Impersonate this user when using a service account.",
   325  			Advanced: true,
   326  		}, {
   327  			Name:    "alternate_export",
   328  			Default: false,
   329  			Help: `Use alternate export URLs for google documents export.,
   330  
   331  If this option is set this instructs rclone to use an alternate set of
   332  export URLs for drive documents.  Users have reported that the
   333  official export URLs can't export large documents, whereas these
   334  unofficial ones can.
   335  
   336  See rclone issue [#2243](https://github.com/ncw/rclone/issues/2243) for background,
   337  [this google drive issue](https://issuetracker.google.com/issues/36761333) and
   338  [this helpful post](https://www.labnol.org/internet/direct-links-for-google-drive/28356/).`,
   339  			Advanced: true,
   340  		}, {
   341  			Name:     "upload_cutoff",
   342  			Default:  defaultChunkSize,
   343  			Help:     "Cutoff for switching to chunked upload",
   344  			Advanced: true,
   345  		}, {
   346  			Name:    "chunk_size",
   347  			Default: defaultChunkSize,
   348  			Help: `Upload chunk size. Must a power of 2 >= 256k.
   349  
   350  Making this larger will improve performance, but note that each chunk
   351  is buffered in memory one per transfer.
   352  
   353  Reducing this will reduce memory usage but decrease performance.`,
   354  			Advanced: true,
   355  		}, {
   356  			Name:    "acknowledge_abuse",
   357  			Default: false,
   358  			Help: `Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
   359  
   360  If downloading a file returns the error "This file has been identified
   361  as malware or spam and cannot be downloaded" with the error code
   362  "cannotDownloadAbusiveFile" then supply this flag to rclone to
   363  indicate you acknowledge the risks of downloading the file and rclone
   364  will download it anyway.`,
   365  			Advanced: true,
   366  		}, {
   367  			Name:     "keep_revision_forever",
   368  			Default:  false,
   369  			Help:     "Keep new head revision of each file forever.",
   370  			Advanced: true,
   371  		}, {
   372  			Name:    "size_as_quota",
   373  			Default: false,
   374  			Help: `Show storage quota usage for file size.
   375  
   376  The storage used by a file is the size of the current version plus any
   377  older versions that have been set to keep forever.`,
   378  			Advanced: true,
   379  		}, {
   380  			Name:     "v2_download_min_size",
   381  			Default:  fs.SizeSuffix(-1),
   382  			Help:     "If Object's are greater, use drive v2 API to download.",
   383  			Advanced: true,
   384  		}, {
   385  			Name:     "pacer_min_sleep",
   386  			Default:  defaultMinSleep,
   387  			Help:     "Minimum time to sleep between API calls.",
   388  			Advanced: true,
   389  		}, {
   390  			Name:     "pacer_burst",
   391  			Default:  defaultBurst,
   392  			Help:     "Number of API calls to allow without sleeping.",
   393  			Advanced: true,
   394  		}, {
   395  			Name:    "server_side_across_configs",
   396  			Default: false,
   397  			Help: `Allow server side operations (eg copy) to work across different drive configs.
   398  
   399  This can be useful if you wish to do a server side copy between two
   400  different Google drives.  Note that this isn't enabled by default
   401  because it isn't easy to tell if it will work beween any two
   402  configurations.`,
   403  			Advanced: true,
   404  		}},
   405  	})
   406  
   407  	// register duplicate MIME types first
   408  	// this allows them to be used with mime.ExtensionsByType() but
   409  	// mime.TypeByExtension() will return the later registered MIME type
   410  	for _, m := range []map[string]string{
   411  		_mimeTypeToExtensionDuplicates, _mimeTypeToExtension, _mimeTypeToExtensionLinks,
   412  	} {
   413  		for mimeType, extension := range m {
   414  			if err := mime.AddExtensionType(extension, mimeType); err != nil {
   415  				log.Fatalf("Failed to register MIME type %q: %v", mimeType, err)
   416  			}
   417  		}
   418  	}
   419  }
   420  
// Options defines the configuration for this backend
type Options struct {
	Scope                     string        `config:"scope"`                       // OAuth scope(s) requested from drive
	RootFolderID              string        `config:"root_folder_id"`              // ID of the folder used as root
	ServiceAccountFile        string        `config:"service_account_file"`        // path to service account JSON file
	ServiceAccountCredentials string        `config:"service_account_credentials"` // service account JSON blob
	TeamDriveID               string        `config:"team_drive"`                  // ID of the Team Drive, if any
	AuthOwnerOnly             bool          `config:"auth_owner_only"`             // only consider files owned by the authenticated user
	UseTrash                  bool          `config:"use_trash"`                   // send files to the trash instead of deleting
	SkipGdocs                 bool          `config:"skip_gdocs"`                  // skip google documents in all listings
	SkipChecksumGphotos       bool          `config:"skip_checksum_gphotos"`       // return blank MD5s for Google photos/videos
	SharedWithMe              bool          `config:"shared_with_me"`              // only show files shared with me
	TrashedOnly               bool          `config:"trashed_only"`                // only show files that are in the trash
	Extensions                string        `config:"formats"`                     // deprecated - see ExportExtensions
	ExportExtensions          string        `config:"export_formats"`              // preferred formats for downloading Google docs
	ImportExtensions          string        `config:"import_formats"`              // preferred formats for uploading Google docs
	AllowImportNameChange     bool          `config:"allow_import_name_change"`    // allow filetype change on gdoc upload
	UseCreatedDate            bool          `config:"use_created_date"`            // use created date instead of modified date
	ListChunk                 int64         `config:"list_chunk"`                  // size of listing chunk, 0 to disable
	Impersonate               string        `config:"impersonate"`                 // user to impersonate with a service account
	AlternateExport           bool          `config:"alternate_export"`            // use alternate export URLs for gdocs
	UploadCutoff              fs.SizeSuffix `config:"upload_cutoff"`               // cutoff for switching to chunked upload
	ChunkSize                 fs.SizeSuffix `config:"chunk_size"`                  // upload chunk size
	AcknowledgeAbuse          bool          `config:"acknowledge_abuse"`           // allow download of flagged files
	KeepRevisionForever       bool          `config:"keep_revision_forever"`       // keep new head revision of each file forever
	SizeAsQuota               bool          `config:"size_as_quota"`               // report quota usage as the file size
	V2DownloadMinSize         fs.SizeSuffix `config:"v2_download_min_size"`        // use v2 API downloads above this size
	PacerMinSleep             fs.Duration   `config:"pacer_min_sleep"`             // minimum time to sleep between API calls
	PacerBurst                int           `config:"pacer_burst"`                 // API calls allowed without sleeping
	ServerSideAcrossConfigs   bool          `config:"server_side_across_configs"`  // allow server side ops across configs
}
   452  
// Fs represents a remote drive server
type Fs struct {
	name             string             // name of this remote
	root             string             // the path we are working on
	opt              Options            // parsed options
	features         *fs.Features       // optional features
	svc              *drive.Service     // the connection to the drive server
	v2Svc            *drive_v2.Service  // used to create download links for the v2 api
	client           *http.Client       // authorized client
	rootFolderID     string             // the id of the root folder
	dirCache         *dircache.DirCache // Map of directory path to directory id
	pacer            *fs.Pacer          // To pace the API calls
	exportExtensions []string           // preferred extensions to download docs
	importMimeTypes  []string           // MIME types to convert to docs
	isTeamDrive      bool               // true if this is a team drive
}
   469  
// baseObject holds the fields common to all kinds of drive objects
// (regular files, exported documents and link shortcuts).
type baseObject struct {
	fs           *Fs    // what this object is part of
	remote       string // The remote path
	id           string // Drive Id of this object
	modifiedDate string // RFC3339 time it was last modified
	mimeType     string // The object MIME type
	bytes        int64  // size of the object
}
// documentObject is a Google document exported to a regular format
// (e.g. a Google Doc downloaded as .docx).
type documentObject struct {
	baseObject
	url              string // Download URL of this object
	documentMimeType string // the original document MIME type
	extLen           int    // The length of the added export extension
}
// linkObject is a Google document exported as a link/shortcut file
// whose content is rendered from a link template.
type linkObject struct {
	baseObject
	content []byte // The file content generated by a link template
	extLen  int    // The length of the added export extension
}
   489  
// Object describes a drive object
type Object struct {
	baseObject
	url        string // Download URL of this object
	md5sum     string // md5sum of the object
	v2Download bool   // generate v2 download link ondemand
}
   497  
   498  // ------------------------------------------------------------
   499  
   500  // Name of the remote (as passed into NewFs)
   501  func (f *Fs) Name() string {
   502  	return f.name
   503  }
   504  
   505  // Root of the remote (as passed into NewFs)
   506  func (f *Fs) Root() string {
   507  	return f.root
   508  }
   509  
   510  // String converts this Fs to a string
   511  func (f *Fs) String() string {
   512  	return fmt.Sprintf("Google drive root '%s'", f.root)
   513  }
   514  
   515  // Features returns the optional features of this Fs
   516  func (f *Fs) Features() *fs.Features {
   517  	return f.features
   518  }
   519  
   520  // shouldRetry determines whether a given err rates being retried
   521  func shouldRetry(err error) (bool, error) {
   522  	if err == nil {
   523  		return false, nil
   524  	}
   525  	if fserrors.ShouldRetry(err) {
   526  		return true, err
   527  	}
   528  	switch gerr := err.(type) {
   529  	case *googleapi.Error:
   530  		if gerr.Code >= 500 && gerr.Code < 600 {
   531  			// All 5xx errors should be retried
   532  			return true, err
   533  		}
   534  		if len(gerr.Errors) > 0 {
   535  			reason := gerr.Errors[0].Reason
   536  			if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
   537  				return true, err
   538  			}
   539  		}
   540  	}
   541  	return false, err
   542  }
   543  
   544  // parseParse parses a drive 'url'
   545  func parseDrivePath(path string) (root string, err error) {
   546  	root = strings.Trim(path, "/")
   547  	return
   548  }
   549  
// listFn is the user function called by list to process each File item
// found.
//
// Should return true to finish processing early.
type listFn func(*drive.File) bool
   554  
   555  func containsString(slice []string, s string) bool {
   556  	for _, e := range slice {
   557  		if e == s {
   558  			return true
   559  		}
   560  	}
   561  	return false
   562  }
   563  
   564  // Lists the directory required calling the user function on each item found
   565  //
   566  // If the user fn ever returns true then it early exits with found = true
   567  //
   568  // Search params: https://developers.google.com/drive/search-parameters
   569  func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) {
   570  	var query []string
   571  	if !includeAll {
   572  		q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly)
   573  		if f.opt.TrashedOnly {
   574  			q = fmt.Sprintf("(mimeType='%s' or %s)", driveFolderType, q)
   575  		}
   576  		query = append(query, q)
   577  	}
   578  	// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents)
   579  	// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
   580  	// If we need to list file inside those shared folders, we must search it without sharedWithMe
   581  	parentsQuery := bytes.NewBufferString("(")
   582  	for _, dirID := range dirIDs {
   583  		if dirID == "" {
   584  			continue
   585  		}
   586  		if parentsQuery.Len() > 1 {
   587  			_, _ = parentsQuery.WriteString(" or ")
   588  		}
   589  		if f.opt.SharedWithMe && dirID == f.rootFolderID {
   590  			_, _ = parentsQuery.WriteString("sharedWithMe=true")
   591  		} else {
   592  			_, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
   593  		}
   594  	}
   595  	if parentsQuery.Len() > 1 {
   596  		_ = parentsQuery.WriteByte(')')
   597  		query = append(query, parentsQuery.String())
   598  	}
   599  	var stems []string
   600  	if title != "" {
   601  		// Escaping the backslash isn't documented but seems to work
   602  		searchTitle := strings.Replace(title, `\`, `\\`, -1)
   603  		searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)
   604  		// Convert / to / for search
   605  		searchTitle = strings.Replace(searchTitle, "/", "/", -1)
   606  
   607  		var titleQuery bytes.Buffer
   608  		_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
   609  		if !directoriesOnly && !f.opt.SkipGdocs {
   610  			// If the search title has an extension that is in the export extensions add a search
   611  			// for the filename without the extension.
   612  			// Assume that export extensions don't contain escape sequences.
   613  			for _, ext := range f.exportExtensions {
   614  				if strings.HasSuffix(searchTitle, ext) {
   615  					stems = append(stems, title[:len(title)-len(ext)])
   616  					_, _ = fmt.Fprintf(&titleQuery, " or name='%s'", searchTitle[:len(searchTitle)-len(ext)])
   617  				}
   618  			}
   619  		}
   620  		_ = titleQuery.WriteByte(')')
   621  		query = append(query, titleQuery.String())
   622  	}
   623  	if directoriesOnly {
   624  		query = append(query, fmt.Sprintf("mimeType='%s'", driveFolderType))
   625  	}
   626  	if filesOnly {
   627  		query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
   628  	}
   629  	list := f.svc.Files.List()
   630  	if len(query) > 0 {
   631  		list.Q(strings.Join(query, " and "))
   632  		// fmt.Printf("list Query = %q\n", query)
   633  	}
   634  	if f.opt.ListChunk > 0 {
   635  		list.PageSize(f.opt.ListChunk)
   636  	}
   637  	if f.isTeamDrive {
   638  		list.TeamDriveId(f.opt.TeamDriveID)
   639  		list.SupportsTeamDrives(true)
   640  		list.IncludeTeamDriveItems(true)
   641  		list.Corpora("teamDrive")
   642  	}
   643  	// If using appDataFolder then need to add Spaces
   644  	if f.rootFolderID == "appDataFolder" {
   645  		list.Spaces("appDataFolder")
   646  	}
   647  
   648  	var fields = partialFields
   649  
   650  	if f.opt.AuthOwnerOnly {
   651  		fields += ",owners"
   652  	}
   653  	if f.opt.SkipChecksumGphotos {
   654  		fields += ",spaces"
   655  	}
   656  	if f.opt.SizeAsQuota {
   657  		fields += ",quotaBytesUsed"
   658  	}
   659  
   660  	fields = fmt.Sprintf("files(%s),nextPageToken", fields)
   661  
   662  OUTER:
   663  	for {
   664  		var files *drive.FileList
   665  		err = f.pacer.Call(func() (bool, error) {
   666  			files, err = list.Fields(googleapi.Field(fields)).Do()
   667  			return shouldRetry(err)
   668  		})
   669  		if err != nil {
   670  			return false, errors.Wrap(err, "couldn't list directory")
   671  		}
   672  		for _, item := range files.Files {
   673  			// Convert / to / for listing purposes
   674  			item.Name = strings.Replace(item.Name, "/", "/", -1)
   675  			// Check the case of items is correct since
   676  			// the `=` operator is case insensitive.
   677  
   678  			if title != "" && title != item.Name {
   679  				found := false
   680  				for _, stem := range stems {
   681  					if stem == item.Name {
   682  						found = true
   683  						break
   684  					}
   685  				}
   686  				if !found {
   687  					continue
   688  				}
   689  				_, exportName, _, _ := f.findExportFormat(item)
   690  				if exportName == "" || exportName != title {
   691  					continue
   692  				}
   693  			}
   694  			if fn(item) {
   695  				found = true
   696  				break OUTER
   697  			}
   698  		}
   699  		if files.NextPageToken == "" {
   700  			break
   701  		}
   702  		list.PageToken(files.NextPageToken)
   703  	}
   704  	return
   705  }
   706  
   707  // Returns true of x is a power of 2 or zero
   708  func isPowerOfTwo(x int64) bool {
   709  	switch {
   710  	case x == 0:
   711  		return true
   712  	case x < 0:
   713  		return false
   714  	default:
   715  		return (x & (x - 1)) == 0
   716  	}
   717  }
   718  
   719  // add a charset parameter to all text/* MIME types
   720  func fixMimeType(mimeTypeIn string) string {
   721  	if mimeTypeIn == "" {
   722  		return ""
   723  	}
   724  	mediaType, param, err := mime.ParseMediaType(mimeTypeIn)
   725  	if err != nil {
   726  		return mimeTypeIn
   727  	}
   728  	mimeTypeOut := mimeTypeIn
   729  	if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
   730  		param["charset"] = "utf-8"
   731  		mimeTypeOut = mime.FormatMediaType(mediaType, param)
   732  	}
   733  	if mimeTypeOut == "" {
   734  		panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
   735  	}
   736  	return mimeTypeOut
   737  }
   738  func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
   739  	out = make(map[string][]string, len(in))
   740  	for k, v := range in {
   741  		for i, mt := range v {
   742  			v[i] = fixMimeType(mt)
   743  		}
   744  		out[fixMimeType(k)] = v
   745  	}
   746  	return out
   747  }
   748  func isInternalMimeType(mimeType string) bool {
   749  	return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
   750  }
   751  func isLinkMimeType(mimeType string) bool {
   752  	return strings.HasPrefix(mimeType, "application/x-link-")
   753  }
   754  
   755  // parseExtensions parses a list of comma separated extensions
   756  // into a list of unique extensions with leading "." and a list of associated MIME types
   757  func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
   758  	for _, extensionText := range extensionsIn {
   759  		for _, extension := range strings.Split(extensionText, ",") {
   760  			extension = strings.ToLower(strings.TrimSpace(extension))
   761  			if extension == "" {
   762  				continue
   763  			}
   764  			if len(extension) > 0 && extension[0] != '.' {
   765  				extension = "." + extension
   766  			}
   767  			mt := mime.TypeByExtension(extension)
   768  			if mt == "" {
   769  				return extensions, mimeTypes, errors.Errorf("couldn't find MIME type for extension %q", extension)
   770  			}
   771  			if !containsString(extensions, extension) {
   772  				extensions = append(extensions, extension)
   773  				mimeTypes = append(mimeTypes, mt)
   774  			}
   775  		}
   776  	}
   777  	return
   778  }
   779  
   780  // Figure out if the user wants to use a team drive
   781  func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
   782  	// Stop if we are running non-interactive config
   783  	if fs.Config.AutoConfirm {
   784  		return nil
   785  	}
   786  	if opt.TeamDriveID == "" {
   787  		fmt.Printf("Configure this as a team drive?\n")
   788  	} else {
   789  		fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
   790  	}
   791  	if !config.Confirm() {
   792  		return nil
   793  	}
   794  	client, err := createOAuthClient(opt, name, m)
   795  	if err != nil {
   796  		return errors.Wrap(err, "config team drive failed to create oauth client")
   797  	}
   798  	svc, err := drive.New(client)
   799  	if err != nil {
   800  		return errors.Wrap(err, "config team drive failed to make drive client")
   801  	}
   802  	fmt.Printf("Fetching team drive list...\n")
   803  	var driveIDs, driveNames []string
   804  	listTeamDrives := svc.Teamdrives.List().PageSize(100)
   805  	listFailed := false
   806  	for {
   807  		var teamDrives *drive.TeamDriveList
   808  		err = newPacer(opt).Call(func() (bool, error) {
   809  			teamDrives, err = listTeamDrives.Do()
   810  			return shouldRetry(err)
   811  		})
   812  		if err != nil {
   813  			fmt.Printf("Listing team drives failed: %v\n", err)
   814  			listFailed = true
   815  			break
   816  		}
   817  		for _, drive := range teamDrives.TeamDrives {
   818  			driveIDs = append(driveIDs, drive.Id)
   819  			driveNames = append(driveNames, drive.Name)
   820  		}
   821  		if teamDrives.NextPageToken == "" {
   822  			break
   823  		}
   824  		listTeamDrives.PageToken(teamDrives.NextPageToken)
   825  	}
   826  	var driveID string
   827  	if !listFailed && len(driveIDs) == 0 {
   828  		fmt.Printf("No team drives found in your account")
   829  	} else {
   830  		driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
   831  	}
   832  	m.Set("team_drive", driveID)
   833  	opt.TeamDriveID = driveID
   834  	return nil
   835  }
   836  
   837  // newPacer makes a pacer configured for drive
   838  func newPacer(opt *Options) *fs.Pacer {
   839  	return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
   840  }
   841  
   842  func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
   843  	scopes := driveScopes(opt.Scope)
   844  	conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
   845  	if err != nil {
   846  		return nil, errors.Wrap(err, "error processing credentials")
   847  	}
   848  	if opt.Impersonate != "" {
   849  		conf.Subject = opt.Impersonate
   850  	}
   851  	ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
   852  	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
   853  }
   854  
   855  func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
   856  	var oAuthClient *http.Client
   857  	var err error
   858  
   859  	// try loading service account credentials from env variable, then from a file
   860  	if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
   861  		loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
   862  		if err != nil {
   863  			return nil, errors.Wrap(err, "error opening service account credentials file")
   864  		}
   865  		opt.ServiceAccountCredentials = string(loadedCreds)
   866  	}
   867  	if opt.ServiceAccountCredentials != "" {
   868  		oAuthClient, err = getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials))
   869  		if err != nil {
   870  			return nil, errors.Wrap(err, "failed to create oauth client from service account")
   871  		}
   872  	} else {
   873  		oAuthClient, _, err = oauthutil.NewClient(name, m, driveConfig)
   874  		if err != nil {
   875  			return nil, errors.Wrap(err, "failed to create oauth client")
   876  		}
   877  	}
   878  
   879  	return oAuthClient, nil
   880  }
   881  
   882  func checkUploadChunkSize(cs fs.SizeSuffix) error {
   883  	if !isPowerOfTwo(int64(cs)) {
   884  		return errors.Errorf("%v isn't a power of two", cs)
   885  	}
   886  	if cs < minChunkSize {
   887  		return errors.Errorf("%s is less than %s", cs, minChunkSize)
   888  	}
   889  	return nil
   890  }
   891  
   892  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   893  	err = checkUploadChunkSize(cs)
   894  	if err == nil {
   895  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
   896  	}
   897  	return
   898  }
   899  
// checkUploadCutoff validates the upload cutoff value.
//
// Any value is currently accepted, so this always returns nil; it
// exists so setUploadCutoff mirrors setUploadChunkSize.
func checkUploadCutoff(cs fs.SizeSuffix) error {
	return nil
}
   903  
   904  func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   905  	err = checkUploadCutoff(cs)
   906  	if err == nil {
   907  		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
   908  	}
   909  	return
   910  }
   911  
// NewFs constructs an Fs from the path, container:path
//
// It validates the configured chunk size and upload cutoff, creates the
// oauth client and drive service(s), resolves the root folder ID and
// primes the directory cache. If the path turns out to point at a file
// rather than a directory, it returns the parent Fs and fs.ErrorIsFile.
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
	ctx := context.Background()
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = checkUploadCutoff(opt.UploadCutoff)
	if err != nil {
		return nil, errors.Wrap(err, "drive: upload cutoff")
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, errors.Wrap(err, "drive: chunk size")
	}

	oAuthClient, err := createOAuthClient(opt, name, m)
	if err != nil {
		return nil, errors.Wrap(err, "drive: failed when making oauth client")
	}

	root, err := parseDrivePath(path)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		pacer: newPacer(opt),
	}
	f.isTeamDrive = opt.TeamDriveID != ""
	f.features = (&fs.Features{
		DuplicateFiles:          true,
		ReadMimeType:            true,
		WriteMimeType:           true,
		CanHaveEmptyDirectories: true,
		ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
	}).Fill(f)

	// Create a new authorized Drive client.
	f.client = oAuthClient
	f.svc, err = drive.New(f.client)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create Drive client")
	}

	// A v2 client is only needed when v2 downloads are enabled
	// (v2_download_min_size >= 0).
	if f.opt.V2DownloadMinSize >= 0 {
		f.v2Svc, err = drive_v2.New(f.client)
		if err != nil {
			return nil, errors.Wrap(err, "couldn't create Drive v2 client")
		}
	}

	// set root folder for a team drive or query the user root folder
	if f.isTeamDrive {
		f.rootFolderID = f.opt.TeamDriveID
	} else {
		f.rootFolderID = "root"
	}

	// override root folder if set in the config
	if opt.RootFolderID != "" {
		f.rootFolderID = opt.RootFolderID
	}

	f.dirCache = dircache.New(root, f.rootFolderID, f)

	// Parse extensions; opt.Extensions is accepted as an alias for
	// opt.ExportExtensions but the two must not both be set.
	if opt.Extensions != "" {
		if opt.ExportExtensions != defaultExportExtensions {
			return nil, errors.New("only one of 'formats' and 'export_formats' can be specified")
		}
		opt.Extensions, opt.ExportExtensions = "", opt.Extensions
	}
	f.exportExtensions, _, err = parseExtensions(opt.ExportExtensions, defaultExportExtensions)
	if err != nil {
		return nil, err
	}

	_, f.importMimeTypes, err = parseExtensions(opt.ImportExtensions)
	if err != nil {
		return nil, err
	}

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.NewObject(ctx, remote)
		if err != nil {
			// unable to list folder so return old f
			return f, nil
		}
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/ncw/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		return f, fs.ErrorIsFile
	}
	// fmt.Printf("Root id %s", f.dirCache.RootID())
	return f, nil
}
  1029  
  1030  func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
  1031  	modifiedDate := info.ModifiedTime
  1032  	if f.opt.UseCreatedDate {
  1033  		modifiedDate = info.CreatedTime
  1034  	}
  1035  	size := info.Size
  1036  	if f.opt.SizeAsQuota {
  1037  		size = info.QuotaBytesUsed
  1038  	}
  1039  	return baseObject{
  1040  		fs:           f,
  1041  		remote:       remote,
  1042  		id:           info.Id,
  1043  		modifiedDate: modifiedDate,
  1044  		mimeType:     info.MimeType,
  1045  		bytes:        size,
  1046  	}
  1047  }
  1048  
  1049  // newRegularObject creates a fs.Object for a normal drive.File
  1050  func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
  1051  	// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
  1052  	if f.opt.SkipChecksumGphotos {
  1053  		for _, space := range info.Spaces {
  1054  			if space == "photos" {
  1055  				info.Md5Checksum = ""
  1056  				break
  1057  			}
  1058  		}
  1059  	}
  1060  	return &Object{
  1061  		baseObject: f.newBaseObject(remote, info),
  1062  		url:        fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, info.Id),
  1063  		md5sum:     strings.ToLower(info.Md5Checksum),
  1064  		v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
  1065  	}
  1066  }
  1067  
  1068  // newDocumentObject creates a fs.Object for a google docs drive.File
  1069  func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
  1070  	mediaType, _, err := mime.ParseMediaType(exportMimeType)
  1071  	if err != nil {
  1072  		return nil, err
  1073  	}
  1074  	url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, info.Id, url.QueryEscape(mediaType))
  1075  	if f.opt.AlternateExport {
  1076  		switch info.MimeType {
  1077  		case "application/vnd.google-apps.drawing":
  1078  			url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", info.Id, extension[1:])
  1079  		case "application/vnd.google-apps.document":
  1080  			url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", info.Id, extension[1:])
  1081  		case "application/vnd.google-apps.spreadsheet":
  1082  			url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", info.Id, extension[1:])
  1083  		case "application/vnd.google-apps.presentation":
  1084  			url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", info.Id, extension[1:])
  1085  		}
  1086  	}
  1087  	baseObject := f.newBaseObject(remote+extension, info)
  1088  	baseObject.bytes = -1
  1089  	baseObject.mimeType = exportMimeType
  1090  	return &documentObject{
  1091  		baseObject:       baseObject,
  1092  		url:              url,
  1093  		documentMimeType: info.MimeType,
  1094  		extLen:           len(extension),
  1095  	}, nil
  1096  }
  1097  
  1098  // newLinkObject creates a fs.Object that represents a link a google docs drive.File
  1099  func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
  1100  	t := linkTemplate(exportMimeType)
  1101  	if t == nil {
  1102  		return nil, errors.Errorf("unsupported link type %s", exportMimeType)
  1103  	}
  1104  	var buf bytes.Buffer
  1105  	err := t.Execute(&buf, struct {
  1106  		URL, Title string
  1107  	}{
  1108  		info.WebViewLink, info.Name,
  1109  	})
  1110  	if err != nil {
  1111  		return nil, errors.Wrap(err, "executing template failed")
  1112  	}
  1113  
  1114  	baseObject := f.newBaseObject(remote+extension, info)
  1115  	baseObject.bytes = int64(buf.Len())
  1116  	baseObject.mimeType = exportMimeType
  1117  	return &linkObject{
  1118  		baseObject: baseObject,
  1119  		content:    buf.Bytes(),
  1120  		extLen:     len(extension),
  1121  	}, nil
  1122  }
  1123  
  1124  // newObjectWithInfo creates a fs.Object for any drive.File
  1125  //
  1126  // When the drive.File cannot be represented as a fs.Object it will return (nil, nil).
  1127  func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
  1128  	// If item has MD5 sum or a length it is a file stored on drive
  1129  	if info.Md5Checksum != "" || info.Size > 0 {
  1130  		return f.newRegularObject(remote, info), nil
  1131  	}
  1132  
  1133  	extension, exportName, exportMimeType, isDocument := f.findExportFormat(info)
  1134  	return f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
  1135  }
  1136  
  1137  // newObjectWithExportInfo creates a fs.Object for any drive.File and the result of findExportFormat
  1138  //
  1139  // When the drive.File cannot be represented as a fs.Object it will return (nil, nil).
  1140  func (f *Fs) newObjectWithExportInfo(
  1141  	remote string, info *drive.File,
  1142  	extension, exportName, exportMimeType string, isDocument bool) (fs.Object, error) {
  1143  	switch {
  1144  	case info.Md5Checksum != "" || info.Size > 0:
  1145  		// If item has MD5 sum or a length it is a file stored on drive
  1146  		return f.newRegularObject(remote, info), nil
  1147  	case f.opt.SkipGdocs:
  1148  		fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
  1149  		return nil, nil
  1150  	default:
  1151  		// If item MimeType is in the ExportFormats then it is a google doc
  1152  		if !isDocument {
  1153  			fs.Debugf(remote, "Ignoring unknown document type %q", info.MimeType)
  1154  			return nil, nil
  1155  		}
  1156  		if extension == "" {
  1157  			fs.Debugf(remote, "No export formats found for %q", info.MimeType)
  1158  			return nil, nil
  1159  		}
  1160  		if isLinkMimeType(exportMimeType) {
  1161  			return f.newLinkObject(remote, info, extension, exportMimeType)
  1162  		}
  1163  		return f.newDocumentObject(remote, info, extension, exportMimeType)
  1164  	}
  1165  }
  1166  
  1167  // NewObject finds the Object at remote.  If it can't be found
  1168  // it returns the error fs.ErrorObjectNotFound.
  1169  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
  1170  	info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
  1171  	if err != nil {
  1172  		return nil, err
  1173  	}
  1174  
  1175  	remote = remote[:len(remote)-len(extension)]
  1176  	obj, err := f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
  1177  	switch {
  1178  	case err != nil:
  1179  		return nil, err
  1180  	case obj == nil:
  1181  		return nil, fs.ErrorObjectNotFound
  1182  	default:
  1183  		return obj, nil
  1184  	}
  1185  }
  1186  
  1187  // FindLeaf finds a directory of name leaf in the folder with ID pathID
  1188  func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
  1189  	// Find the leaf in pathID
  1190  	found, err = f.list(ctx, []string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
  1191  		if !f.opt.SkipGdocs {
  1192  			_, exportName, _, isDocument := f.findExportFormat(item)
  1193  			if exportName == leaf {
  1194  				pathIDOut = item.Id
  1195  				return true
  1196  			}
  1197  			if isDocument {
  1198  				return false
  1199  			}
  1200  		}
  1201  		if item.Name == leaf {
  1202  			pathIDOut = item.Id
  1203  			return true
  1204  		}
  1205  		return false
  1206  	})
  1207  	return pathIDOut, found, err
  1208  }
  1209  
  1210  // CreateDir makes a directory with pathID as parent and name leaf
  1211  func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
  1212  	// fmt.Println("Making", path)
  1213  	// Define the metadata for the directory we are going to create.
  1214  	createInfo := &drive.File{
  1215  		Name:        leaf,
  1216  		Description: leaf,
  1217  		MimeType:    driveFolderType,
  1218  		Parents:     []string{pathID},
  1219  	}
  1220  	var info *drive.File
  1221  	err = f.pacer.Call(func() (bool, error) {
  1222  		info, err = f.svc.Files.Create(createInfo).
  1223  			Fields("id").
  1224  			SupportsTeamDrives(f.isTeamDrive).
  1225  			Do()
  1226  		return shouldRetry(err)
  1227  	})
  1228  	if err != nil {
  1229  		return "", err
  1230  	}
  1231  	return info.Id, nil
  1232  }
  1233  
  1234  // isAuthOwned checks if any of the item owners is the authenticated owner
  1235  func isAuthOwned(item *drive.File) bool {
  1236  	for _, owner := range item.Owners {
  1237  		if owner.Me {
  1238  			return true
  1239  		}
  1240  	}
  1241  	return false
  1242  }
  1243  
  1244  // linkTemplate returns the Template for a MIME type or nil if the
  1245  // MIME type does not represent a link
  1246  func linkTemplate(mt string) *template.Template {
  1247  	templatesOnce.Do(func() {
  1248  		_linkTemplates = map[string]*template.Template{
  1249  			"application/x-link-desktop": template.Must(
  1250  				template.New("application/x-link-desktop").Parse(desktopTemplate)),
  1251  			"application/x-link-html": template.Must(
  1252  				template.New("application/x-link-html").Parse(htmlTemplate)),
  1253  			"application/x-link-url": template.Must(
  1254  				template.New("application/x-link-url").Parse(urlTemplate)),
  1255  			"application/x-link-webloc": template.Must(
  1256  				template.New("application/x-link-webloc").Parse(weblocTemplate)),
  1257  		}
  1258  	})
  1259  	return _linkTemplates[mt]
  1260  }
// fetchFormats fetches the exportFormats and importFormats maps from
// drive exactly once, caching them in the package level _exportFormats
// and _importFormats variables.
//
// On failure both maps are set to empty (so no import/export formats
// are available) and the error is logged.
func (f *Fs) fetchFormats() {
	fetchFormatsOnce.Do(func() {
		var about *drive.About
		var err error
		// err is also assigned inside the closure so the pacer can
		// inspect it to decide whether to retry.
		err = f.pacer.Call(func() (bool, error) {
			about, err = f.svc.About.Get().
				Fields("exportFormats,importFormats").
				Do()
			return shouldRetry(err)
		})
		if err != nil {
			fs.Errorf(f, "Failed to get Drive exportFormats and importFormats: %v", err)
			_exportFormats = map[string][]string{}
			_importFormats = map[string][]string{}
			return
		}
		// fixMimeTypeMap normalises the MIME types (adds a charset to text/* types).
		_exportFormats = fixMimeTypeMap(about.ExportFormats)
		_importFormats = fixMimeTypeMap(about.ImportFormats)
	})
}
  1281  
// exportFormats returns the export formats from drive, fetching them
// if necessary.
//
// The returned map is the shared package level cache and must not be
// modified by the caller.
//
// if the fetch fails then it will not export any drive formats
func (f *Fs) exportFormats() map[string][]string {
	f.fetchFormats()
	return _exportFormats
}
  1290  
// importFormats returns the import formats from drive, fetching them
// if necessary.
//
// The returned map is the shared package level cache and must not be
// modified by the caller.
//
// if the fetch fails then it will not import any drive formats
func (f *Fs) importFormats() map[string][]string {
	f.fetchFormats()
	return _importFormats
}
  1299  
  1300  // findExportFormatByMimeType works out the optimum export settings
  1301  // for the given MIME type.
  1302  //
  1303  // Look through the exportExtensions and find the first format that can be
  1304  // converted.  If none found then return ("", "", false)
  1305  func (f *Fs) findExportFormatByMimeType(itemMimeType string) (
  1306  	extension, mimeType string, isDocument bool) {
  1307  	exportMimeTypes, isDocument := f.exportFormats()[itemMimeType]
  1308  	if isDocument {
  1309  		for _, _extension := range f.exportExtensions {
  1310  			_mimeType := mime.TypeByExtension(_extension)
  1311  			if isLinkMimeType(_mimeType) {
  1312  				return _extension, _mimeType, true
  1313  			}
  1314  			for _, emt := range exportMimeTypes {
  1315  				if emt == _mimeType {
  1316  					return _extension, emt, true
  1317  				}
  1318  				if _mimeType == _mimeTypeCustomTransform[emt] {
  1319  					return _extension, emt, true
  1320  				}
  1321  			}
  1322  		}
  1323  	}
  1324  
  1325  	// else return empty
  1326  	return "", "", isDocument
  1327  }
  1328  
// findExportFormat works out the optimum export settings
// for the given drive.File.
//
// Look through the exportExtensions and find the first format that can be
// converted.  If none found then return ("", "", "", false)
func (f *Fs) findExportFormat(item *drive.File) (extension, filename, mimeType string, isDocument bool) {
	extension, mimeType, isDocument = f.findExportFormatByMimeType(item.MimeType)
	if extension != "" {
		// the exported file is named after the item plus the extension
		filename = item.Name + extension
	}
	return
}
  1341  
  1342  // findImportFormat finds the matching upload MIME type for a file
  1343  // If the given MIME type is in importMimeTypes, the matching upload
  1344  // MIME type is returned
  1345  //
  1346  // When no match is found "" is returned.
  1347  func (f *Fs) findImportFormat(mimeType string) string {
  1348  	mimeType = fixMimeType(mimeType)
  1349  	ifs := f.importFormats()
  1350  	for _, mt := range f.importMimeTypes {
  1351  		if mt == mimeType {
  1352  			importMimeTypes := ifs[mimeType]
  1353  			if l := len(importMimeTypes); l > 0 {
  1354  				if l > 1 {
  1355  					fs.Infof(f, "found %d import formats for %q: %q", l, mimeType, importMimeTypes)
  1356  				}
  1357  				return importMimeTypes[0]
  1358  			}
  1359  		}
  1360  	}
  1361  	return ""
  1362  }
  1363  
  1364  // List the objects and directories in dir into entries.  The
  1365  // entries can be returned in any order but should be for a
  1366  // complete directory.
  1367  //
  1368  // dir should be "" to list the root, and should not have
  1369  // trailing slashes.
  1370  //
  1371  // This should return ErrDirNotFound if the directory isn't
  1372  // found.
  1373  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
  1374  	err = f.dirCache.FindRoot(ctx, false)
  1375  	if err != nil {
  1376  		return nil, err
  1377  	}
  1378  	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
  1379  	if err != nil {
  1380  		return nil, err
  1381  	}
  1382  
  1383  	var iErr error
  1384  	_, err = f.list(ctx, []string{directoryID}, "", false, false, false, func(item *drive.File) bool {
  1385  		entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item)
  1386  		if err != nil {
  1387  			iErr = err
  1388  			return true
  1389  		}
  1390  		if entry != nil {
  1391  			entries = append(entries, entry)
  1392  		}
  1393  		return false
  1394  	})
  1395  	if err != nil {
  1396  		return nil, err
  1397  	}
  1398  	if iErr != nil {
  1399  		return nil, iErr
  1400  	}
  1401  	return entries, nil
  1402  }
  1403  
// listREntry is a task to be executed by a listRRunner:
// a directory ID to list plus its remote path.
type listREntry struct {
	id, path string
}
  1408  
// listRSlices is a helper struct to sort two slices at once,
// ordering by dirs and reordering paths to match.
type listRSlices struct {
	dirs  []string
	paths []string
}
  1414  
// Sort sorts both slices in tandem, ordered by s.dirs.
func (s listRSlices) Sort() {
	sort.Sort(s)
}
  1418  
// Len implements sort.Interface.
func (s listRSlices) Len() int {
	return len(s.dirs)
}
  1422  
// Swap implements sort.Interface, swapping the elements of both slices.
func (s listRSlices) Swap(i, j int) {
	s.dirs[i], s.dirs[j] = s.dirs[j], s.dirs[i]
	s.paths[i], s.paths[j] = s.paths[j], s.paths[i]
}
  1427  
// Less implements sort.Interface, comparing by directory ID.
func (s listRSlices) Less(i, j int) bool {
	return s.dirs[i] < s.dirs[j]
}
  1431  
// listRRunner will read dirIDs from the in channel, perform the file listing and call cb with each DirEntry.
//
// In each cycle it will read up to grouping entries from the in channel without blocking.
// If an error occurs it will be sent to the out channel and then return. Once the in channel is closed,
// nil is sent to the out channel and the function returns.
func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan listREntry, out chan<- error, cb func(fs.DirEntry) error, grouping int) {
	var dirs []string
	var paths []string

	for dir := range in {
		// Start a new batch with this entry, reusing the backing arrays.
		dirs = append(dirs[:0], dir.id)
		paths = append(paths[:0], dir.path)
	waitloop:
		// Drain up to grouping-1 more entries without blocking (default case).
		for i := 1; i < grouping; i++ {
			select {
			case d, ok := <-in:
				if !ok {
					break waitloop
				}
				dirs = append(dirs, d.id)
				paths = append(paths, d.path)
			default:
			}
		}
		// Sort by ID so the parents can be found by binary search below.
		listRSlices{dirs, paths}.Sort()
		var iErr error
		_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
			for _, parent := range item.Parents {
				// only handle parents that are in the requested dirs list
				i := sort.SearchStrings(dirs, parent)
				if i == len(dirs) || dirs[i] != parent {
					continue
				}
				remote := path.Join(paths[i], item.Name)
				entry, err := f.itemToDirEntry(remote, item)
				if err != nil {
					iErr = err
					return true
				}

				err = cb(entry)
				if err != nil {
					iErr = err
					return true
				}
			}
			return false
		})
		// Mark every directory of this batch as done on the shared WaitGroup.
		for range dirs {
			wg.Done()
		}

		if iErr != nil {
			out <- iErr
			return
		}

		if err != nil {
			out <- err
			return
		}
	}
	out <- nil
}
  1496  
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order.  If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	const (
		grouping    = 50   // number of directories to list per listRRunner batch
		inputBuffer = 1000 // capacity of the work queue before entries overflow
	)

	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		return err
	}
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}
	// Resolve the "root" alias to the real folder ID so comparisons
	// against item parent IDs in listRRunner work.
	if directoryID == "root" {
		var info *drive.File
		err = f.pacer.CallNoRetry(func() (bool, error) {
			info, err = f.svc.Files.Get("root").
				Fields("id").
				SupportsTeamDrives(f.isTeamDrive).
				Do()
			return shouldRetry(err)
		})
		if err != nil {
			return err
		}
		directoryID = info.Id
	}

	mu := sync.Mutex{} // protects in and overflow
	wg := sync.WaitGroup{}
	in := make(chan listREntry, inputBuffer)
	out := make(chan error, fs.Config.Checkers)
	list := walk.NewListRHelper(callback)
	overflow := []listREntry{}

	// cb queues each discovered directory for the workers (or into
	// overflow when the channel is full) and forwards every entry to
	// the list helper.
	cb := func(entry fs.DirEntry) error {
		mu.Lock()
		defer mu.Unlock()
		if d, isDir := entry.(*fs.Dir); isDir && in != nil {
			select {
			case in <- listREntry{d.ID(), d.Remote()}:
				wg.Add(1)
			default:
				overflow = append(overflow, listREntry{d.ID(), d.Remote()})
			}
		}
		return list.Add(entry)
	}

	// seed the queue with the starting directory
	wg.Add(1)
	in <- listREntry{directoryID, dir}

	for i := 0; i < fs.Config.Checkers; i++ {
		go f.listRRunner(ctx, &wg, in, out, cb, grouping)
	}
	go func() {
		// wait until all directories are processed
		wg.Wait()
		// if the input channel overflowed add the collected entries to the channel now
		for len(overflow) > 0 {
			mu.Lock()
			l := len(overflow)
			// only fill half of the channel to prevent entries being put into overflow again
			if l > inputBuffer/2 {
				l = inputBuffer / 2
			}
			wg.Add(l)
			for _, d := range overflow[:l] {
				in <- d
			}
			overflow = overflow[l:]
			mu.Unlock()

			// wait again for the completion of all directories
			wg.Wait()
		}
		mu.Lock()
		if in != nil {
			// notify all workers to exit
			close(in)
			in = nil
		}
		mu.Unlock()
	}()
	// wait for all workers to finish
	for i := 0; i < fs.Config.Checkers; i++ {
		e := <-out
		mu.Lock()
		// if one worker returns an error early, close the input so all other workers exit
		if e != nil && in != nil {
			err = e
			close(in)
			in = nil
		}
		mu.Unlock()
	}

	close(out)
	if err != nil {
		return err
	}

	return list.Flush()
}
  1618  
// itemToDirEntry converts a drive.File to a fs.DirEntry.
// When the drive.File cannot be represented as a fs.DirEntry
// (nil, nil) is returned.
func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error) {
	switch {
	case item.MimeType == driveFolderType:
		// cache the directory ID for later lookups
		f.dirCache.Put(remote, item.Id)
		// a parse failure leaves when as the zero time
		when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
		d := fs.NewDir(remote, when).SetID(item.Id)
		return d, nil
	case f.opt.AuthOwnerOnly && !isAuthOwned(item):
		// ignore object not owned by the authenticated user
	default:
		return f.newObjectWithInfo(remote, item)
	}
	return nil, nil
}
  1637  
  1638  // Creates a drive.File info from the parameters passed in.
  1639  //
  1640  // Used to create new objects
  1641  func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Time) (*drive.File, error) {
  1642  	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
  1643  	if err != nil {
  1644  		return nil, err
  1645  	}
  1646  
  1647  	// Define the metadata for the file we are going to create.
  1648  	createInfo := &drive.File{
  1649  		Name:         leaf,
  1650  		Description:  leaf,
  1651  		Parents:      []string{directoryID},
  1652  		ModifiedTime: modTime.Format(timeFormatOut),
  1653  	}
  1654  	return createInfo, nil
  1655  }
  1656  
  1657  // Put the object
  1658  //
  1659  // Copy the reader in to the new object which is returned
  1660  //
  1661  // The new object may have been created if an error is returned
  1662  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1663  	exisitingObj, err := f.NewObject(ctx, src.Remote())
  1664  	switch err {
  1665  	case nil:
  1666  		return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
  1667  	case fs.ErrorObjectNotFound:
  1668  		// Not found so create it
  1669  		return f.PutUnchecked(ctx, in, src, options...)
  1670  	default:
  1671  		return nil, err
  1672  	}
  1673  }
  1674  
  1675  // PutStream uploads to the remote path with the modTime given of indeterminate size
  1676  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1677  	return f.Put(ctx, in, src, options...)
  1678  }
  1679  
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime(ctx)
	srcMimeType := fs.MimeTypeFromName(remote)
	srcExt := path.Ext(remote)
	exportExt := ""
	importMimeType := ""

	// Decide whether the upload should be converted into a Google
	// Docs document on the way in
	if f.importMimeTypes != nil && !f.opt.SkipGdocs {
		importMimeType = f.findImportFormat(srcMimeType)

		if isInternalMimeType(importMimeType) {
			// Google Docs are stored without an extension, so strip
			// the source extension from the remote name
			remote = remote[:len(remote)-len(srcExt)]

			exportExt, _, _ = f.findExportFormatByMimeType(importMimeType)
			if exportExt == "" {
				return nil, errors.Errorf("No export format found for %q", importMimeType)
			}
			// Refuse the import if downloading the document again would
			// produce a different file name, unless explicitly allowed
			if exportExt != srcExt && !f.opt.AllowImportNameChange {
				return nil, errors.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
			}
		}
	}

	createInfo, err := f.createFileInfo(ctx, remote, modTime)
	if err != nil {
		return nil, err
	}
	if importMimeType != "" {
		createInfo.MimeType = importMimeType
	} else {
		createInfo.MimeType = fs.MimeTypeFromName(remote)
	}

	var info *drive.File
	if size == 0 || size < int64(f.opt.UploadCutoff) {
		// Make the API request to upload metadata and file data.
		// Don't retry, return a retry error instead
		err = f.pacer.CallNoRetry(func() (bool, error) {
			info, err = f.svc.Files.Create(createInfo).
				Media(in, googleapi.ContentType(srcMimeType)).
				Fields(partialFields).
				SupportsTeamDrives(f.isTeamDrive).
				KeepRevisionForever(f.opt.KeepRevisionForever).
				Do()
			return shouldRetry(err)
		})
		if err != nil {
			return nil, err
		}
	} else {
		// Upload the file in chunks using the resumable upload API
		info, err = f.Upload(in, size, srcMimeType, "", remote, createInfo)
		if err != nil {
			return nil, err
		}
	}
	return f.newObjectWithInfo(remote, info)
}
  1744  
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
//
// Used to tidy up duplicate directories: every object in dirs[1:] is
// reparented into dirs[0] and the emptied source directories are
// trashed.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	// Nothing to merge with fewer than two directories
	if len(dirs) < 2 {
		return nil
	}
	dstDir := dirs[0]
	for _, srcDir := range dirs[1:] {
		// list the objects in the source directory
		infos := []*drive.File{}
		_, err := f.list(ctx, []string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool {
			infos = append(infos, info)
			return false
		})
		if err != nil {
			return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
		}
		// move them into place
		for _, info := range infos {
			fs.Infof(srcDir, "merging %q", info.Name)
			// Move the file into the destination by swapping its parent
			err = f.pacer.Call(func() (bool, error) {
				_, err = f.svc.Files.Update(info.Id, nil).
					RemoveParents(srcDir.ID()).
					AddParents(dstDir.ID()).
					Fields("").
					SupportsTeamDrives(f.isTeamDrive).
					Do()
				return shouldRetry(err)
			})
			if err != nil {
				return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
			}
		}
		// rmdir (into trash) the now empty source directory
		fs.Infof(srcDir, "removing empty directory")
		err = f.rmdir(ctx, srcDir.ID(), true)
		if err != nil {
			return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
		}
	}
	return nil
}
  1788  
  1789  // Mkdir creates the container if it doesn't exist
  1790  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
  1791  	err := f.dirCache.FindRoot(ctx, true)
  1792  	if err != nil {
  1793  		return err
  1794  	}
  1795  	if dir != "" {
  1796  		_, err = f.dirCache.FindDir(ctx, dir, true)
  1797  	}
  1798  	return err
  1799  }
  1800  
  1801  // Rmdir deletes a directory unconditionally by ID
  1802  func (f *Fs) rmdir(ctx context.Context, directoryID string, useTrash bool) error {
  1803  	return f.pacer.Call(func() (bool, error) {
  1804  		var err error
  1805  		if useTrash {
  1806  			info := drive.File{
  1807  				Trashed: true,
  1808  			}
  1809  			_, err = f.svc.Files.Update(directoryID, &info).
  1810  				Fields("").
  1811  				SupportsTeamDrives(f.isTeamDrive).
  1812  				Do()
  1813  		} else {
  1814  			err = f.svc.Files.Delete(directoryID).
  1815  				Fields("").
  1816  				SupportsTeamDrives(f.isTeamDrive).
  1817  				Do()
  1818  		}
  1819  		return shouldRetry(err)
  1820  	})
  1821  }
  1822  
  1823  // Rmdir deletes a directory
  1824  //
  1825  // Returns an error if it isn't empty
  1826  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
  1827  	root := path.Join(f.root, dir)
  1828  	dc := f.dirCache
  1829  	directoryID, err := dc.FindDir(ctx, dir, false)
  1830  	if err != nil {
  1831  		return err
  1832  	}
  1833  	var trashedFiles = false
  1834  	found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
  1835  		if !item.Trashed {
  1836  			fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
  1837  			return true
  1838  		}
  1839  		fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name)
  1840  		trashedFiles = true
  1841  		return false
  1842  	})
  1843  	if err != nil {
  1844  		return err
  1845  	}
  1846  	if found {
  1847  		return errors.Errorf("directory not empty")
  1848  	}
  1849  	if root != "" {
  1850  		// trash the directory if it had trashed files
  1851  		// in or the user wants to trash, otherwise
  1852  		// delete it.
  1853  		err = f.rmdir(ctx, directoryID, trashedFiles || f.opt.UseTrash)
  1854  		if err != nil {
  1855  			return err
  1856  		}
  1857  	}
  1858  	f.dirCache.FlushDir(dir)
  1859  	if err != nil {
  1860  		return err
  1861  	}
  1862  	return nil
  1863  }
  1864  
  1865  // Precision of the object storage system
  1866  func (f *Fs) Precision() time.Duration {
  1867  	return time.Millisecond
  1868  }
  1869  
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	var srcObj *baseObject
	ext := ""
	// Server side copy only works between drive objects
	switch src := src.(type) {
	case *Object:
		srcObj = &src.baseObject
	case *documentObject:
		srcObj, ext = &src.baseObject, src.ext()
	case *linkObject:
		srcObj, ext = &src.baseObject, src.ext()
	default:
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	// Exported documents carry an export extension which the
	// destination must share; strip it before the copy
	if ext != "" {
		if !strings.HasSuffix(remote, ext) {
			fs.Debugf(src, "Can't copy - not same document type")
			return nil, fs.ErrorCantCopy
		}
		remote = remote[:len(remote)-len(ext)]
	}

	// Look to see if there is an existing object, which will be
	// removed after a successful copy to avoid duplicates
	existingObject, _ := f.NewObject(ctx, remote)

	createInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
	if err != nil {
		return nil, err
	}

	supportTeamDrives, err := f.ShouldSupportTeamDrives(src)
	if err != nil {
		return nil, err
	}

	var info *drive.File
	err = f.pacer.Call(func() (bool, error) {
		info, err = f.svc.Files.Copy(srcObj.id, createInfo).
			Fields(partialFields).
			SupportsTeamDrives(supportTeamDrives).
			KeepRevisionForever(f.opt.KeepRevisionForever).
			Do()
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	newObject, err := f.newObjectWithInfo(remote, info)
	if err != nil {
		return nil, err
	}
	// A failure to remove the old object is logged but does not fail
	// the copy - the new object is already in place
	if existingObject != nil {
		err = existingObject.Remove(ctx)
		if err != nil {
			fs.Errorf(existingObject, "Failed to remove existing object after copy: %v", err)
		}
	}
	return newObject, nil
}
  1939  
  1940  // Purge deletes all the files and the container
  1941  //
  1942  // Optional interface: Only implement this if you have a way of
  1943  // deleting all the files quicker than just running Remove() on the
  1944  // result of List()
  1945  func (f *Fs) Purge(ctx context.Context) error {
  1946  	if f.root == "" {
  1947  		return errors.New("can't purge root directory")
  1948  	}
  1949  	err := f.dirCache.FindRoot(ctx, false)
  1950  	if err != nil {
  1951  		return err
  1952  	}
  1953  	err = f.pacer.Call(func() (bool, error) {
  1954  		if f.opt.UseTrash {
  1955  			info := drive.File{
  1956  				Trashed: true,
  1957  			}
  1958  			_, err = f.svc.Files.Update(f.dirCache.RootID(), &info).
  1959  				Fields("").
  1960  				SupportsTeamDrives(f.isTeamDrive).
  1961  				Do()
  1962  		} else {
  1963  			err = f.svc.Files.Delete(f.dirCache.RootID()).
  1964  				Fields("").
  1965  				SupportsTeamDrives(f.isTeamDrive).
  1966  				Do()
  1967  		}
  1968  		return shouldRetry(err)
  1969  	})
  1970  	f.dirCache.ResetRoot()
  1971  	if err != nil {
  1972  		return err
  1973  	}
  1974  	return nil
  1975  }
  1976  
  1977  // CleanUp empties the trash
  1978  func (f *Fs) CleanUp(ctx context.Context) error {
  1979  	err := f.pacer.Call(func() (bool, error) {
  1980  		err := f.svc.Files.EmptyTrash().Do()
  1981  		return shouldRetry(err)
  1982  	})
  1983  
  1984  	if err != nil {
  1985  		return err
  1986  	}
  1987  	return nil
  1988  }
  1989  
  1990  // About gets quota information
  1991  func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
  1992  	if f.isTeamDrive {
  1993  		// Teamdrives don't appear to have a usage API so just return empty
  1994  		return &fs.Usage{}, nil
  1995  	}
  1996  	var about *drive.About
  1997  	var err error
  1998  	err = f.pacer.Call(func() (bool, error) {
  1999  		about, err = f.svc.About.Get().Fields("storageQuota").Do()
  2000  		return shouldRetry(err)
  2001  	})
  2002  	if err != nil {
  2003  		return nil, errors.Wrap(err, "failed to get Drive storageQuota")
  2004  	}
  2005  	q := about.StorageQuota
  2006  	usage := &fs.Usage{
  2007  		Used:    fs.NewUsageValue(q.UsageInDrive),           // bytes in use
  2008  		Trashed: fs.NewUsageValue(q.UsageInDriveTrash),      // bytes in trash
  2009  		Other:   fs.NewUsageValue(q.Usage - q.UsageInDrive), // other usage eg gmail in drive
  2010  	}
  2011  	if q.Limit > 0 {
  2012  		usage.Total = fs.NewUsageValue(q.Limit)          // quota of bytes that can be used
  2013  		usage.Free = fs.NewUsageValue(q.Limit - q.Usage) // bytes which can be uploaded before reaching the quota
  2014  	}
  2015  	return usage, nil
  2016  }
  2017  
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	var srcObj *baseObject
	ext := ""
	// Server side move only works between drive objects
	switch src := src.(type) {
	case *Object:
		srcObj = &src.baseObject
	case *documentObject:
		srcObj, ext = &src.baseObject, src.ext()
	case *linkObject:
		srcObj, ext = &src.baseObject, src.ext()
	default:
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Exported documents carry an export extension which the
	// destination must share; strip it before the move
	if ext != "" {
		if !strings.HasSuffix(remote, ext) {
			fs.Debugf(src, "Can't move - not same document type")
			return nil, fs.ErrorCantMove
		}
		remote = remote[:len(remote)-len(ext)]
	}

	// Current parent of the source, to be removed by the update
	_, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, src.Remote(), false)
	if err != nil {
		return nil, err
	}

	// Temporary Object under construction
	dstInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
	if err != nil {
		return nil, err
	}
	// Parents must be passed via AddParents on the update call, not in
	// the file metadata, so move them out of dstInfo
	dstParents := strings.Join(dstInfo.Parents, ",")
	dstInfo.Parents = nil

	supportTeamDrives, err := f.ShouldSupportTeamDrives(src)
	if err != nil {
		return nil, err
	}

	// Do the move
	var info *drive.File
	err = f.pacer.Call(func() (bool, error) {
		info, err = f.svc.Files.Update(srcObj.id, dstInfo).
			RemoveParents(srcParentID).
			AddParents(dstParents).
			Fields(partialFields).
			SupportsTeamDrives(supportTeamDrives).
			Do()
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}

	return f.newObjectWithInfo(remote, info)
}
  2085  
  2086  // ShouldSupportTeamDrives returns the request should support TeamDrives
  2087  func (f *Fs) ShouldSupportTeamDrives(src fs.Object) (bool, error) {
  2088  	srcIsTeamDrive := false
  2089  	if srcFs, ok := src.Fs().(*Fs); ok {
  2090  		srcIsTeamDrive = srcFs.isTeamDrive
  2091  	}
  2092  
  2093  	if f.isTeamDrive {
  2094  		return true, nil
  2095  	}
  2096  
  2097  	return srcIsTeamDrive, nil
  2098  }
  2099  
  2100  // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
  2101  func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
  2102  	id, err := f.dirCache.FindDir(ctx, remote, false)
  2103  	if err == nil {
  2104  		fs.Debugf(f, "attempting to share directory '%s'", remote)
  2105  	} else {
  2106  		fs.Debugf(f, "attempting to share single file '%s'", remote)
  2107  		o, err := f.NewObject(ctx, remote)
  2108  		if err != nil {
  2109  			return "", err
  2110  		}
  2111  		id = o.(fs.IDer).ID()
  2112  	}
  2113  
  2114  	permission := &drive.Permission{
  2115  		AllowFileDiscovery: false,
  2116  		Role:               "reader",
  2117  		Type:               "anyone",
  2118  	}
  2119  
  2120  	err = f.pacer.Call(func() (bool, error) {
  2121  		// TODO: On TeamDrives this might fail if lacking permissions to change ACLs.
  2122  		// Need to either check `canShare` attribute on the object or see if a sufficient permission is already present.
  2123  		_, err = f.svc.Permissions.Create(id, permission).
  2124  			Fields("").
  2125  			SupportsTeamDrives(f.isTeamDrive).
  2126  			Do()
  2127  		return shouldRetry(err)
  2128  	})
  2129  	if err != nil {
  2130  		return "", err
  2131  	}
  2132  	return fmt.Sprintf("https://drive.google.com/open?id=%s", id), nil
  2133  }
  2134  
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := path.Join(srcFs.root, srcRemote)
	dstPath := path.Join(f.root, dstRemote)

	// Refuse to move to or from the root
	if srcPath == "" || dstPath == "" {
		fs.Debugf(src, "DirMove error: Can't move root")
		return errors.New("can't move root directory")
	}

	// find the root src directory
	err := srcFs.dirCache.FindRoot(ctx, false)
	if err != nil {
		return err
	}

	// find the root dst directory
	if dstRemote != "" {
		err = f.dirCache.FindRoot(ctx, true)
		if err != nil {
			return err
		}
	} else {
		// Moving to the remote's root - it must not already exist
		if f.dirCache.FoundRoot() {
			return fs.ErrorDirExists
		}
	}

	// Find ID of dst parent, creating subdirs if necessary
	var leaf, dstDirectoryID string
	findPath := dstRemote
	if dstRemote == "" {
		findPath = f.root
	}
	leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
	if err != nil {
		return err
	}

	// Check destination does not exist
	if dstRemote != "" {
		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
		if err == fs.ErrorDirNotFound {
			// OK
		} else if err != nil {
			return err
		} else {
			return fs.ErrorDirExists
		}
	}

	// Find ID of src parent
	var srcDirectoryID string
	if srcRemote == "" {
		srcDirectoryID, err = srcFs.dirCache.RootParentID()
	} else {
		_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, srcRemote, false)
	}
	if err != nil {
		return err
	}

	// Find ID of src
	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
	if err != nil {
		return err
	}

	// Do the move: rename to the destination leaf and swap the parent
	// directory in a single update call
	patch := drive.File{
		Name: leaf,
	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Files.Update(srcID, &patch).
			RemoveParents(srcDirectoryID).
			AddParents(dstDirectoryID).
			Fields("").
			SupportsTeamDrives(f.isTeamDrive).
			Do()
		return shouldRetry(err)
	})
	if err != nil {
		return err
	}
	// Invalidate the moved directory in the source's cache
	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}
  2235  
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
	go func() {
		// get the StartPageToken early so all changes from now on get processed
		startPageToken, err := f.changeNotifyStartPageToken()
		if err != nil {
			fs.Infof(f, "Failed to get StartPageToken: %s", err)
		}
		var ticker *time.Ticker
		var tickerC <-chan time.Time
		for {
			select {
			case pollInterval, ok := <-pollIntervalChan:
				// A closed channel means stop polling and exit
				if !ok {
					if ticker != nil {
						ticker.Stop()
					}
					return
				}
				// Replace the old ticker (if any) with one at the new
				// interval; an interval of 0 disables polling
				if ticker != nil {
					ticker.Stop()
					ticker, tickerC = nil, nil
				}
				if pollInterval != 0 {
					ticker = time.NewTicker(pollInterval)
					tickerC = ticker.C
				}
			case <-tickerC:
				// Retry fetching the StartPageToken if the initial
				// attempt (above) failed
				if startPageToken == "" {
					startPageToken, err = f.changeNotifyStartPageToken()
					if err != nil {
						fs.Infof(f, "Failed to get StartPageToken: %s", err)
						continue
					}
				}
				fs.Debugf(f, "Checking for changes on remote")
				startPageToken, err = f.changeNotifyRunner(notifyFunc, startPageToken)
				if err != nil {
					fs.Infof(f, "Change notify listener failure: %s", err)
				}
			}
		}
	}()
}
  2285  func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
  2286  	var startPageToken *drive.StartPageToken
  2287  	err = f.pacer.Call(func() (bool, error) {
  2288  		startPageToken, err = f.svc.Changes.GetStartPageToken().
  2289  			SupportsTeamDrives(f.isTeamDrive).
  2290  			Do()
  2291  		return shouldRetry(err)
  2292  	})
  2293  	if err != nil {
  2294  		return
  2295  	}
  2296  	return startPageToken.StartPageToken, nil
  2297  }
  2298  
// changeNotifyRunner reads the changes feed starting at startPageToken,
// calls notifyFunc for every affected path (deduplicated), and returns
// the token to resume from on the next poll.
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
	pageToken := startPageToken
	for {
		var changeList *drive.ChangeList

		err = f.pacer.Call(func() (bool, error) {
			changesCall := f.svc.Changes.List(pageToken).
				Fields("nextPageToken,newStartPageToken,changes(fileId,file(name,parents,mimeType))")
			if f.opt.ListChunk > 0 {
				changesCall.PageSize(f.opt.ListChunk)
			}
			if f.isTeamDrive {
				changesCall.TeamDriveId(f.opt.TeamDriveID)
				changesCall.SupportsTeamDrives(true)
				changesCall.IncludeTeamDriveItems(true)
			}
			changeList, err = changesCall.Do()
			return shouldRetry(err)
		})
		if err != nil {
			return
		}

		// A path that changed plus whether it is a file or a directory
		type entryType struct {
			path      string
			entryType fs.EntryType
		}
		var pathsToClear []entryType
		for _, change := range changeList.Changes {
			// find the previous path via the directory cache - the
			// item may have been renamed or moved since then
			if path, ok := f.dirCache.GetInv(change.FileId); ok {
				if change.File != nil && change.File.MimeType != driveFolderType {
					pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
				} else {
					pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory})
				}
			}

			// find the new path
			if change.File != nil {
				changeType := fs.EntryDirectory
				if change.File.MimeType != driveFolderType {
					changeType = fs.EntryObject
				}

				// translate the parent dir of this object
				if len(change.File.Parents) > 0 {
					for _, parent := range change.File.Parents {
						if parentPath, ok := f.dirCache.GetInv(parent); ok {
							// and append the drive file name to compute the full file name
							newPath := path.Join(parentPath, change.File.Name)
							// this will now clear the actual file too
							pathsToClear = append(pathsToClear, entryType{path: newPath, entryType: changeType})
						}
					}
				} else { // a true root object that is changed
					pathsToClear = append(pathsToClear, entryType{path: change.File.Name, entryType: changeType})
				}
			}
		}

		// Notify each affected path exactly once
		visitedPaths := make(map[string]struct{})
		for _, entry := range pathsToClear {
			if _, ok := visitedPaths[entry.path]; ok {
				continue
			}
			visitedPaths[entry.path] = struct{}{}
			notifyFunc(entry.path, entry.entryType)
		}

		// NewStartPageToken ends the feed; NextPageToken continues it
		switch {
		case changeList.NewStartPageToken != "":
			return changeList.NewStartPageToken, nil
		case changeList.NextPageToken != "":
			pageToken = changeList.NextPageToken
		default:
			return
		}
	}
}
  2379  
  2380  // DirCacheFlush resets the directory cache - used in testing as an
  2381  // optional interface
  2382  func (f *Fs) DirCacheFlush() {
  2383  	f.dirCache.ResetRoot()
  2384  }
  2385  
  2386  // Hashes returns the supported hash sets.
  2387  func (f *Fs) Hashes() hash.Set {
  2388  	return hash.Set(hash.MD5)
  2389  }
  2390  
  2391  // ------------------------------------------------------------
  2392  
  2393  // Fs returns the parent Fs
  2394  func (o *baseObject) Fs() fs.Info {
  2395  	return o.fs
  2396  }
  2397  
  2398  // Return a string version
  2399  func (o *baseObject) String() string {
  2400  	return o.remote
  2401  }
  2402  
  2403  // Return a string version
  2404  func (o *Object) String() string {
  2405  	if o == nil {
  2406  		return "<nil>"
  2407  	}
  2408  	return o.remote
  2409  }
  2410  
  2411  // Remote returns the remote path
  2412  func (o *baseObject) Remote() string {
  2413  	return o.remote
  2414  }
  2415  
  2416  // Hash returns the Md5sum of an object returning a lowercase hex string
  2417  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  2418  	if t != hash.MD5 {
  2419  		return "", hash.ErrUnsupported
  2420  	}
  2421  	return o.md5sum, nil
  2422  }
  2423  func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
  2424  	if t != hash.MD5 {
  2425  		return "", hash.ErrUnsupported
  2426  	}
  2427  	return "", nil
  2428  }
  2429  
  2430  // Size returns the size of an object in bytes
  2431  func (o *baseObject) Size() int64 {
  2432  	return o.bytes
  2433  }
  2434  
  2435  // getRemoteInfo returns a drive.File for the remote
  2436  func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File, err error) {
  2437  	info, _, _, _, _, err = f.getRemoteInfoWithExport(ctx, remote)
  2438  	return
  2439  }
  2440  
// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
//
// Returns fs.ErrorObjectNotFound if neither a plain file nor an
// exportable document matching the leaf name is found.
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
	info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
	if err != nil {
		// A missing parent directory means the object can't exist
		if err == fs.ErrorDirNotFound {
			return nil, "", "", "", false, fs.ErrorObjectNotFound
		}
		return nil, "", "", "", false, err
	}

	found, err := f.list(ctx, []string{directoryID}, leaf, false, true, false, func(item *drive.File) bool {
		if !f.opt.SkipGdocs {
			// Match against the exported name of a Google document
			// (which includes the export extension)
			extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
			if exportName == leaf {
				info = item
				return true
			}
			// Documents whose export name doesn't match can't match by
			// raw name either - keep looking
			if isDocument {
				return false
			}
		}
		// Match a plain file by its unmodified name
		if item.Name == leaf {
			info = item
			return true
		}
		return false
	})
	if err != nil {
		return nil, "", "", "", false, err
	}
	if !found {
		return nil, "", "", "", false, fs.ErrorObjectNotFound
	}
	return
}
  2477  
  2478  // ModTime returns the modification time of the object
  2479  //
  2480  //
  2481  // It attempts to read the objects mtime and if that isn't present the
  2482  // LastModified returned in the http headers
  2483  func (o *baseObject) ModTime(ctx context.Context) time.Time {
  2484  	modTime, err := time.Parse(timeFormatIn, o.modifiedDate)
  2485  	if err != nil {
  2486  		fs.Debugf(o, "Failed to read mtime from object: %v", err)
  2487  		return time.Now()
  2488  	}
  2489  	return modTime
  2490  }
  2491  
  2492  // SetModTime sets the modification time of the drive fs object
  2493  func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
  2494  	// New metadata
  2495  	updateInfo := &drive.File{
  2496  		ModifiedTime: modTime.Format(timeFormatOut),
  2497  	}
  2498  	// Set modified date
  2499  	var info *drive.File
  2500  	err := o.fs.pacer.Call(func() (bool, error) {
  2501  		var err error
  2502  		info, err = o.fs.svc.Files.Update(o.id, updateInfo).
  2503  			Fields(partialFields).
  2504  			SupportsTeamDrives(o.fs.isTeamDrive).
  2505  			Do()
  2506  		return shouldRetry(err)
  2507  	})
  2508  	if err != nil {
  2509  		return err
  2510  	}
  2511  	// Update info from read data
  2512  	o.modifiedDate = info.ModifiedTime
  2513  	return nil
  2514  }
  2515  
  2516  // Storable returns a boolean as to whether this object is storable
  2517  func (o *baseObject) Storable() bool {
  2518  	return true
  2519  }
  2520  
// httpResponse gets an http.Response object for the object
// using the url and method passed in
//
// Retries are handled by the pacer; on success the caller owns the
// response body and must close it.
func (o *baseObject) httpResponse(url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
	// An empty URL means drive didn't give us a download link,
	// typically because sharing/download is not permitted
	if url == "" {
		return nil, nil, errors.New("forbidden to download - check sharing permission")
	}
	req, err = http.NewRequest(method, url, nil)
	if err != nil {
		return req, nil, err
	}
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	if o.bytes == 0 {
		// Don't supply range requests for 0 length objects as they always fail
		delete(req.Header, "Range")
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.client.Do(req)
		if err == nil {
			// Turn HTTP error statuses into an error
			err = googleapi.CheckResponse(res)
			if err != nil {
				_ = res.Body.Close() // ignore error
			}
		}
		return shouldRetry(err)
	})
	if err != nil {
		return req, nil, err
	}
	return req, res, nil
}
  2551  
// openDocumentFile represents a documentObject open for reading.
// Updates the object size after the whole file has been read
// successfully, since exported document sizes are not known up front.
type openDocumentFile struct {
	o       *documentObject // Object we are reading for
	in      io.ReadCloser   // reading from here
	bytes   int64           // number of bytes read on this connection
	eof     bool            // whether we have read end of file
	errored bool            // whether we have encountered an error during reading
}
  2561  
  2562  // Read bytes from the object - see io.Reader
  2563  func (file *openDocumentFile) Read(p []byte) (n int, err error) {
  2564  	n, err = file.in.Read(p)
  2565  	file.bytes += int64(n)
  2566  	if err != nil && err != io.EOF {
  2567  		file.errored = true
  2568  	}
  2569  	if err == io.EOF {
  2570  		file.eof = true
  2571  	}
  2572  	return
  2573  }
  2574  
  2575  // Close the object and update bytes read
  2576  func (file *openDocumentFile) Close() (err error) {
  2577  	// If end of file, update bytes read
  2578  	if file.eof && !file.errored {
  2579  		fs.Debugf(file.o, "Updating size of doc after download to %v", file.bytes)
  2580  		file.o.bytes = file.bytes
  2581  	}
  2582  	return file.in.Close()
  2583  }
  2584  
// Check it satisfies the io.ReadCloser interface at compile time
var _ io.ReadCloser = (*openDocumentFile)(nil)
  2587  
  2588  // Checks to see if err is a googleapi.Error with of type what
  2589  func isGoogleError(err error, what string) bool {
  2590  	if gerr, ok := err.(*googleapi.Error); ok {
  2591  		for _, error := range gerr.Errors {
  2592  			if error.Reason == what {
  2593  				return true
  2594  			}
  2595  		}
  2596  	}
  2597  	return false
  2598  }
  2599  
// open a url for reading
//
// If the download fails because Google has flagged the file as
// abusive then, when --drive-acknowledge-abuse is set, the request is
// retried with acknowledgeAbuse=true appended to the query string;
// otherwise the error is wrapped with a hint about that flag.
func (o *baseObject) open(url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	_, res, err := o.httpResponse(url, "GET", options)
	if err != nil {
		if isGoogleError(err, "cannotDownloadAbusiveFile") {
			if o.fs.opt.AcknowledgeAbuse {
				// Retry acknowledging abuse
				if strings.ContainsRune(url, '?') {
					url += "&"
				} else {
					url += "?"
				}
				url += "acknowledgeAbuse=true"
				_, res, err = o.httpResponse(url, "GET", options)
			} else {
				err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
			}
		}
		if err != nil {
			return nil, errors.Wrap(err, "open file failed")
		}
	}
	return res.Body, nil
}
  2624  
// Open an object for read
//
// If v2Download is set, the deprecated v2 API is queried once for a
// downloadUrl which then replaces o.url; a failure here is ignored
// and the existing url is used instead.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	if o.v2Download {
		var v2File *drive_v2.File
		err = o.fs.pacer.Call(func() (bool, error) {
			v2File, err = o.fs.v2Svc.Files.Get(o.id).
				Fields("downloadUrl").
				SupportsTeamDrives(o.fs.isTeamDrive).
				Do()
			return shouldRetry(err)
		})
		if err == nil {
			fs.Debugf(o, "Using v2 download: %v", v2File.DownloadUrl)
			o.url = v2File.DownloadUrl
			// Only fetch the v2 URL once per object
			o.v2Download = false
		}
	}
	return o.baseObject.open(o.url, options...)
}
// Open a google document for read by exporting it
//
// Range/Seek options are stripped from the request and emulated
// locally where possible, because the export endpoint does not
// support partial downloads.  The stream is wrapped so the object's
// size is updated after a complete read (see openDocumentFile).
func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// Update the size with what we are reading as it can change from
	// the HEAD in the listing to this GET. This stops rclone marking
	// the transfer as corrupted.
	var offset, end int64 = 0, -1
	// Reuse the options slice storage for the filtered options
	var newOptions = options[:0]
	for _, o := range options {
		// Note that Range requests don't work on Google docs:
		// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
		// So do a subset of them manually
		switch x := o.(type) {
		case *fs.RangeOption:
			offset, end = x.Start, x.End
		case *fs.SeekOption:
			offset, end = x.Offset, -1
		default:
			newOptions = append(newOptions, o)
		}
	}
	options = newOptions
	if offset != 0 {
		// Only ranges starting at 0 can be emulated by truncation
		return nil, errors.New("partial downloads are not supported while exporting Google Documents")
	}
	in, err = o.baseObject.open(o.url, options...)
	if in != nil {
		in = &openDocumentFile{o: o, in: in}
	}
	if end >= 0 {
		// Emulate the bounded range by limiting the reader
		in = readers.NewLimitedReadCloser(in, end-offset+1)
	}
	return
}
  2676  func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  2677  	var offset, limit int64 = 0, -1
  2678  	var data = o.content
  2679  	for _, option := range options {
  2680  		switch x := option.(type) {
  2681  		case *fs.SeekOption:
  2682  			offset = x.Offset
  2683  		case *fs.RangeOption:
  2684  			offset, limit = x.Decode(int64(len(data)))
  2685  		default:
  2686  			if option.Mandatory() {
  2687  				fs.Logf(o, "Unsupported mandatory option: %v", option)
  2688  			}
  2689  		}
  2690  	}
  2691  	if l := int64(len(data)); offset > l {
  2692  		offset = l
  2693  	}
  2694  	data = data[offset:]
  2695  	if limit != -1 && limit < int64(len(data)) {
  2696  		data = data[:limit]
  2697  	}
  2698  
  2699  	return ioutil.NopCloser(bytes.NewReader(data)), nil
  2700  }
  2701  
// update uploads the metadata in updateInfo and the file data from in
// over the existing object
//
// Empty files and files smaller than --drive-upload-cutoff are sent
// as a single multipart update (retried as a whole via CallNoRetry);
// larger files go through the resumable chunked Upload path.
func (o *baseObject) update(updateInfo *drive.File, uploadMimeType string, in io.Reader,
	src fs.ObjectInfo) (info *drive.File, err error) {
	// Make the API request to upload metadata and file data.
	size := src.Size()
	if size == 0 || size < int64(o.fs.opt.UploadCutoff) {
		// Don't retry, return a retry error instead
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			info, err = o.fs.svc.Files.Update(o.id, updateInfo).
				Media(in, googleapi.ContentType(uploadMimeType)).
				Fields(partialFields).
				SupportsTeamDrives(o.fs.isTeamDrive).
				KeepRevisionForever(o.fs.opt.KeepRevisionForever).
				Do()
			return shouldRetry(err)
		})
		return
	}
	// Upload the file in chunks
	return o.fs.Upload(in, size, uploadMimeType, o.id, o.remote, updateInfo)
}
  2722  
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	srcMimeType := fs.MimeType(ctx, src)
	updateInfo := &drive.File{
		MimeType:     srcMimeType,
		ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
	}
	info, err := o.baseObject.update(updateInfo, srcMimeType, in, src)
	if err != nil {
		return err
	}
	// Re-read the object from the returned info and replace o in place
	newO, err := o.fs.newObjectWithInfo(src.Remote(), info)
	if err != nil {
		return err
	}
	switch newO := newO.(type) {
	case *Object:
		*o = *newO
	default:
		// e.g. the upload turned the file into a google doc
		return errors.New("object type changed by update")
	}

	return nil
}
  2751  func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  2752  	srcMimeType := fs.MimeType(ctx, src)
  2753  	importMimeType := ""
  2754  	updateInfo := &drive.File{
  2755  		MimeType:     srcMimeType,
  2756  		ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
  2757  	}
  2758  
  2759  	if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
  2760  		return errors.Errorf("can't update google document type without --drive-import-formats")
  2761  	}
  2762  	importMimeType = o.fs.findImportFormat(updateInfo.MimeType)
  2763  	if importMimeType == "" {
  2764  		return errors.Errorf("no import format found for %q", srcMimeType)
  2765  	}
  2766  	if importMimeType != o.documentMimeType {
  2767  		return errors.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
  2768  	}
  2769  	updateInfo.MimeType = importMimeType
  2770  
  2771  	info, err := o.baseObject.update(updateInfo, srcMimeType, in, src)
  2772  	if err != nil {
  2773  		return err
  2774  	}
  2775  
  2776  	remote := src.Remote()
  2777  	remote = remote[:len(remote)-o.extLen]
  2778  
  2779  	newO, err := o.fs.newObjectWithInfo(remote, info)
  2780  	if err != nil {
  2781  		return err
  2782  	}
  2783  	switch newO := newO.(type) {
  2784  	case *documentObject:
  2785  		*o = *newO
  2786  	default:
  2787  		return errors.New("object type changed by update")
  2788  	}
  2789  
  2790  	return nil
  2791  }
  2792  
  2793  func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  2794  	return errors.New("cannot update link files")
  2795  }
  2796  
// Remove an object
//
// With --drive-use-trash (the default) the file is moved to the trash
// by setting Trashed=true; otherwise it is deleted permanently.
func (o *baseObject) Remove(ctx context.Context) error {
	var err error
	// err is captured so the pacer can retry the whole operation
	err = o.fs.pacer.Call(func() (bool, error) {
		if o.fs.opt.UseTrash {
			info := drive.File{
				Trashed: true,
			}
			_, err = o.fs.svc.Files.Update(o.id, &info).
				Fields("").
				SupportsTeamDrives(o.fs.isTeamDrive).
				Do()
		} else {
			err = o.fs.svc.Files.Delete(o.id).
				Fields("").
				SupportsTeamDrives(o.fs.isTeamDrive).
				Do()
		}
		return shouldRetry(err)
	})
	return err
}
  2819  
  2820  // MimeType of an Object if known, "" otherwise
  2821  func (o *baseObject) MimeType(ctx context.Context) string {
  2822  	return o.mimeType
  2823  }
  2824  
  2825  // ID returns the ID of the Object if known, or "" if not
  2826  func (o *baseObject) ID() string {
  2827  	return o.id
  2828  }
  2829  
  2830  func (o *documentObject) ext() string {
  2831  	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
  2832  }
  2833  func (o *linkObject) ext() string {
  2834  	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
  2835  }
  2836  
// templates for document link files
//
// Each is a text/template expanded with {{ .URL }} and {{ .Title }}
// to render a google drive link as a platform-native shortcut file.
const (
	// Windows .url InternetShortcut file (CRLF line endings via {{"\r"}})
	urlTemplate = `[InternetShortcut]{{"\r"}}
URL={{ .URL }}{{"\r"}}
`
	// macOS .webloc property-list file
	weblocTemplate = `<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
  <dict>
    <key>URL</key>
    <string>{{ .URL }}</string>
  </dict>
</plist>
`
	// freedesktop .desktop link entry
	desktopTemplate = `[Desktop Entry]
Encoding=UTF-8
Name={{ .Title }}
URL={{ .URL }}
Icon=text-html
Type=Link
`
	// HTML page that redirects to the target URL
	htmlTemplate = `<html>
<head>
  <meta http-equiv="refresh" content="0; url={{ .URL }}" />
  <title>{{ .Title }}</title>
</head>
<body>
  Loading <a href="{{ .URL }}">{{ .Title }}</a>
</body>
</html>
`
)
  2869  
// Check the interfaces are satisfied at compile time
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.CleanUpper      = (*Fs)(nil)
	_ fs.PutStreamer     = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier  = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.ListRer         = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.MimeTyper       = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
	_ fs.Object          = (*documentObject)(nil)
	_ fs.MimeTyper       = (*documentObject)(nil)
	_ fs.IDer            = (*documentObject)(nil)
	_ fs.Object          = (*linkObject)(nil)
	_ fs.MimeTyper       = (*linkObject)(nil)
	_ fs.IDer            = (*linkObject)(nil)
)