github.com/xhghs/rclone@v1.51.1-0.20200430155106-e186a28cced8/backend/drive/drive.go (about)

     1  // Package drive interfaces with the Google Drive object storage system
     2  package drive
     3  
     4  // FIXME need to deal with some corner cases
     5  // * multiple files with the same name
     6  // * files can be in multiple directories
     7  // * can have directory loops
     8  // * files with / in name
     9  
import (
	"bytes"
	"context"
	"crypto/tls"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"math/rand"
	"mime"
	"net/http"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
	"text/template"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/readers"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	drive_v2 "google.golang.org/api/drive/v2"
	drive "google.golang.org/api/drive/v3"
	"google.golang.org/api/googleapi"
)
    52  
// Constants
const (
	rcloneClientID              = "202264815644.apps.googleusercontent.com"
	rcloneEncryptedClientSecret = "eX8GpZTVx3vxMWVkuuBdDWmAUE6rGhTwVrvG9GhllYccSdj2-mvHVg"
	driveFolderType             = "application/vnd.google-apps.folder" // MIME type Drive uses for folders
	timeFormatIn                = time.RFC3339
	timeFormatOut               = "2006-01-02T15:04:05.000000000Z07:00" // RFC3339 with nanosecond precision, for times sent to Drive
	defaultMinSleep             = fs.Duration(100 * time.Millisecond)
	defaultBurst                = 100
	defaultExportExtensions     = "docx,xlsx,pptx,svg"
	scopePrefix                 = "https://www.googleapis.com/auth/" // prepended to every scope name from config
	defaultScope                = "drive"
	// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
	// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
	minChunkSize     = 256 * fs.KibiByte
	defaultChunkSize = 8 * fs.MebiByte
	partialFields    = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink" // fields requested for file gets/listings
)
    71  
// Globals
var (
	// Description of how to auth for this app
	driveConfig = &oauth2.Config{
		Scopes:       []string{scopePrefix + "drive"},
		Endpoint:     google.Endpoint,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.TitleBarRedirectURL,
	}
	// Alternate spellings of MIME types mapping to the same extensions as
	// _mimeTypeToExtension. These are registered first in init so that
	// mime.TypeByExtension returns the later (canonical) registration.
	_mimeTypeToExtensionDuplicates = map[string]string{
		"application/x-vnd.oasis.opendocument.presentation": ".odp",
		"application/x-vnd.oasis.opendocument.spreadsheet":  ".ods",
		"application/x-vnd.oasis.opendocument.text":         ".odt",
		"image/jpg":   ".jpg",
		"image/x-bmp": ".bmp",
		"image/x-png": ".png",
		"text/rtf":    ".rtf",
	}
	// Canonical MIME type -> preferred file extension mapping used for
	// exporting/importing Google docs.
	_mimeTypeToExtension = map[string]string{
		"application/epub+zip":                            ".epub",
		"application/json":                                ".json",
		"application/msword":                              ".doc",
		"application/pdf":                                 ".pdf",
		"application/rtf":                                 ".rtf",
		"application/vnd.ms-excel":                        ".xls",
		"application/vnd.oasis.opendocument.presentation": ".odp",
		"application/vnd.oasis.opendocument.spreadsheet":  ".ods",
		"application/vnd.oasis.opendocument.text":         ".odt",
		"application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
		"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":         ".xlsx",
		"application/vnd.openxmlformats-officedocument.wordprocessingml.document":   ".docx",
		"application/x-msmetafile":  ".wmf",
		"application/zip":           ".zip",
		"image/bmp":                 ".bmp",
		"image/jpeg":                ".jpg",
		"image/pjpeg":               ".pjpeg",
		"image/png":                 ".png",
		"image/svg+xml":             ".svg",
		"text/csv":                  ".csv",
		"text/html":                 ".html",
		"text/plain":                ".txt",
		"text/tab-separated-values": ".tsv",
	}
	// MIME types for link/shortcut files rendered from _linkTemplates.
	_mimeTypeToExtensionLinks = map[string]string{
		"application/x-link-desktop": ".desktop",
		"application/x-link-html":    ".link.html",
		"application/x-link-url":     ".url",
		"application/x-link-webloc":  ".webloc",
	}
	// MIME types rewritten to another type before extension lookup.
	_mimeTypeCustomTransform = map[string]string{
		"application/vnd.google-apps.script+json": "application/json",
	}
	fetchFormatsOnce sync.Once                     // make sure we fetch the export/import formats only once
	_exportFormats   map[string][]string           // allowed export MIME type conversions
	_importFormats   map[string][]string           // allowed import MIME type conversions
	templatesOnce    sync.Once                     // parse link templates only once
	_linkTemplates   map[string]*template.Template // available link types
)
   131  
   132  // Parse the scopes option returning a slice of scopes
   133  func driveScopes(scopesString string) (scopes []string) {
   134  	if scopesString == "" {
   135  		scopesString = defaultScope
   136  	}
   137  	for _, scope := range strings.Split(scopesString, ",") {
   138  		scope = strings.TrimSpace(scope)
   139  		scopes = append(scopes, scopePrefix+scope)
   140  	}
   141  	return scopes
   142  }
   143  
   144  // Returns true if one of the scopes was "drive.appfolder"
   145  func driveScopesContainsAppFolder(scopes []string) bool {
   146  	for _, scope := range scopes {
   147  		if scope == scopePrefix+"drive.appfolder" {
   148  			return true
   149  		}
   150  
   151  	}
   152  	return false
   153  }
   154  
   155  // Register with Fs
   156  func init() {
   157  	fs.Register(&fs.RegInfo{
   158  		Name:        "drive",
   159  		Description: "Google Drive",
   160  		NewFs:       NewFs,
   161  		Config: func(name string, m configmap.Mapper) {
   162  			ctx := context.TODO()
   163  			// Parse config into Options struct
   164  			opt := new(Options)
   165  			err := configstruct.Set(m, opt)
   166  			if err != nil {
   167  				fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
   168  				return
   169  			}
   170  
   171  			// Fill in the scopes
   172  			driveConfig.Scopes = driveScopes(opt.Scope)
   173  			// Set the root_folder_id if using drive.appfolder
   174  			if driveScopesContainsAppFolder(driveConfig.Scopes) {
   175  				m.Set("root_folder_id", "appDataFolder")
   176  			}
   177  
   178  			if opt.ServiceAccountFile == "" {
   179  				err = oauthutil.Config("drive", name, m, driveConfig)
   180  				if err != nil {
   181  					log.Fatalf("Failed to configure token: %v", err)
   182  				}
   183  			}
   184  			err = configTeamDrive(ctx, opt, m, name)
   185  			if err != nil {
   186  				log.Fatalf("Failed to configure team drive: %v", err)
   187  			}
   188  		},
   189  		Options: []fs.Option{{
   190  			Name: config.ConfigClientID,
   191  			Help: "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance.",
   192  		}, {
   193  			Name: config.ConfigClientSecret,
   194  			Help: "Google Application Client Secret\nSetting your own is recommended.",
   195  		}, {
   196  			Name: "scope",
   197  			Help: "Scope that rclone should use when requesting access from drive.",
   198  			Examples: []fs.OptionExample{{
   199  				Value: "drive",
   200  				Help:  "Full access all files, excluding Application Data Folder.",
   201  			}, {
   202  				Value: "drive.readonly",
   203  				Help:  "Read-only access to file metadata and file contents.",
   204  			}, {
   205  				Value: "drive.file",
   206  				Help:  "Access to files created by rclone only.\nThese are visible in the drive website.\nFile authorization is revoked when the user deauthorizes the app.",
   207  			}, {
   208  				Value: "drive.appfolder",
   209  				Help:  "Allows read and write access to the Application Data folder.\nThis is not visible in the drive website.",
   210  			}, {
   211  				Value: "drive.metadata.readonly",
   212  				Help:  "Allows read-only access to file metadata but\ndoes not allow any access to read or download file content.",
   213  			}},
   214  		}, {
   215  			Name: "root_folder_id",
   216  			Help: `ID of the root folder
   217  Leave blank normally.
   218  
   219  Fill in to access "Computers" folders (see docs), or for rclone to use
   220  a non root folder as its starting point.
   221  
   222  Note that if this is blank, the first time rclone runs it will fill it
   223  in with the ID of the root folder.
   224  `,
   225  		}, {
   226  			Name: "service_account_file",
   227  			Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
   228  		}, {
   229  			Name: "service_account_file_path",
   230  			Help: "Service Account Credentials JSON file path .\n",
   231  		},{
   232  			Name:     "service_account_credentials",
   233  			Help:     "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
   234  			Hide:     fs.OptionHideConfigurator,
   235  			Advanced: true,
   236  		}, {
   237  			Name:     "team_drive",
   238  			Help:     "ID of the Team Drive",
   239  			Hide:     fs.OptionHideConfigurator,
   240  			Advanced: true,
   241  		}, {
   242  			Name:     "auth_owner_only",
   243  			Default:  false,
   244  			Help:     "Only consider files owned by the authenticated user.",
   245  			Advanced: true,
   246  		}, {
   247  			Name:     "use_trash",
   248  			Default:  true,
   249  			Help:     "Send files to the trash instead of deleting permanently.\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
   250  			Advanced: true,
   251  		}, {
   252  			Name:     "skip_gdocs",
   253  			Default:  false,
   254  			Help:     "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.",
   255  			Advanced: true,
   256  		}, {
   257  			Name:    "skip_checksum_gphotos",
   258  			Default: false,
   259  			Help: `Skip MD5 checksum on Google photos and videos only.
   260  
   261  Use this if you get checksum errors when transferring Google photos or
   262  videos.
   263  
   264  Setting this flag will cause Google photos and videos to return a
   265  blank MD5 checksum.
   266  
   267  Google photos are identifed by being in the "photos" space.
   268  
   269  Corrupted checksums are caused by Google modifying the image/video but
   270  not updating the checksum.`,
   271  			Advanced: true,
   272  		}, {
   273  			Name:    "shared_with_me",
   274  			Default: false,
   275  			Help: `Only show files that are shared with me.
   276  
   277  Instructs rclone to operate on your "Shared with me" folder (where
   278  Google Drive lets you access the files and folders others have shared
   279  with you).
   280  
   281  This works both with the "list" (lsd, lsl, etc) and the "copy"
   282  commands (copy, sync, etc), and with all other commands too.`,
   283  			Advanced: true,
   284  		}, {
   285  			Name:     "trashed_only",
   286  			Default:  false,
   287  			Help:     "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
   288  			Advanced: true,
   289  		}, {
   290  			Name:     "formats",
   291  			Default:  "",
   292  			Help:     "Deprecated: see export_formats",
   293  			Advanced: true,
   294  			Hide:     fs.OptionHideConfigurator,
   295  		}, {
   296  			Name:     "export_formats",
   297  			Default:  defaultExportExtensions,
   298  			Help:     "Comma separated list of preferred formats for downloading Google docs.",
   299  			Advanced: true,
   300  		}, {
   301  			Name:     "import_formats",
   302  			Default:  "",
   303  			Help:     "Comma separated list of preferred formats for uploading Google docs.",
   304  			Advanced: true,
   305  		}, {
   306  			Name:     "allow_import_name_change",
   307  			Default:  false,
   308  			Help:     "Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.",
   309  			Advanced: true,
   310  		}, {
   311  			Name:    "use_created_date",
   312  			Default: false,
   313  			Help: `Use file created date instead of modified date.,
   314  
   315  Useful when downloading data and you want the creation date used in
   316  place of the last modified date.
   317  
   318  **WARNING**: This flag may have some unexpected consequences.
   319  
   320  When uploading to your drive all files will be overwritten unless they
   321  haven't been modified since their creation. And the inverse will occur
   322  while downloading.  This side effect can be avoided by using the
   323  "--checksum" flag.
   324  
   325  This feature was implemented to retain photos capture date as recorded
   326  by google photos. You will first need to check the "Create a Google
   327  Photos folder" option in your google drive settings. You can then copy
   328  or move the photos locally and use the date the image was taken
   329  (created) set as the modification date.`,
   330  			Advanced: true,
   331  			Hide:     fs.OptionHideConfigurator,
   332  		}, {
   333  			Name:    "use_shared_date",
   334  			Default: false,
   335  			Help: `Use date file was shared instead of modified date.
   336  
   337  Note that, as with "--drive-use-created-date", this flag may have
   338  unexpected consequences when uploading/downloading files.
   339  
   340  If both this flag and "--drive-use-created-date" are set, the created
   341  date is used.`,
   342  			Advanced: true,
   343  			Hide:     fs.OptionHideConfigurator,
   344  		}, {
   345  			Name:     "list_chunk",
   346  			Default:  1000,
   347  			Help:     "Size of listing chunk 100-1000. 0 to disable.",
   348  			Advanced: true,
   349  		}, {
   350  			Name:     "impersonate",
   351  			Default:  "",
   352  			Help:     "Impersonate this user when using a service account.",
   353  			Advanced: true,
   354  		}, {
   355  			Name:    "alternate_export",
   356  			Default: false,
   357  			Help: `Use alternate export URLs for google documents export.,
   358  
   359  If this option is set this instructs rclone to use an alternate set of
   360  export URLs for drive documents.  Users have reported that the
   361  official export URLs can't export large documents, whereas these
   362  unofficial ones can.
   363  
   364  See rclone issue [#2243](https://github.com/rclone/rclone/issues/2243) for background,
   365  [this google drive issue](https://issuetracker.google.com/issues/36761333) and
   366  [this helpful post](https://www.labnol.org/internet/direct-links-for-google-drive/28356/).`,
   367  			Advanced: true,
   368  		}, {
   369  			Name:     "upload_cutoff",
   370  			Default:  defaultChunkSize,
   371  			Help:     "Cutoff for switching to chunked upload",
   372  			Advanced: true,
   373  		}, {
   374  			Name:    "chunk_size",
   375  			Default: defaultChunkSize,
   376  			Help: `Upload chunk size. Must a power of 2 >= 256k.
   377  
   378  Making this larger will improve performance, but note that each chunk
   379  is buffered in memory one per transfer.
   380  
   381  Reducing this will reduce memory usage but decrease performance.`,
   382  			Advanced: true,
   383  		}, {
   384  			Name:    "acknowledge_abuse",
   385  			Default: false,
   386  			Help: `Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
   387  
   388  If downloading a file returns the error "This file has been identified
   389  as malware or spam and cannot be downloaded" with the error code
   390  "cannotDownloadAbusiveFile" then supply this flag to rclone to
   391  indicate you acknowledge the risks of downloading the file and rclone
   392  will download it anyway.`,
   393  			Advanced: true,
   394  		}, {
   395  			Name:     "keep_revision_forever",
   396  			Default:  false,
   397  			Help:     "Keep new head revision of each file forever.",
   398  			Advanced: true,
   399  		}, {
   400  			Name:    "size_as_quota",
   401  			Default: false,
   402  			Help: `Show sizes as storage quota usage, not actual size.
   403  
   404  Show the size of a file as the the storage quota used. This is the
   405  current version plus any older versions that have been set to keep
   406  forever.
   407  
   408  **WARNING**: This flag may have some unexpected consequences.
   409  
   410  It is not recommended to set this flag in your config - the
   411  recommended usage is using the flag form --drive-size-as-quota when
   412  doing rclone ls/lsl/lsf/lsjson/etc only.
   413  
   414  If you do use this flag for syncing (not recommended) then you will
   415  need to use --ignore size also.`,
   416  			Advanced: true,
   417  			Hide:     fs.OptionHideConfigurator,
   418  		}, {
   419  			Name:     "v2_download_min_size",
   420  			Default:  fs.SizeSuffix(-1),
   421  			Help:     "If Object's are greater, use drive v2 API to download.",
   422  			Advanced: true,
   423  		}, {
   424  			Name:     "pacer_min_sleep",
   425  			Default:  defaultMinSleep,
   426  			Help:     "Minimum time to sleep between API calls.",
   427  			Advanced: true,
   428  		}, {
   429  			Name:     "pacer_burst",
   430  			Default:  defaultBurst,
   431  			Help:     "Number of API calls to allow without sleeping.",
   432  			Advanced: true,
   433  		}, {
   434  			Name:    "server_side_across_configs",
   435  			Default: false,
   436  			Help: `Allow server side operations (eg copy) to work across different drive configs.
   437  
   438  This can be useful if you wish to do a server side copy between two
   439  different Google drives.  Note that this isn't enabled by default
   440  because it isn't easy to tell if it will work between any two
   441  configurations.`,
   442  			Advanced: true,
   443  		}, {
   444  			Name:    "disable_http2",
   445  			Default: true,
   446  			Help: `Disable drive using http2
   447  
   448  There is currently an unsolved issue with the google drive backend and
   449  HTTP/2.  HTTP/2 is therefore disabled by default for the drive backend
   450  but can be re-enabled here.  When the issue is solved this flag will
   451  be removed.
   452  
   453  See: https://github.com/rclone/rclone/issues/3631
   454  
   455  `,
   456  			Advanced: true,
   457  		}, {
   458  			Name:    "stop_on_upload_limit",
   459  			Default: false,
   460  			Help: `Make upload limit errors be fatal
   461  
   462  At the time of writing it is only possible to upload 750GB of data to
   463  Google Drive a day (this is an undocumented limit). When this limit is
   464  reached Google Drive produces a slightly different error message. When
   465  this flag is set it causes these errors to be fatal.  These will stop
   466  the in-progress sync.
   467  
   468  Note that this detection is relying on error message strings which
   469  Google don't document so it may break in the future.
   470  
   471  See: https://github.com/rclone/rclone/issues/3857
   472  `,
   473  			Advanced: true,
   474  		}, {
   475  			Name:     config.ConfigEncoding,
   476  			Help:     config.ConfigEncodingHelp,
   477  			Advanced: true,
   478  			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
   479  			// Don't encode / as it's a valid name character in drive.
   480  			Default: encoder.EncodeInvalidUtf8,
   481  		}},
   482  	})
   483  
   484  	// register duplicate MIME types first
   485  	// this allows them to be used with mime.ExtensionsByType() but
   486  	// mime.TypeByExtension() will return the later registered MIME type
   487  	for _, m := range []map[string]string{
   488  		_mimeTypeToExtensionDuplicates, _mimeTypeToExtension, _mimeTypeToExtensionLinks,
   489  	} {
   490  		for mimeType, extension := range m {
   491  			if err := mime.AddExtensionType(extension, mimeType); err != nil {
   492  				log.Fatalf("Failed to register MIME type %q: %v", mimeType, err)
   493  			}
   494  		}
   495  	}
   496  }
   497  
   498  // Options defines the configuration for this backend
   499  type Options struct {
   500  	Scope                     string               `config:"scope"`
   501  	RootFolderID              string               `config:"root_folder_id"`
   502  	ServiceAccountFile        string               `config:"service_account_file"`
   503  	// 添加一个变量
   504  	ServiceAccountFilePath    string               `config:"service_account_file_path"`
   505  	ServiceAccountCredentials string               `config:"service_account_credentials"`
   506  	TeamDriveID               string               `config:"team_drive"`
   507  	AuthOwnerOnly             bool                 `config:"auth_owner_only"`
   508  	UseTrash                  bool                 `config:"use_trash"`
   509  	SkipGdocs                 bool                 `config:"skip_gdocs"`
   510  	SkipChecksumGphotos       bool                 `config:"skip_checksum_gphotos"`
   511  	SharedWithMe              bool                 `config:"shared_with_me"`
   512  	TrashedOnly               bool                 `config:"trashed_only"`
   513  	Extensions                string               `config:"formats"`
   514  	ExportExtensions          string               `config:"export_formats"`
   515  	ImportExtensions          string               `config:"import_formats"`
   516  	AllowImportNameChange     bool                 `config:"allow_import_name_change"`
   517  	UseCreatedDate            bool                 `config:"use_created_date"`
   518  	UseSharedDate             bool                 `config:"use_shared_date"`
   519  	ListChunk                 int64                `config:"list_chunk"`
   520  	Impersonate               string               `config:"impersonate"`
   521  	AlternateExport           bool                 `config:"alternate_export"`
   522  	UploadCutoff              fs.SizeSuffix        `config:"upload_cutoff"`
   523  	ChunkSize                 fs.SizeSuffix        `config:"chunk_size"`
   524  	AcknowledgeAbuse          bool                 `config:"acknowledge_abuse"`
   525  	KeepRevisionForever       bool                 `config:"keep_revision_forever"`
   526  	SizeAsQuota               bool                 `config:"size_as_quota"`
   527  	V2DownloadMinSize         fs.SizeSuffix        `config:"v2_download_min_size"`
   528  	PacerMinSleep             fs.Duration          `config:"pacer_min_sleep"`
   529  	PacerBurst                int                  `config:"pacer_burst"`
   530  	ServerSideAcrossConfigs   bool                 `config:"server_side_across_configs"`
   531  	DisableHTTP2              bool                 `config:"disable_http2"`
   532  	StopOnUploadLimit         bool                 `config:"stop_on_upload_limit"`
   533  	Enc                       encoder.MultiEncoder `config:"encoding"`
   534  }
   535  
   536  // Fs represents a remote drive server
   537  type Fs struct {
   538  	name             string             // name of this remote
   539  	root             string             // the path we are working on
   540  	opt              Options            // parsed options
   541  	features         *fs.Features       // optional features
   542  	svc              *drive.Service     // the connection to the drive server
   543  	v2Svc            *drive_v2.Service  // used to create download links for the v2 api
   544  	client           *http.Client       // authorized client
   545  	rootFolderID     string             // the id of the root folder
   546  	dirCache         *dircache.DirCache // Map of directory path to directory id
   547  	pacer            *fs.Pacer          // To pace the API calls
   548  	exportExtensions []string           // preferred extensions to download docs
   549  	importMimeTypes  []string           // MIME types to convert to docs
   550  	isTeamDrive      bool               // true if this is a team drive
   551  	//------------------------------------------------------------
   552  	ServiceAccountFiles    	  map[string]int
   553  	waitChangeSvc sync.Mutex
   554  	FileObj *fs.Object
   555  	FileName string
   556  
   557  }
   558  
// baseObject holds the fields common to every kind of drive object
// (regular files, exported documents and link files).
type baseObject struct {
	fs           *Fs    // what this object is part of
	remote       string // The remote path
	id           string // Drive Id of this object
	modifiedDate string // RFC3339 time it was last modified
	mimeType     string // The object MIME type
	bytes        int64  // size of the object
}
// documentObject is a Google document which is downloaded by exporting
// it to a conventional format (the export extension is appended to the
// remote name).
type documentObject struct {
	baseObject
	url              string // Download URL of this object
	documentMimeType string // the original document MIME type
	extLen           int    // The length of the added export extension
}
// linkObject is a drive shortcut whose download content is generated
// locally from a link template rather than fetched from Drive.
type linkObject struct {
	baseObject
	content []byte // The file content generated by a link template
	extLen  int    // The length of the added export extension
}
   578  
// Object describes a drive object
//
// This is the regular (binary) file case, as opposed to documentObject
// and linkObject.
type Object struct {
	baseObject
	url        string // Download URL of this object
	md5sum     string // md5sum of the object
	v2Download bool   // generate v2 download link ondemand
}
   586  
   587  // ------------------------------------------------------------
   588  
   589  // Name of the remote (as passed into NewFs)
   590  func (f *Fs) Name() string {
   591  	return f.name
   592  }
   593  
   594  // Root of the remote (as passed into NewFs)
   595  func (f *Fs) Root() string {
   596  	return f.root
   597  }
   598  
   599  // String converts this Fs to a string
   600  func (f *Fs) String() string {
   601  	return fmt.Sprintf("Google drive root '%s'", f.root)
   602  }
   603  
   604  // Features returns the optional features of this Fs
   605  func (f *Fs) Features() *fs.Features {
   606  	return f.features
   607  }
   608  
   609  // shouldRetry determines whether a given err rates being retried
   610  func (f *Fs) shouldRetry(err error) (bool, error) {
   611  	if err == nil {
   612  		return false, nil
   613  	}
   614  	if fserrors.ShouldRetry(err) {
   615  		return true, err
   616  	}
   617  	switch gerr := err.(type) {
   618  	case *googleapi.Error:
   619  		if gerr.Code >= 500 && gerr.Code < 600 {
   620  			// All 5xx errors should be retried
   621  			return true, err
   622  		}
   623  		if len(gerr.Errors) > 0 {
   624  			reason := gerr.Errors[0].Reason
   625  			if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
   626  				// 如果存在 ServiceAccountFilePath,调用 changeSvc, 重试
   627  				if(f.opt.ServiceAccountFilePath != ""){
   628  					f.waitChangeSvc.Lock()
   629  					f.changeSvc()
   630  					f.waitChangeSvc.Unlock()
   631  					return true, err
   632  				}
   633  				if f.opt.StopOnUploadLimit && gerr.Errors[0].Message == "User rate limit exceeded." {
   634  					fs.Errorf(f, "Received upload limit error: %v", err)
   635  					return false, fserrors.FatalError(err)
   636  				}
   637  				return true, err
   638  			}
   639  		}
   640  	}
   641  	return false, err
   642  }
   643  
   644  // 替换 f.svc 函数
   645  func (f *Fs) changeSvc(){
   646  	opt := &f.opt;
   647  	/**
   648  	 *  获取sa文件列表
   649  	 */
   650  	if(opt.ServiceAccountFilePath != "" && len(f.ServiceAccountFiles) == 0){
   651  		f.ServiceAccountFiles = make(map[string]int)
   652  		dir_list, e := ioutil.ReadDir(opt.ServiceAccountFilePath)
   653  		if e != nil {
   654  			fmt.Println("read ServiceAccountFilePath Files error")
   655  		}
   656  		for i, v := range dir_list {
   657  			filePath := fmt.Sprintf("%s%s", opt.ServiceAccountFilePath, v.Name())
   658  			if(".json" == path.Ext(filePath)){
   659  				//fmt.Println(filePath)
   660  				f.ServiceAccountFiles[filePath] = i
   661  			}
   662  		}
   663  	}
   664  	// 如果读取文件夹后还是0 , 退出
   665  	if(len(f.ServiceAccountFiles) <= 0){
   666  		return ;
   667  	}
   668  	/**
   669  	 *  从sa文件列表 随机取一个,并删除列表中的元素
   670  	 */
   671  	r := rand.Intn(len(f.ServiceAccountFiles))
   672  	for k := range f.ServiceAccountFiles {
   673  		if r == 0 {
   674  			opt.ServiceAccountFile = k
   675  		}
   676  		r--
   677  	}
   678  	// 从库存中删除
   679  	delete(f.ServiceAccountFiles, opt.ServiceAccountFile)
   680  
   681  	/**
   682  	 * 创建 client 和 svc
   683  	 */
   684  	loadedCreds, _ := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
   685  	opt.ServiceAccountCredentials = string(loadedCreds)
   686  	oAuthClient, err := getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials))
   687  	if err != nil {
   688  		errors.Wrap(err, "failed to create oauth client from service account")
   689  	}
   690  	f.client = oAuthClient
   691  	f.svc, err = drive.New(f.client)
   692  	fmt.Println("gclone sa file:", opt.ServiceAccountFile)
   693  }
   694  
   695  // parseParse parses a drive 'url'
   696  func parseDrivePath(path string) (root string, err error) {
   697  	root = strings.Trim(path, "/")
   698  	return
   699  }
   700  
// listFn is the type of the user function called by list to process
// each File item found.
//
// Should return true to finish processing.
type listFn func(*drive.File) bool
   705  
   706  func containsString(slice []string, s string) bool {
   707  	for _, e := range slice {
   708  		if e == s {
   709  			return true
   710  		}
   711  	}
   712  	return false
   713  }
   714  
   715  // getRootID returns the canonical ID for the "root" ID
   716  func (f *Fs) getRootID() (string, error) {
   717  	var info *drive.File
   718  	var err error
   719  	err = f.pacer.CallNoRetry(func() (bool, error) {
   720  		info, err = f.svc.Files.Get("root").
   721  			Fields("id").
   722  			SupportsAllDrives(true).
   723  			Do()
   724  		return f.shouldRetry(err)
   725  	})
   726  	if err != nil {
   727  		return "", errors.Wrap(err, "couldn't find root directory ID")
   728  	}
   729  	return info.Id, nil
   730  }
   731  
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// Search params: https://developers.google.com/drive/search-parameters
//
// dirIDs are the parent directory IDs to search (ORed together); title is
// an optional exact name to match; directoriesOnly/filesOnly/includeAll
// narrow the query before it is sent to the API.
func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) {
	//f.changeSvc()
	var query []string
	if !includeAll {
		// Filter on the trashed state unless the caller wants everything.
		q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly)
		if f.opt.TrashedOnly {
			// Folders are always matched here — presumably so directories
			// can still be traversed in trashed-only mode; confirm.
			q = fmt.Sprintf("(mimeType='%s' or %s)", driveFolderType, q)
		}
		query = append(query, q)
	}
	// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents)
	// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
	// If we need to list file inside those shared folders, we must search it without sharedWithMe
	parentsQuery := bytes.NewBufferString("(")
	for _, dirID := range dirIDs {
		if dirID == "" {
			continue
		}
		// Len() > 1 means something was already written after the "(".
		if parentsQuery.Len() > 1 {
			_, _ = parentsQuery.WriteString(" or ")
		}
		if f.opt.SharedWithMe && dirID == f.rootFolderID {
			_, _ = parentsQuery.WriteString("sharedWithMe=true")
		} else {
			_, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
		}
	}
	if parentsQuery.Len() > 1 {
		_ = parentsQuery.WriteByte(')')
		query = append(query, parentsQuery.String())
	}
	// stems collects title variants with export extensions stripped, used
	// below to re-check case-insensitive API matches.
	var stems []string
	if title != "" {
		searchTitle := f.opt.Enc.FromStandardName(title)
		// Escaping the backslash isn't documented but seems to work
		searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
		searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)

		var titleQuery bytes.Buffer
		_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
		if !directoriesOnly && !f.opt.SkipGdocs {
			// If the search title has an extension that is in the export extensions add a search
			// for the filename without the extension.
			// Assume that export extensions don't contain escape sequences.
			for _, ext := range f.exportExtensions {
				if strings.HasSuffix(searchTitle, ext) {
					stems = append(stems, title[:len(title)-len(ext)])
					_, _ = fmt.Fprintf(&titleQuery, " or name='%s'", searchTitle[:len(searchTitle)-len(ext)])
				}
			}
		}
		_ = titleQuery.WriteByte(')')
		query = append(query, titleQuery.String())
	}
	if directoriesOnly {
		query = append(query, fmt.Sprintf("mimeType='%s'", driveFolderType))
	}
	if filesOnly {
		query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
	}
	list := f.svc.Files.List()
	if len(query) > 0 {
		list.Q(strings.Join(query, " and "))
		// fmt.Printf("list Query = %q\n", query)
	}
	if f.opt.ListChunk > 0 {
		list.PageSize(f.opt.ListChunk)
	}
	list.SupportsAllDrives(true)
	list.IncludeItemsFromAllDrives(true)
	if f.isTeamDrive {
		list.DriveId(f.opt.TeamDriveID)
		list.Corpora("drive")
	}
	// If using appDataFolder then need to add Spaces
	if f.rootFolderID == "appDataFolder" {
		list.Spaces("appDataFolder")
	}

	// Request only the fields we need, plus any extras implied by options.
	var fields = partialFields

	if f.opt.AuthOwnerOnly {
		fields += ",owners"
	}
	if f.opt.UseSharedDate {
		fields += ",sharedWithMeTime"
	}
	if f.opt.SkipChecksumGphotos {
		fields += ",spaces"
	}
	if f.opt.SizeAsQuota {
		fields += ",quotaBytesUsed"
	}

	fields = fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", fields)

	// Page through results until exhausted or fn signals a match.
OUTER:
	for {
		var files *drive.FileList
		err = f.pacer.Call(func() (bool, error) {
			files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
			return f.shouldRetry(err)
		})
		if err != nil {
			return false, errors.Wrap(err, "couldn't list directory")
		}
		if files.IncompleteSearch {
			fs.Errorf(f, "search result INCOMPLETE")
		}
		for _, item := range files.Files {
			item.Name = f.opt.Enc.ToStandardName(item.Name)
			// Check the case of items is correct since
			// the `=` operator is case insensitive.
			if title != "" && title != item.Name {
				found := false
				for _, stem := range stems {
					if stem == item.Name {
						found = true
						break
					}
				}
				if !found {
					continue
				}
				// A stem matched: accept only if the export name of the
				// document is exactly the requested title.
				_, exportName, _, _ := f.findExportFormat(item)
				if exportName == "" || exportName != title {
					continue
				}
			}
			if fn(item) {
				found = true
				break OUTER
			}
		}
		if files.NextPageToken == "" {
			break
		}
		list.PageToken(files.NextPageToken)
	}
	return
}
   878  
   879  // Returns true of x is a power of 2 or zero
   880  func isPowerOfTwo(x int64) bool {
   881  	switch {
   882  	case x == 0:
   883  		return true
   884  	case x < 0:
   885  		return false
   886  	default:
   887  		return (x & (x - 1)) == 0
   888  	}
   889  }
   890  
   891  // add a charset parameter to all text/* MIME types
   892  func fixMimeType(mimeTypeIn string) string {
   893  	if mimeTypeIn == "" {
   894  		return ""
   895  	}
   896  	mediaType, param, err := mime.ParseMediaType(mimeTypeIn)
   897  	if err != nil {
   898  		return mimeTypeIn
   899  	}
   900  	mimeTypeOut := mimeTypeIn
   901  	if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
   902  		param["charset"] = "utf-8"
   903  		mimeTypeOut = mime.FormatMediaType(mediaType, param)
   904  	}
   905  	if mimeTypeOut == "" {
   906  		panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
   907  	}
   908  	return mimeTypeOut
   909  }
   910  func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
   911  	out = make(map[string][]string, len(in))
   912  	for k, v := range in {
   913  		for i, mt := range v {
   914  			v[i] = fixMimeType(mt)
   915  		}
   916  		out[fixMimeType(k)] = v
   917  	}
   918  	return out
   919  }
   920  func isInternalMimeType(mimeType string) bool {
   921  	return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
   922  }
   923  func isLinkMimeType(mimeType string) bool {
   924  	return strings.HasPrefix(mimeType, "application/x-link-")
   925  }
   926  
   927  // parseExtensions parses a list of comma separated extensions
   928  // into a list of unique extensions with leading "." and a list of associated MIME types
   929  func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
   930  	for _, extensionText := range extensionsIn {
   931  		for _, extension := range strings.Split(extensionText, ",") {
   932  			extension = strings.ToLower(strings.TrimSpace(extension))
   933  			if extension == "" {
   934  				continue
   935  			}
   936  			if len(extension) > 0 && extension[0] != '.' {
   937  				extension = "." + extension
   938  			}
   939  			mt := mime.TypeByExtension(extension)
   940  			if mt == "" {
   941  				return extensions, mimeTypes, errors.Errorf("couldn't find MIME type for extension %q", extension)
   942  			}
   943  			if !containsString(extensions, extension) {
   944  				extensions = append(extensions, extension)
   945  				mimeTypes = append(mimeTypes, mt)
   946  			}
   947  		}
   948  	}
   949  	return
   950  }
   951  
// Figure out if the user wants to use a team drive
//
// Runs an interactive prompt, lists the available team drives and stores
// the chosen ID in both the config mapper and opt.
func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
	// Stop if we are running non-interactive config
	if fs.Config.AutoConfirm {
		return nil
	}
	if opt.TeamDriveID == "" {
		fmt.Printf("Configure this as a team drive?\n")
	} else {
		fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
	}
	if !config.Confirm(false) {
		return nil
	}
	// Make a throwaway client just for listing the available team drives.
	client, err := createOAuthClient(opt, name, m)
	if err != nil {
		return errors.Wrap(err, "config team drive failed to create oauth client")
	}
	svc, err := drive.New(client)
	if err != nil {
		return errors.Wrap(err, "config team drive failed to make drive client")
	}
	fmt.Printf("Fetching team drive list...\n")
	var driveIDs, driveNames []string
	listTeamDrives := svc.Teamdrives.List().PageSize(100)
	listFailed := false
	var defaultFs Fs // default Fs with default Options
	for {
		var teamDrives *drive.TeamDriveList
		err = newPacer(opt).Call(func() (bool, error) {
			teamDrives, err = listTeamDrives.Context(ctx).Do()
			return defaultFs.shouldRetry(err)
		})
		if err != nil {
			// Carry on so the user can still enter an ID by hand below.
			fmt.Printf("Listing team drives failed: %v\n", err)
			listFailed = true
			break
		}
		// NB the loop variable shadows the imported drive package here.
		for _, drive := range teamDrives.TeamDrives {
			driveIDs = append(driveIDs, drive.Id)
			driveNames = append(driveNames, drive.Name)
		}
		if teamDrives.NextPageToken == "" {
			break
		}
		listTeamDrives.PageToken(teamDrives.NextPageToken)
	}
	var driveID string
	if !listFailed && len(driveIDs) == 0 {
		fmt.Printf("No team drives found in your account")
	} else {
		// Let the user pick from the list or type an ID manually.
		driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
	}
	// Persist the choice in the config file and the live options.
	m.Set("team_drive", driveID)
	opt.TeamDriveID = driveID
	return nil
}
  1009  
// newPacer makes a pacer configured for drive
//
// It uses the Google Drive pacing algorithm with the minimum sleep and
// burst values taken from the config options.
func newPacer(opt *Options) *fs.Pacer {
	return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
}
  1014  
  1015  // getClient makes an http client according to the options
  1016  func getClient(opt *Options) *http.Client {
  1017  	t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
  1018  		if opt.DisableHTTP2 {
  1019  			t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
  1020  		}
  1021  	})
  1022  	return &http.Client{
  1023  		Transport: t,
  1024  	}
  1025  }
  1026  
  1027  func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
  1028  	scopes := driveScopes(opt.Scope)
  1029  	conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
  1030  	if err != nil {
  1031  		return nil, errors.Wrap(err, "error processing credentials")
  1032  	}
  1033  	if opt.Impersonate != "" {
  1034  		conf.Subject = opt.Impersonate
  1035  	}
  1036  	ctxWithSpecialClient := oauthutil.Context(getClient(opt))
  1037  	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
  1038  }
  1039  
  1040  func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
  1041  	var oAuthClient *http.Client
  1042  	var err error
  1043  
  1044  	// try loading service account credentials from env variable, then from a file
  1045  	if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
  1046  		loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
  1047  		if err != nil {
  1048  			return nil, errors.Wrap(err, "error opening service account credentials file")
  1049  		}
  1050  		opt.ServiceAccountCredentials = string(loadedCreds)
  1051  	}
  1052  	if opt.ServiceAccountCredentials != "" {
  1053  		oAuthClient, err = getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials))
  1054  		if err != nil {
  1055  			return nil, errors.Wrap(err, "failed to create oauth client from service account")
  1056  		}
  1057  	} else {
  1058  		oAuthClient, _, err = oauthutil.NewClientWithBaseClient(name, m, driveConfig, getClient(opt))
  1059  		if err != nil {
  1060  			return nil, errors.Wrap(err, "failed to create oauth client")
  1061  		}
  1062  	}
  1063  
  1064  	return oAuthClient, nil
  1065  }
  1066  
  1067  func checkUploadChunkSize(cs fs.SizeSuffix) error {
  1068  	if !isPowerOfTwo(int64(cs)) {
  1069  		return errors.Errorf("%v isn't a power of two", cs)
  1070  	}
  1071  	if cs < minChunkSize {
  1072  		return errors.Errorf("%s is less than %s", cs, minChunkSize)
  1073  	}
  1074  	return nil
  1075  }
  1076  
  1077  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
  1078  	err = checkUploadChunkSize(cs)
  1079  	if err == nil {
  1080  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
  1081  	}
  1082  	return
  1083  }
  1084  
// checkUploadCutoff validates the upload cutoff value.
//
// Every value is acceptable for drive so this never fails; it appears to
// exist for symmetry with checkUploadChunkSize so the option-setting
// paths stay uniform.
func checkUploadCutoff(cs fs.SizeSuffix) error {
	return nil
}
  1088  
  1089  func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
  1090  	err = checkUploadCutoff(cs)
  1091  	if err == nil {
  1092  		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
  1093  	}
  1094  	return
  1095  }
  1096  
// NewFs constructs an Fs from the path, container:path
//
// gclone extension: the path may start with "{id}" which overrides the
// configured root folder / team drive, and may even point directly at a
// single file (in which case fs.ErrorIsFile is returned).
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
	ctx := context.Background()
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	//-----------------------------------------------------------
	maybeIsFile := false
	// gclone: use a leading "{id}" in the path as the root folder/file ID.
	// NOTE(review): err from configstruct.Set is only checked after this
	// block; the block only writes fields it owns, but the ordering is
	// fragile — confirm before reordering.
	if(path != "" && path[0:1] == "{"){
		idIndex := strings.Index(path,"}")
		if(idIndex > 0){
			RootId := path[1:idIndex];
			// Append the ID to the remote name — presumably to keep
			// per-remote state distinct per ID; confirm.
			name += RootId
			//opt.ServerSideAcrossConfigs = true
			// A 33 character ID is treated as a file ID, anything else as
			// a folder/team drive ID — TODO confirm this heuristic.
			if(len(RootId) == 33){
				maybeIsFile = true
				opt.RootFolderID = RootId;
			}else{
				opt.RootFolderID = RootId;
				opt.TeamDriveID = RootId;
			}
			path = path[idIndex+1:]
		}
	}

	//-----------------------------------------------------------
	if err != nil {
		return nil, err
	}
	err = checkUploadCutoff(opt.UploadCutoff)
	if err != nil {
		return nil, errors.Wrap(err, "drive: upload cutoff")
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, errors.Wrap(err, "drive: chunk size")
	}

	oAuthClient, err := createOAuthClient(opt, name, m)
	if err != nil {
		return nil, errors.Wrap(err, "drive: failed when making oauth client")
	}

	root, err := parseDrivePath(path)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		pacer: newPacer(opt),
	}
	f.isTeamDrive = opt.TeamDriveID != ""
	f.features = (&fs.Features{
		DuplicateFiles:          true,
		ReadMimeType:            true,
		WriteMimeType:           true,
		CanHaveEmptyDirectories: true,
		ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
	}).Fill(f)

	// Create a new authorized Drive client.
	f.client = oAuthClient
	f.svc, err = drive.New(f.client)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create Drive client")
	}

	// The v2 client is only needed for the v2 download workaround.
	if f.opt.V2DownloadMinSize >= 0 {
		f.v2Svc, err = drive_v2.New(f.client)
		if err != nil {
			return nil, errors.Wrap(err, "couldn't create Drive v2 client")
		}
	}


	// set root folder for a team drive or query the user root folder
	if opt.RootFolderID != "" {
		// override root folder if set or cached in the config
		f.rootFolderID = opt.RootFolderID
	} else if f.isTeamDrive {
		f.rootFolderID = f.opt.TeamDriveID
	} else {
		// Look up the root ID and cache it in the config
		rootID, err := f.getRootID()
		if err != nil {
			if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
				// 404 means that this scope does not have permission to get the
				// root so just use "root"
				rootID = "root"
			} else {
				return nil, err
			}
		}
		f.rootFolderID = rootID
		m.Set("root_folder_id", rootID)
	}

	f.dirCache = dircache.New(root, f.rootFolderID, f)

	// Parse extensions
	if opt.Extensions != "" {
		if opt.ExportExtensions != defaultExportExtensions {
			return nil, errors.New("only one of 'formats' and 'export_formats' can be specified")
		}
		// "formats" (Extensions) is the legacy spelling of "export_formats".
		opt.Extensions, opt.ExportExtensions = "", opt.Extensions
	}
	f.exportExtensions, _, err = parseExtensions(opt.ExportExtensions, defaultExportExtensions)
	if err != nil {
		return nil, err
	}

	_, f.importMimeTypes, err = parseExtensions(opt.ImportExtensions)
	if err != nil {
		return nil, err
	}
	//------------------------------------------------------
	// gclone: if the {id} looked like a file ID, probe it; when it turns
	// out to be a non-folder, point the Fs at that single file.
	if(maybeIsFile){
		file,err := f.svc.Files.Get(opt.RootFolderID).Fields("name","id","size","mimeType").SupportsAllDrives(true).Do()
		if err == nil{
			//fmt.Println("file.MimeType", file.MimeType)
			if( "application/vnd.google-apps.folder" != file.MimeType && file.MimeType != ""){
				tempF := *f
				newRoot := ""
				tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF)
				tempF.root = newRoot
				f.dirCache = tempF.dirCache
				f.root = tempF.root

				extension, exportName, exportMimeType, isDocument := f.findExportFormat(file)
				obj, _ := f.newObjectWithExportInfo(file.Name, file, extension, exportName, exportMimeType, isDocument)
				f.root = "isFile:"+file.Name
				// NewObject returns this pre-built object directly.
				f.FileObj = &obj
				return f, fs.ErrorIsFile
			}
		}
	}
	//------------------------------------------------------

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.NewObject(ctx, remote)
		if err != nil {
			// unable to list folder so return old f
			return f, nil
		}
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		return f, fs.ErrorIsFile
	}
	// fmt.Printf("Root id %s", f.dirCache.RootID())
	return f, nil
}
  1268  
  1269  func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
  1270  	modifiedDate := info.ModifiedTime
  1271  	if f.opt.UseCreatedDate {
  1272  		modifiedDate = info.CreatedTime
  1273  	} else if f.opt.UseSharedDate && info.SharedWithMeTime != "" {
  1274  		modifiedDate = info.SharedWithMeTime
  1275  	}
  1276  	size := info.Size
  1277  	if f.opt.SizeAsQuota {
  1278  		size = info.QuotaBytesUsed
  1279  	}
  1280  	return baseObject{
  1281  		fs:           f,
  1282  		remote:       remote,
  1283  		id:           info.Id,
  1284  		modifiedDate: modifiedDate,
  1285  		mimeType:     info.MimeType,
  1286  		bytes:        size,
  1287  	}
  1288  }
  1289  
  1290  // newRegularObject creates a fs.Object for a normal drive.File
  1291  func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
  1292  	// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
  1293  	if f.opt.SkipChecksumGphotos {
  1294  		for _, space := range info.Spaces {
  1295  			if space == "photos" {
  1296  				info.Md5Checksum = ""
  1297  				break
  1298  			}
  1299  		}
  1300  	}
  1301  	return &Object{
  1302  		baseObject: f.newBaseObject(remote, info),
  1303  		url:        fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, info.Id),
  1304  		md5sum:     strings.ToLower(info.Md5Checksum),
  1305  		v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
  1306  	}
  1307  }
  1308  
  1309  // newDocumentObject creates a fs.Object for a google docs drive.File
  1310  func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
  1311  	mediaType, _, err := mime.ParseMediaType(exportMimeType)
  1312  	if err != nil {
  1313  		return nil, err
  1314  	}
  1315  	url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, info.Id, url.QueryEscape(mediaType))
  1316  	if f.opt.AlternateExport {
  1317  		switch info.MimeType {
  1318  		case "application/vnd.google-apps.drawing":
  1319  			url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", info.Id, extension[1:])
  1320  		case "application/vnd.google-apps.document":
  1321  			url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", info.Id, extension[1:])
  1322  		case "application/vnd.google-apps.spreadsheet":
  1323  			url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", info.Id, extension[1:])
  1324  		case "application/vnd.google-apps.presentation":
  1325  			url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", info.Id, extension[1:])
  1326  		}
  1327  	}
  1328  	baseObject := f.newBaseObject(remote+extension, info)
  1329  	baseObject.bytes = -1
  1330  	baseObject.mimeType = exportMimeType
  1331  	return &documentObject{
  1332  		baseObject:       baseObject,
  1333  		url:              url,
  1334  		documentMimeType: info.MimeType,
  1335  		extLen:           len(extension),
  1336  	}, nil
  1337  }
  1338  
  1339  // newLinkObject creates a fs.Object that represents a link a google docs drive.File
  1340  func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
  1341  	t := linkTemplate(exportMimeType)
  1342  	if t == nil {
  1343  		return nil, errors.Errorf("unsupported link type %s", exportMimeType)
  1344  	}
  1345  	var buf bytes.Buffer
  1346  	err := t.Execute(&buf, struct {
  1347  		URL, Title string
  1348  	}{
  1349  		info.WebViewLink, info.Name,
  1350  	})
  1351  	if err != nil {
  1352  		return nil, errors.Wrap(err, "executing template failed")
  1353  	}
  1354  
  1355  	baseObject := f.newBaseObject(remote+extension, info)
  1356  	baseObject.bytes = int64(buf.Len())
  1357  	baseObject.mimeType = exportMimeType
  1358  	return &linkObject{
  1359  		baseObject: baseObject,
  1360  		content:    buf.Bytes(),
  1361  		extLen:     len(extension),
  1362  	}, nil
  1363  }
  1364  
// newObjectWithInfo creates a fs.Object for any drive.File
//
// When the drive.File cannot be represented as a fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
	// If item has MD5 sum or a length it is a file stored on drive
	if info.Md5Checksum != "" || info.Size > 0 {
		return f.newRegularObject(remote, info), nil
	}

	// Otherwise work out how (and whether) to export it as a document.
	extension, exportName, exportMimeType, isDocument := f.findExportFormat(info)
	return f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
}
  1377  
  1378  // newObjectWithExportInfo creates a fs.Object for any drive.File and the result of findExportFormat
  1379  //
  1380  // When the drive.File cannot be represented as a fs.Object it will return (nil, nil).
  1381  func (f *Fs) newObjectWithExportInfo(
  1382  	remote string, info *drive.File,
  1383  	extension, exportName, exportMimeType string, isDocument bool) (fs.Object, error) {
  1384  	switch {
  1385  	case info.Md5Checksum != "" || info.Size > 0:
  1386  		// If item has MD5 sum or a length it is a file stored on drive
  1387  		return f.newRegularObject(remote, info), nil
  1388  	case f.opt.SkipGdocs:
  1389  		fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
  1390  		return nil, nil
  1391  	default:
  1392  		// If item MimeType is in the ExportFormats then it is a google doc
  1393  		if !isDocument {
  1394  			fs.Debugf(remote, "Ignoring unknown document type %q", info.MimeType)
  1395  			return nil, nil
  1396  		}
  1397  		if extension == "" {
  1398  			fs.Debugf(remote, "No export formats found for %q", info.MimeType)
  1399  			return nil, nil
  1400  		}
  1401  		if isLinkMimeType(exportMimeType) {
  1402  			return f.newLinkObject(remote, info, extension, exportMimeType)
  1403  		}
  1404  		return f.newDocumentObject(remote, info, extension, exportMimeType)
  1405  	}
  1406  }
  1407  
  1408  // NewObject finds the Object at remote.  If it can't be found
  1409  // it returns the error fs.ErrorObjectNotFound.
  1410  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
  1411  	//------------------------------------
  1412  	if(f.FileObj != nil){
  1413  		return *f.FileObj, nil
  1414  	}
  1415  	//-------------------------------------
  1416  	info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
  1417  	if err != nil {
  1418  		return nil, err
  1419  	}
  1420  
  1421  	remote = remote[:len(remote)-len(extension)]
  1422  	obj, err := f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
  1423  	switch {
  1424  	case err != nil:
  1425  		return nil, err
  1426  	case obj == nil:
  1427  		return nil, fs.ErrorObjectNotFound
  1428  	default:
  1429  		return obj, nil
  1430  	}
  1431  }
  1432  
// FindLeaf finds a directory of name leaf in the folder with ID pathID
//
// Returns the found directory's ID and whether it was found.
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
	// Find the leaf in pathID
	found, err = f.list(ctx, []string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
		if !f.opt.SkipGdocs {
			// A google doc may match leaf via its export name instead of
			// its raw name.
			_, exportName, _, isDocument := f.findExportFormat(item)
			if exportName == leaf {
				pathIDOut = item.Id
				return true
			}
			if isDocument {
				// Documents only ever match on the export name.
				return false
			}
		}
		if item.Name == leaf {
			pathIDOut = item.Id
			return true
		}
		return false
	})
	return pathIDOut, found, err
}
  1455  
// CreateDir makes a directory with pathID as parent and name leaf
//
// The leaf name is encoded into drive's character set first and the new
// directory's ID is returned.
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
	leaf = f.opt.Enc.FromStandardName(leaf)
	// fmt.Println("Making", path)
	// Define the metadata for the directory we are going to create.
	createInfo := &drive.File{
		Name:        leaf,
		Description: leaf,
		MimeType:    driveFolderType,
		Parents:     []string{pathID},
	}
	var info *drive.File
	// Create via the pacer so we respect drive's rate limits.
	err = f.pacer.Call(func() (bool, error) {
		info, err = f.svc.Files.Create(createInfo).
			Fields("id").
			SupportsAllDrives(true).
			Do()
		return f.shouldRetry(err)
	})
	if err != nil {
		return "", err
	}
	return info.Id, nil
}
  1480  
  1481  // isAuthOwned checks if any of the item owners is the authenticated owner
  1482  func isAuthOwned(item *drive.File) bool {
  1483  	for _, owner := range item.Owners {
  1484  		if owner.Me {
  1485  			return true
  1486  		}
  1487  	}
  1488  	return false
  1489  }
  1490  
// linkTemplate returns the Template for a MIME type or nil if the
// MIME type does not represent a link
func linkTemplate(mt string) *template.Template {
	// Parse the link templates exactly once, on first use.
	templatesOnce.Do(func() {
		_linkTemplates = map[string]*template.Template{
			"application/x-link-desktop": template.Must(
				template.New("application/x-link-desktop").Parse(desktopTemplate)),
			"application/x-link-html": template.Must(
				template.New("application/x-link-html").Parse(htmlTemplate)),
			"application/x-link-url": template.Must(
				template.New("application/x-link-url").Parse(urlTemplate)),
			"application/x-link-webloc": template.Must(
				template.New("application/x-link-webloc").Parse(weblocTemplate)),
		}
	})
	return _linkTemplates[mt]
}
// fetchFormats populates the package-level _exportFormats and
// _importFormats caches from the drive About endpoint, once only.
//
// On failure the caches are set to empty maps so later lookups simply
// find no formats rather than retrying.
func (f *Fs) fetchFormats() {
	fetchFormatsOnce.Do(func() {
		var about *drive.About
		var err error
		err = f.pacer.Call(func() (bool, error) {
			about, err = f.svc.About.Get().
				Fields("exportFormats,importFormats").
				Do()
			return f.shouldRetry(err)
		})
		if err != nil {
			fs.Errorf(f, "Failed to get Drive exportFormats and importFormats: %v", err)
			_exportFormats = map[string][]string{}
			_importFormats = map[string][]string{}
			return
		}
		// Normalise charsets on the way into the caches.
		_exportFormats = fixMimeTypeMap(about.ExportFormats)
		_importFormats = fixMimeTypeMap(about.ImportFormats)
	})
}
  1528  
// exportFormats returns the export formats from drive, fetching them
// if necessary.
//
// if the fetch fails then it will not export any drive formats
//
// The returned map is the shared package-level cache — treat it as
// read-only.
func (f *Fs) exportFormats() map[string][]string {
	f.fetchFormats()
	return _exportFormats
}
  1537  
// importFormats returns the import formats from drive, fetching them
// if necessary.
//
// if the fetch fails then it will not import any drive formats
//
// The returned map is the shared package-level cache — treat it as
// read-only.
func (f *Fs) importFormats() map[string][]string {
	f.fetchFormats()
	return _importFormats
}
  1546  
  1547  // findExportFormatByMimeType works out the optimum export settings
  1548  // for the given MIME type.
  1549  //
  1550  // Look through the exportExtensions and find the first format that can be
  1551  // converted.  If none found then return ("", "", false)
  1552  func (f *Fs) findExportFormatByMimeType(itemMimeType string) (
  1553  	extension, mimeType string, isDocument bool) {
  1554  	exportMimeTypes, isDocument := f.exportFormats()[itemMimeType]
  1555  	if isDocument {
  1556  		for _, _extension := range f.exportExtensions {
  1557  			_mimeType := mime.TypeByExtension(_extension)
  1558  			if isLinkMimeType(_mimeType) {
  1559  				return _extension, _mimeType, true
  1560  			}
  1561  			for _, emt := range exportMimeTypes {
  1562  				if emt == _mimeType {
  1563  					return _extension, emt, true
  1564  				}
  1565  				if _mimeType == _mimeTypeCustomTransform[emt] {
  1566  					return _extension, emt, true
  1567  				}
  1568  			}
  1569  		}
  1570  	}
  1571  
  1572  	// else return empty
  1573  	return "", "", isDocument
  1574  }
  1575  
  1576  // findExportFormatByMimeType works out the optimum export settings
  1577  // for the given drive.File.
  1578  //
  1579  // Look through the exportExtensions and find the first format that can be
  1580  // converted.  If none found then return ("", "", "", false)
  1581  func (f *Fs) findExportFormat(item *drive.File) (extension, filename, mimeType string, isDocument bool) {
  1582  	extension, mimeType, isDocument = f.findExportFormatByMimeType(item.MimeType)
  1583  	if extension != "" {
  1584  		filename = item.Name + extension
  1585  	}
  1586  	return
  1587  }
  1588  
  1589  // findImportFormat finds the matching upload MIME type for a file
  1590  // If the given MIME type is in importMimeTypes, the matching upload
  1591  // MIME type is returned
  1592  //
  1593  // When no match is found "" is returned.
  1594  func (f *Fs) findImportFormat(mimeType string) string {
  1595  	mimeType = fixMimeType(mimeType)
  1596  	ifs := f.importFormats()
  1597  	for _, mt := range f.importMimeTypes {
  1598  		if mt == mimeType {
  1599  			importMimeTypes := ifs[mimeType]
  1600  			if l := len(importMimeTypes); l > 0 {
  1601  				if l > 1 {
  1602  					fs.Infof(f, "found %d import formats for %q: %q", l, mimeType, importMimeTypes)
  1603  				}
  1604  				return importMimeTypes[0]
  1605  			}
  1606  		}
  1607  	}
  1608  	return ""
  1609  }
  1610  
  1611  // List the objects and directories in dir into entries.  The
  1612  // entries can be returned in any order but should be for a
  1613  // complete directory.
  1614  //
  1615  // dir should be "" to list the root, and should not have
  1616  // trailing slashes.
  1617  //
  1618  // This should return ErrDirNotFound if the directory isn't
  1619  // found.
  1620  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
  1621  	err = f.dirCache.FindRoot(ctx, false)
  1622  	if err != nil {
  1623  		return nil, err
  1624  	}
  1625  	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
  1626  	if err != nil {
  1627  		return nil, err
  1628  	}
  1629  
  1630  	var iErr error
  1631  	_, err = f.list(ctx, []string{directoryID}, "", false, false, false, func(item *drive.File) bool {
  1632  		entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item)
  1633  		if err != nil {
  1634  			iErr = err
  1635  			return true
  1636  		}
  1637  		if entry != nil {
  1638  			entries = append(entries, entry)
  1639  		}
  1640  		return false
  1641  	})
  1642  	if err != nil {
  1643  		return nil, err
  1644  	}
  1645  	if iErr != nil {
  1646  		return nil, iErr
  1647  	}
  1648  	// If listing the root of a teamdrive and got no entries,
  1649  	// double check we have access
  1650  	if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
  1651  		err = f.teamDriveOK(ctx)
  1652  		if err != nil {
  1653  			return nil, err
  1654  		}
  1655  	}
  1656  	return entries, nil
  1657  }
  1658  
// listREntry is a task to be executed by a listRRunner
type listREntry struct {
	id, path string // directory ID and the remote path it corresponds to
}
  1663  
  1664  // listRSlices is a helper struct to sort two slices at once
  1665  type listRSlices struct {
  1666  	dirs  []string
  1667  	paths []string
  1668  }
  1669  
  1670  func (s listRSlices) Sort() {
  1671  	sort.Sort(s)
  1672  }
  1673  
  1674  func (s listRSlices) Len() int {
  1675  	return len(s.dirs)
  1676  }
  1677  
  1678  func (s listRSlices) Swap(i, j int) {
  1679  	s.dirs[i], s.dirs[j] = s.dirs[j], s.dirs[i]
  1680  	s.paths[i], s.paths[j] = s.paths[j], s.paths[i]
  1681  }
  1682  
  1683  func (s listRSlices) Less(i, j int) bool {
  1684  	return s.dirs[i] < s.dirs[j]
  1685  }
  1686  
// listRRunner will read dirIDs from the in channel, perform the file listing and call cb with each DirEntry.
//
// In each cycle it will read up to grouping entries from the in channel without blocking.
// If an error occurs it will be sent to the out channel and then return. Once the in channel is closed,
// nil is sent to the out channel and the function returns.
func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan listREntry, out chan<- error, cb func(fs.DirEntry) error, grouping int) {
	var dirs []string
	var paths []string

	for dir := range in {
		// Start the batch with the entry just received, reusing the
		// backing arrays from the previous iteration.
		dirs = append(dirs[:0], dir.id)
		paths = append(paths[:0], dir.path)
		// Opportunistically drain up to grouping-1 more queued
		// directories so they can be listed in a single API call.
	waitloop:
		for i := 1; i < grouping; i++ {
			select {
			case d, ok := <-in:
				if !ok {
					break waitloop
				}
				dirs = append(dirs, d.id)
				paths = append(paths, d.path)
			default:
			}
		}
		// Sort dirs (with paths kept in step) so the parent lookup
		// below can use a binary search.
		listRSlices{dirs, paths}.Sort()
		var iErr error
		_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
			for _, parent := range item.Parents {
				var i int
				// If only one item in paths then no need to search for the ID
				// assuming google drive is doing its job properly.
				//
				// Note that we at the root when len(paths) == 1 && paths[0] == ""
				if len(paths) == 1 {
					// don't check parents at root because
					// - shared with me items have no parents at the root
					// - if using a root alias, eg "root" or "appDataFolder" the ID won't match
					i = 0
				} else {
					// only handle parents that are in the requested dirs list if not at root
					i = sort.SearchStrings(dirs, parent)
					if i == len(dirs) || dirs[i] != parent {
						continue
					}
				}
				remote := path.Join(paths[i], item.Name)
				entry, err := f.itemToDirEntry(remote, item)
				if err != nil {
					iErr = err
					return true
				}

				err = cb(entry)
				if err != nil {
					iErr = err
					return true
				}
			}
			return false
		})
		// The caller added one to the WaitGroup for every queued
		// directory, so mark each directory in this batch as done.
		for range dirs {
			wg.Done()
		}

		if iErr != nil {
			out <- iErr
			return
		}

		if err != nil {
			out <- err
			return
		}
	}
	out <- nil
}
  1763  
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order.  If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	const (
		grouping    = 50   // directories to batch into one listing call
		inputBuffer = 1000 // capacity of the work queue channel
	)

	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		return err
	}
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}

	mu := sync.Mutex{} // protects in and overflow
	wg := sync.WaitGroup{}
	in := make(chan listREntry, inputBuffer)
	out := make(chan error, fs.Config.Checkers)
	list := walk.NewListRHelper(callback)
	overflow := []listREntry{}
	listed := 0

	// cb is called by the runners for each entry found.  Each
	// directory found is queued for listing - on the in channel if
	// there is room, otherwise on the overflow slice - and every
	// entry is passed on to the list helper.
	cb := func(entry fs.DirEntry) error {
		mu.Lock()
		defer mu.Unlock()
		if d, isDir := entry.(*fs.Dir); isDir && in != nil {
			select {
			case in <- listREntry{d.ID(), d.Remote()}:
				wg.Add(1)
			default:
				overflow = append(overflow, listREntry{d.ID(), d.Remote()})
			}
		}
		listed++
		return list.Add(entry)
	}

	// Seed the work queue with the starting directory
	wg.Add(1)
	in <- listREntry{directoryID, dir}

	for i := 0; i < fs.Config.Checkers; i++ {
		go f.listRRunner(ctx, &wg, in, out, cb, grouping)
	}
	go func() {
		// wait until the all directories are processed
		wg.Wait()
		// if the input channel overflowed add the collected entries to the channel now
		for len(overflow) > 0 {
			mu.Lock()
			l := len(overflow)
			// only fill half of the channel to prevent entries being put into overflow again
			if l > inputBuffer/2 {
				l = inputBuffer / 2
			}
			wg.Add(l)
			for _, d := range overflow[:l] {
				in <- d
			}
			overflow = overflow[l:]
			mu.Unlock()

			// wait again for the completion of all directories
			wg.Wait()
		}
		mu.Lock()
		if in != nil {
			// notify all workers to exit
			close(in)
			in = nil
		}
		mu.Unlock()
	}()
	// wait for all workers to finish - each sends exactly one
	// (possibly nil) error on out before exiting
	for i := 0; i < fs.Config.Checkers; i++ {
		e := <-out
		mu.Lock()
		// if one worker returns an error early, close the input so all other workers exit
		if e != nil && in != nil {
			err = e
			close(in)
			in = nil
		}
		mu.Unlock()
	}

	close(out)
	if err != nil {
		return err
	}

	err = list.Flush()
	if err != nil {
		return err
	}

	// If listing the root of a teamdrive and got no entries,
	// double check we have access
	if f.isTeamDrive && listed == 0 && f.root == "" && dir == "" {
		err = f.teamDriveOK(ctx)
		if err != nil {
			return err
		}
	}

	return nil
}
  1887  
  1888  // itemToDirEntry converts a drive.File to a fs.DirEntry.
  1889  // When the drive.File cannot be represented as a fs.DirEntry
  1890  // (nil, nil) is returned.
  1891  func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error) {
  1892  	switch {
  1893  	case item.MimeType == driveFolderType:
  1894  		// cache the directory ID for later lookups
  1895  		f.dirCache.Put(remote, item.Id)
  1896  		when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
  1897  		d := fs.NewDir(remote, when).SetID(item.Id)
  1898  		return d, nil
  1899  	case f.opt.AuthOwnerOnly && !isAuthOwned(item):
  1900  		// ignore object
  1901  	default:
  1902  		return f.newObjectWithInfo(remote, item)
  1903  	}
  1904  	return nil, nil
  1905  }
  1906  
  1907  // Creates a drive.File info from the parameters passed in.
  1908  //
  1909  // Used to create new objects
  1910  func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Time) (*drive.File, error) {
  1911  	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
  1912  	if err != nil {
  1913  		return nil, err
  1914  	}
  1915  
  1916  	leaf = f.opt.Enc.FromStandardName(leaf)
  1917  	// Define the metadata for the file we are going to create.
  1918  	createInfo := &drive.File{
  1919  		Name:         leaf,
  1920  		Description:  leaf,
  1921  		Parents:      []string{directoryID},
  1922  		ModifiedTime: modTime.Format(timeFormatOut),
  1923  	}
  1924  	return createInfo, nil
  1925  }
  1926  
  1927  // Put the object
  1928  //
  1929  // Copy the reader in to the new object which is returned
  1930  //
  1931  // The new object may have been created if an error is returned
  1932  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1933  	exisitingObj, err := f.NewObject(ctx, src.Remote())
  1934  	switch err {
  1935  	case nil:
  1936  		return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
  1937  	case fs.ErrorObjectNotFound:
  1938  		// Not found so create it
  1939  		return f.PutUnchecked(ctx, in, src, options...)
  1940  	default:
  1941  		return nil, err
  1942  	}
  1943  }
  1944  
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// Delegates to Put - a negative size falls through to the chunked
// upload path there.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}
  1949  
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime(ctx)
	srcMimeType := fs.MimeTypeFromName(remote)
	srcExt := path.Ext(remote)
	exportExt := ""
	importMimeType := ""

	// Work out whether the file should be imported as a google
	// document (unless gdocs handling is skipped)
	if f.importMimeTypes != nil && !f.opt.SkipGdocs {
		importMimeType = f.findImportFormat(srcMimeType)

		if isInternalMimeType(importMimeType) {
			// Importing as a google doc - strip the extension
			// from the remote name
			remote = remote[:len(remote)-len(srcExt)]

			exportExt, _, _ = f.findExportFormatByMimeType(importMimeType)
			if exportExt == "" {
				return nil, errors.Errorf("No export format found for %q", importMimeType)
			}
			if exportExt != srcExt && !f.opt.AllowImportNameChange {
				return nil, errors.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
			}
		}
	}

	createInfo, err := f.createFileInfo(ctx, remote, modTime)
	if err != nil {
		return nil, err
	}
	// Setting an import MIME type asks drive to convert on upload
	if importMimeType != "" {
		createInfo.MimeType = importMimeType
	} else {
		createInfo.MimeType = fs.MimeTypeFromName(remote)
	}

	var info *drive.File
	if size >= 0 && size < int64(f.opt.UploadCutoff) {
		// Make the API request to upload metadata and file data.
		// Don't retry, return a retry error instead
		err = f.pacer.CallNoRetry(func() (bool, error) {
			info, err = f.svc.Files.Create(createInfo).
				Media(in, googleapi.ContentType(srcMimeType)).
				Fields(partialFields).
				SupportsAllDrives(true).
				KeepRevisionForever(f.opt.KeepRevisionForever).
				Do()
			return f.shouldRetry(err)
		})
		if err != nil {
			return nil, err
		}
	} else {
		// Upload the file in chunks
		info, err = f.Upload(ctx, in, size, srcMimeType, "", remote, createInfo)
		if err != nil {
			return nil, err
		}
	}
	return f.newObjectWithInfo(remote, info)
}
  2014  
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	if len(dirs) < 2 {
		// Nothing to merge
		return nil
	}
	dstDir := dirs[0]
	for _, srcDir := range dirs[1:] {
		// list the objects
		infos := []*drive.File{}
		_, err := f.list(ctx, []string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool {
			infos = append(infos, info)
			return false // keep listing
		})
		if err != nil {
			return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
		}
		// move them into place
		for _, info := range infos {
			fs.Infof(srcDir, "merging %q", info.Name)
			// Move the file into the destination by swapping its parent
			err = f.pacer.Call(func() (bool, error) {
				_, err = f.svc.Files.Update(info.Id, nil).
					RemoveParents(srcDir.ID()).
					AddParents(dstDir.ID()).
					Fields("").
					SupportsAllDrives(true).
					Do()
				return f.shouldRetry(err)
			})
			if err != nil {
				return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
			}
		}
		// rmdir (into trash) the now empty source directory
		fs.Infof(srcDir, "removing empty directory")
		err = f.rmdir(ctx, srcDir.ID(), true)
		if err != nil {
			return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
		}
	}
	return nil
}
  2058  
  2059  // Mkdir creates the container if it doesn't exist
  2060  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
  2061  	err := f.dirCache.FindRoot(ctx, true)
  2062  	if err != nil {
  2063  		return err
  2064  	}
  2065  	if dir != "" {
  2066  		_, err = f.dirCache.FindDir(ctx, dir, true)
  2067  	}
  2068  	return err
  2069  }
  2070  
  2071  // Rmdir deletes a directory unconditionally by ID
  2072  func (f *Fs) rmdir(ctx context.Context, directoryID string, useTrash bool) error {
  2073  	return f.pacer.Call(func() (bool, error) {
  2074  		var err error
  2075  		if useTrash {
  2076  			info := drive.File{
  2077  				Trashed: true,
  2078  			}
  2079  			_, err = f.svc.Files.Update(directoryID, &info).
  2080  				Fields("").
  2081  				SupportsAllDrives(true).
  2082  				Do()
  2083  		} else {
  2084  			err = f.svc.Files.Delete(directoryID).
  2085  				Fields("").
  2086  				SupportsAllDrives(true).
  2087  				Do()
  2088  		}
  2089  		return f.shouldRetry(err)
  2090  	})
  2091  }
  2092  
  2093  // Rmdir deletes a directory
  2094  //
  2095  // Returns an error if it isn't empty
  2096  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
  2097  	root := path.Join(f.root, dir)
  2098  	dc := f.dirCache
  2099  	directoryID, err := dc.FindDir(ctx, dir, false)
  2100  	if err != nil {
  2101  		return err
  2102  	}
  2103  	var trashedFiles = false
  2104  	found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
  2105  		if !item.Trashed {
  2106  			fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
  2107  			return true
  2108  		}
  2109  		fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name)
  2110  		trashedFiles = true
  2111  		return false
  2112  	})
  2113  	if err != nil {
  2114  		return err
  2115  	}
  2116  	if found {
  2117  		return errors.Errorf("directory not empty")
  2118  	}
  2119  	if root != "" {
  2120  		// trash the directory if it had trashed files
  2121  		// in or the user wants to trash, otherwise
  2122  		// delete it.
  2123  		err = f.rmdir(ctx, directoryID, trashedFiles || f.opt.UseTrash)
  2124  		if err != nil {
  2125  			return err
  2126  		}
  2127  	}
  2128  	f.dirCache.FlushDir(dir)
  2129  	if err != nil {
  2130  		return err
  2131  	}
  2132  	return nil
  2133  }
  2134  
// Precision of the object storage system
//
// Returns time.Millisecond - the granularity used for ModifiedTime.
func (f *Fs) Precision() time.Duration {
	return time.Millisecond
}
  2139  
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	var srcObj *baseObject
	ext := ""
	// Work out the base object and the export extension (if any)
	// for the source object type
	switch src := src.(type) {
	case *Object:
		srcObj = &src.baseObject
	case *documentObject:
		srcObj, ext = &src.baseObject, src.ext()
	case *linkObject:
		srcObj, ext = &src.baseObject, src.ext()
	default:
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	if ext != "" {
		// The destination must carry the same export extension as
		// the source; strip it before the server side copy
		if !strings.HasSuffix(remote, ext) {
			fs.Debugf(src, "Can't copy - not same document type")
			return nil, fs.ErrorCantCopy
		}
		remote = remote[:len(remote)-len(ext)]
	}

	// Look to see if there is an existing object
	existingObject, _ := f.NewObject(ctx, remote)

	createInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
	if err != nil {
		return nil, err
	}

	var info *drive.File
	err = f.pacer.Call(func() (bool, error) {
		info, err = f.svc.Files.Copy(srcObj.id, createInfo).
			Fields(partialFields).
			SupportsAllDrives(true).
			KeepRevisionForever(f.opt.KeepRevisionForever).
			Do()
		return f.shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	newObject, err := f.newObjectWithInfo(remote, info)
	if err != nil {
		return nil, err
	}
	// Only remove the pre-existing object after the copy has
	// succeeded, so a failed copy can't lose data
	if existingObject != nil {
		err = existingObject.Remove(ctx)
		if err != nil {
			fs.Errorf(existingObject, "Failed to remove existing object after copy: %v", err)
		}
	}
	return newObject, nil
}
  2204  
  2205  // Purge deletes all the files and the container
  2206  //
  2207  // Optional interface: Only implement this if you have a way of
  2208  // deleting all the files quicker than just running Remove() on the
  2209  // result of List()
  2210  func (f *Fs) Purge(ctx context.Context) error {
  2211  	if f.root == "" {
  2212  		return errors.New("can't purge root directory")
  2213  	}
  2214  	if f.opt.TrashedOnly {
  2215  		return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
  2216  	}
  2217  	err := f.dirCache.FindRoot(ctx, false)
  2218  	if err != nil {
  2219  		return err
  2220  	}
  2221  	err = f.pacer.Call(func() (bool, error) {
  2222  		if f.opt.UseTrash {
  2223  			info := drive.File{
  2224  				Trashed: true,
  2225  			}
  2226  			_, err = f.svc.Files.Update(f.dirCache.RootID(), &info).
  2227  				Fields("").
  2228  				SupportsAllDrives(true).
  2229  				Do()
  2230  		} else {
  2231  			err = f.svc.Files.Delete(f.dirCache.RootID()).
  2232  				Fields("").
  2233  				SupportsAllDrives(true).
  2234  				Do()
  2235  		}
  2236  		return f.shouldRetry(err)
  2237  	})
  2238  	f.dirCache.ResetRoot()
  2239  	if err != nil {
  2240  		return err
  2241  	}
  2242  	return nil
  2243  }
  2244  
  2245  // CleanUp empties the trash
  2246  func (f *Fs) CleanUp(ctx context.Context) error {
  2247  	err := f.pacer.Call(func() (bool, error) {
  2248  		err := f.svc.Files.EmptyTrash().Context(ctx).Do()
  2249  		return f.shouldRetry(err)
  2250  	})
  2251  
  2252  	if err != nil {
  2253  		return err
  2254  	}
  2255  	return nil
  2256  }
  2257  
// teamDriveOK checks to see if we can access the team drive
//
// Returns nil for a non team drive remote.
func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
	if !f.isTeamDrive {
		return nil
	}
	var td *drive.Drive
	err = f.pacer.Call(func() (bool, error) {
		// Reading the drive metadata proves we have access
		td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
		return f.shouldRetry(err)
	})
	if err != nil {
		return errors.Wrap(err, "failed to get Team/Shared Drive info")
	}
	fs.Debugf(f, "read info from team drive %q", td.Name)
	return err
}
  2274  
  2275  // About gets quota information
  2276  func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
  2277  	if f.isTeamDrive {
  2278  		err := f.teamDriveOK(ctx)
  2279  		if err != nil {
  2280  			return nil, err
  2281  		}
  2282  		// Teamdrives don't appear to have a usage API so just return empty
  2283  		return &fs.Usage{}, nil
  2284  	}
  2285  	var about *drive.About
  2286  	var err error
  2287  	err = f.pacer.Call(func() (bool, error) {
  2288  		about, err = f.svc.About.Get().Fields("storageQuota").Context(ctx).Do()
  2289  		return f.shouldRetry(err)
  2290  	})
  2291  	if err != nil {
  2292  		return nil, errors.Wrap(err, "failed to get Drive storageQuota")
  2293  	}
  2294  	q := about.StorageQuota
  2295  	usage := &fs.Usage{
  2296  		Used:    fs.NewUsageValue(q.UsageInDrive),           // bytes in use
  2297  		Trashed: fs.NewUsageValue(q.UsageInDriveTrash),      // bytes in trash
  2298  		Other:   fs.NewUsageValue(q.Usage - q.UsageInDrive), // other usage eg gmail in drive
  2299  	}
  2300  	if q.Limit > 0 {
  2301  		usage.Total = fs.NewUsageValue(q.Limit)          // quota of bytes that can be used
  2302  		usage.Free = fs.NewUsageValue(q.Limit - q.Usage) // bytes which can be uploaded before reaching the quota
  2303  	}
  2304  	return usage, nil
  2305  }
  2306  
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	var srcObj *baseObject
	ext := ""
	// Work out the base object and the export extension (if any)
	// for the source object type
	switch src := src.(type) {
	case *Object:
		srcObj = &src.baseObject
	case *documentObject:
		srcObj, ext = &src.baseObject, src.ext()
	case *linkObject:
		srcObj, ext = &src.baseObject, src.ext()
	default:
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	if ext != "" {
		// The destination must carry the same export extension as
		// the source; strip it before the server side move
		if !strings.HasSuffix(remote, ext) {
			fs.Debugf(src, "Can't move - not same document type")
			return nil, fs.ErrorCantMove
		}
		remote = remote[:len(remote)-len(ext)]
	}

	// Find the source's parent directory ID so it can be removed
	_, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, src.Remote(), false)
	if err != nil {
		return nil, err
	}

	// Temporary Object under construction
	dstInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
	if err != nil {
		return nil, err
	}
	// The new parents are passed via AddParents rather than in the
	// update metadata
	dstParents := strings.Join(dstInfo.Parents, ",")
	dstInfo.Parents = nil

	// Do the move
	var info *drive.File
	err = f.pacer.Call(func() (bool, error) {
		info, err = f.svc.Files.Update(srcObj.id, dstInfo).
			RemoveParents(srcParentID).
			AddParents(dstParents).
			Fields(partialFields).
			SupportsAllDrives(true).
			Do()
		return f.shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}

	return f.newObjectWithInfo(remote, info)
}
  2369  
  2370  // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
  2371  func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
  2372  	id, err := f.dirCache.FindDir(ctx, remote, false)
  2373  	if err == nil {
  2374  		fs.Debugf(f, "attempting to share directory '%s'", remote)
  2375  	} else {
  2376  		fs.Debugf(f, "attempting to share single file '%s'", remote)
  2377  		o, err := f.NewObject(ctx, remote)
  2378  		if err != nil {
  2379  			return "", err
  2380  		}
  2381  		id = o.(fs.IDer).ID()
  2382  	}
  2383  
  2384  	permission := &drive.Permission{
  2385  		AllowFileDiscovery: false,
  2386  		Role:               "reader",
  2387  		Type:               "anyone",
  2388  	}
  2389  
  2390  	err = f.pacer.Call(func() (bool, error) {
  2391  		// TODO: On TeamDrives this might fail if lacking permissions to change ACLs.
  2392  		// Need to either check `canShare` attribute on the object or see if a sufficient permission is already present.
  2393  		_, err = f.svc.Permissions.Create(id, permission).
  2394  			Fields("").
  2395  			SupportsAllDrives(true).
  2396  			Do()
  2397  		return f.shouldRetry(err)
  2398  	})
  2399  	if err != nil {
  2400  		return "", err
  2401  	}
  2402  	return fmt.Sprintf("https://drive.google.com/open?id=%s", id), nil
  2403  }
  2404  
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := path.Join(srcFs.root, srcRemote)
	dstPath := path.Join(f.root, dstRemote)

	// Refuse to move to or from the root
	if srcPath == "" || dstPath == "" {
		fs.Debugf(src, "DirMove error: Can't move root")
		return errors.New("can't move root directory")
	}

	// find the root src directory
	err := srcFs.dirCache.FindRoot(ctx, false)
	if err != nil {
		return err
	}

	// find the root dst directory
	if dstRemote != "" {
		err = f.dirCache.FindRoot(ctx, true)
		if err != nil {
			return err
		}
	} else {
		// moving to the root - it must not already exist
		if f.dirCache.FoundRoot() {
			return fs.ErrorDirExists
		}
	}

	// Find ID of dst parent, creating subdirs if necessary
	var leaf, dstDirectoryID string
	findPath := dstRemote
	if dstRemote == "" {
		findPath = f.root
	}
	leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
	if err != nil {
		return err
	}

	// Check destination does not exist
	if dstRemote != "" {
		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
		if err == fs.ErrorDirNotFound {
			// OK
		} else if err != nil {
			return err
		} else {
			return fs.ErrorDirExists
		}
	}

	// Find ID of src parent
	var srcDirectoryID string
	if srcRemote == "" {
		srcDirectoryID, err = srcFs.dirCache.RootParentID()
	} else {
		_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, srcRemote, false)
	}
	if err != nil {
		return err
	}

	// Find ID of src
	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
	if err != nil {
		return err
	}

	// Do the move - rename and reparent in a single Update call
	patch := drive.File{
		Name: leaf,
	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Files.Update(srcID, &patch).
			RemoveParents(srcDirectoryID).
			AddParents(dstDirectoryID).
			Fields("").
			SupportsAllDrives(true).
			Do()
		return f.shouldRetry(err)
	})
	if err != nil {
		return err
	}
	// Invalidate the cached IDs under the old path
	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}
  2505  
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the pollIntervalChan channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
	go func() {
		// get the StartPageToken early so all changes from now on get processed
		startPageToken, err := f.changeNotifyStartPageToken()
		if err != nil {
			fs.Infof(f, "Failed to get StartPageToken: %s", err)
		}
		var ticker *time.Ticker
		var tickerC <-chan time.Time
		for {
			select {
			case pollInterval, ok := <-pollIntervalChan:
				// Interval update, or shutdown when the channel is closed
				if !ok {
					if ticker != nil {
						ticker.Stop()
					}
					return
				}
				// Replace any existing ticker with one at the new interval.
				// An interval of 0 disables polling (tickerC stays nil, so
				// the poll case below never fires).
				if ticker != nil {
					ticker.Stop()
					ticker, tickerC = nil, nil
				}
				if pollInterval != 0 {
					ticker = time.NewTicker(pollInterval)
					tickerC = ticker.C
				}
			case <-tickerC:
				// Poll tick: retry fetching the start token if the initial
				// fetch above failed, then process changes since last token.
				if startPageToken == "" {
					startPageToken, err = f.changeNotifyStartPageToken()
					if err != nil {
						fs.Infof(f, "Failed to get StartPageToken: %s", err)
						continue
					}
				}
				fs.Debugf(f, "Checking for changes on remote")
				startPageToken, err = f.changeNotifyRunner(ctx, notifyFunc, startPageToken)
				if err != nil {
					fs.Infof(f, "Change notify listener failure: %s", err)
				}
			}
		}
	}()
}
  2555  func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
  2556  	var startPageToken *drive.StartPageToken
  2557  	err = f.pacer.Call(func() (bool, error) {
  2558  		changes := f.svc.Changes.GetStartPageToken().SupportsAllDrives(true)
  2559  		if f.isTeamDrive {
  2560  			changes.DriveId(f.opt.TeamDriveID)
  2561  		}
  2562  		startPageToken, err = changes.Do()
  2563  		return f.shouldRetry(err)
  2564  	})
  2565  	if err != nil {
  2566  		return
  2567  	}
  2568  	return startPageToken.StartPageToken, nil
  2569  }
  2570  
// changeNotifyRunner processes the Drive changes feed starting at
// startPageToken.
//
// For every change it notifies notifyFunc with the previously cached
// path of the changed id (if any) and with the new path(s) computed
// from cached parent directories, deduplicating within a page. It
// follows NextPageToken until the API returns a NewStartPageToken,
// which is returned for use in the next poll.
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
	pageToken := startPageToken
	for {
		var changeList *drive.ChangeList

		err = f.pacer.Call(func() (bool, error) {
			changesCall := f.svc.Changes.List(pageToken).
				Fields("nextPageToken,newStartPageToken,changes(fileId,file(name,parents,mimeType))")
			if f.opt.ListChunk > 0 {
				changesCall.PageSize(f.opt.ListChunk)
			}
			changesCall.SupportsAllDrives(true)
			changesCall.IncludeItemsFromAllDrives(true)
			if f.isTeamDrive {
				changesCall.DriveId(f.opt.TeamDriveID)
			}
			// If using appDataFolder then need to add Spaces
			if f.rootFolderID == "appDataFolder" {
				changesCall.Spaces("appDataFolder")
			}
			changeList, err = changesCall.Context(ctx).Do()
			return f.shouldRetry(err)
		})
		if err != nil {
			return
		}

		type entryType struct {
			path      string
			entryType fs.EntryType
		}
		var pathsToClear []entryType
		for _, change := range changeList.Changes {
			// find the previous path
			if path, ok := f.dirCache.GetInv(change.FileId); ok {
				if change.File != nil && change.File.MimeType != driveFolderType {
					pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
				} else {
					pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory})
				}
			}

			// find the new path
			if change.File != nil {
				change.File.Name = f.opt.Enc.ToStandardName(change.File.Name)
				changeType := fs.EntryDirectory
				if change.File.MimeType != driveFolderType {
					changeType = fs.EntryObject
				}

				// translate the parent dir of this object
				if len(change.File.Parents) > 0 {
					// a Drive file can have several parents, so one change
					// may map to multiple paths
					for _, parent := range change.File.Parents {
						if parentPath, ok := f.dirCache.GetInv(parent); ok {
							// and append the drive file name to compute the full file name
							newPath := path.Join(parentPath, change.File.Name)
							// this will now clear the actual file too
							pathsToClear = append(pathsToClear, entryType{path: newPath, entryType: changeType})
						}
					}
				} else { // a true root object that is changed
					pathsToClear = append(pathsToClear, entryType{path: change.File.Name, entryType: changeType})
				}
			}
		}

		// notify each distinct path only once per page
		visitedPaths := make(map[string]struct{})
		for _, entry := range pathsToClear {
			if _, ok := visitedPaths[entry.path]; ok {
				continue
			}
			visitedPaths[entry.path] = struct{}{}
			notifyFunc(entry.path, entry.entryType)
		}

		switch {
		case changeList.NewStartPageToken != "":
			// feed exhausted - resume from this token next poll
			return changeList.NewStartPageToken, nil
		case changeList.NextPageToken != "":
			// more pages in this batch
			pageToken = changeList.NextPageToken
		default:
			return
		}
	}
}
  2656  
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	f.dirCache.ResetRoot()
}
  2662  
// Hashes returns the supported hash sets - Drive supplies MD5 only.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}
  2667  
  2668  // ------------------------------------------------------------
  2669  
// Fs returns the parent Fs
func (o *baseObject) Fs() fs.Info {
	return o.fs
}
  2674  
// String returns the remote path for use in logs.
func (o *baseObject) String() string {
	return o.remote
}
  2679  
// String returns the remote path for use in logs.
// Safe to call on a nil *Object (returns "<nil>").
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}
  2687  
// Remote returns the remote path
func (o *baseObject) Remote() string {
	return o.remote
}
  2692  
  2693  // Hash returns the Md5sum of an object returning a lowercase hex string
  2694  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  2695  	if t != hash.MD5 {
  2696  		return "", hash.ErrUnsupported
  2697  	}
  2698  	return o.md5sum, nil
  2699  }
  2700  func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
  2701  	if t != hash.MD5 {
  2702  		return "", hash.ErrUnsupported
  2703  	}
  2704  	return "", nil
  2705  }
  2706  
// Size returns the size of an object in bytes
func (o *baseObject) Size() int64 {
	return o.bytes
}
  2711  
  2712  // getRemoteInfo returns a drive.File for the remote
  2713  func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File, err error) {
  2714  	info, _, _, _, _, err = f.getRemoteInfoWithExport(ctx, remote)
  2715  	return
  2716  }
  2717  
// getRemoteInfoWithExport returns a drive.File and the export settings for the remote.
//
// It resolves the parent directory of remote, then lists it looking
// for a matching leaf. When gdocs are not skipped, a Google document
// whose exported name matches the leaf is returned along with its
// export extension, name and MIME type (isDocument set true).
//
// Returns fs.ErrorObjectNotFound if neither the directory nor a
// matching entry exists.
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
	info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			return nil, "", "", "", false, fs.ErrorObjectNotFound
		}
		return nil, "", "", "", false, err
	}

	// The callback fills in the named results via closure capture.
	found, err := f.list(ctx, []string{directoryID}, leaf, false, true, false, func(item *drive.File) bool {
		if !f.opt.SkipGdocs {
			extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
			if exportName == leaf {
				info = item
				return true
			}
			// A document whose export name doesn't match can never match
			// on its raw name either, so keep looking.
			if isDocument {
				return false
			}
		}
		if item.Name == leaf {
			info = item
			return true
		}
		return false
	})
	if err != nil {
		return nil, "", "", "", false, err
	}
	if !found {
		return nil, "", "", "", false, fs.ErrorObjectNotFound
	}
	return
}
  2754  
  2755  // ModTime returns the modification time of the object
  2756  //
  2757  //
  2758  // It attempts to read the objects mtime and if that isn't present the
  2759  // LastModified returned in the http headers
  2760  func (o *baseObject) ModTime(ctx context.Context) time.Time {
  2761  	modTime, err := time.Parse(timeFormatIn, o.modifiedDate)
  2762  	if err != nil {
  2763  		fs.Debugf(o, "Failed to read mtime from object: %v", err)
  2764  		return time.Now()
  2765  	}
  2766  	return modTime
  2767  }
  2768  
// SetModTime sets the modification time of the drive fs object
// by patching the file's ModifiedTime via the API, then refreshes
// the cached modification date from the server's response.
func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
	// New metadata
	updateInfo := &drive.File{
		ModifiedTime: modTime.Format(timeFormatOut),
	}
	// Set modified date
	var info *drive.File
	err := o.fs.pacer.Call(func() (bool, error) {
		var err error
		info, err = o.fs.svc.Files.Update(o.id, updateInfo).
			Fields(partialFields).
			SupportsAllDrives(true).
			Do()
		return o.fs.shouldRetry(err)
	})
	if err != nil {
		return err
	}
	// Update info from read data
	o.modifiedDate = info.ModifiedTime
	return nil
}
  2792  
// Storable returns a boolean as to whether this object is storable
func (o *baseObject) Storable() bool {
	return true
}
  2797  
// httpResponse gets an http.Response object for the object
// using the url and method passed in.
//
// Range headers from options are honoured except for zero-length
// objects. The request is retried via the pacer; on a non-2xx status
// the body is closed and the googleapi error returned.
func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
	if url == "" {
		return nil, nil, errors.New("forbidden to download - check sharing permission")
	}
	req, err = http.NewRequest(method, url, nil)
	if err != nil {
		return req, nil, err
	}
	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	if o.bytes == 0 {
		// Don't supply range requests for 0 length objects as they always fail
		delete(req.Header, "Range")
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.client.Do(req)
		if err == nil {
			// Turn HTTP error statuses into a googleapi.Error
			err = googleapi.CheckResponse(res)
			if err != nil {
				_ = res.Body.Close() // ignore error
			}
		}
		return o.fs.shouldRetry(err)
	})
	if err != nil {
		return req, nil, err
	}
	return req, res, nil
}
  2829  
// openDocumentFile represents a documentObject open for reading.
// Updates the object size after the document has been read successfully
// (exported document sizes are not known in advance).
type openDocumentFile struct {
	o       *documentObject // Object we are reading for
	in      io.ReadCloser   // reading from here
	bytes   int64           // number of bytes read on this connection
	eof     bool            // whether we have read end of file
	errored bool            // whether we have encountered an error during reading
}
  2839  
  2840  // Read bytes from the object - see io.Reader
  2841  func (file *openDocumentFile) Read(p []byte) (n int, err error) {
  2842  	n, err = file.in.Read(p)
  2843  	file.bytes += int64(n)
  2844  	if err != nil && err != io.EOF {
  2845  		file.errored = true
  2846  	}
  2847  	if err == io.EOF {
  2848  		file.eof = true
  2849  	}
  2850  	return
  2851  }
  2852  
  2853  // Close the object and update bytes read
  2854  func (file *openDocumentFile) Close() (err error) {
  2855  	// If end of file, update bytes read
  2856  	if file.eof && !file.errored {
  2857  		fs.Debugf(file.o, "Updating size of doc after download to %v", file.bytes)
  2858  		file.o.bytes = file.bytes
  2859  	}
  2860  	return file.in.Close()
  2861  }
  2862  
// Compile-time check that openDocumentFile satisfies io.ReadCloser
var _ io.ReadCloser = (*openDocumentFile)(nil)
  2865  
  2866  // Checks to see if err is a googleapi.Error with of type what
  2867  func isGoogleError(err error, what string) bool {
  2868  	if gerr, ok := err.(*googleapi.Error); ok {
  2869  		for _, error := range gerr.Errors {
  2870  			if error.Reason == what {
  2871  				return true
  2872  			}
  2873  		}
  2874  	}
  2875  	return false
  2876  }
  2877  
// open a url for reading.
//
// If the download is refused because Drive flagged the file as abusive
// and --drive-acknowledge-abuse is set, the request is retried with
// acknowledgeAbuse=true appended to the query string; otherwise the
// error is wrapped with a hint about the flag.
func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	_, res, err := o.httpResponse(ctx, url, "GET", options)
	if err != nil {
		if isGoogleError(err, "cannotDownloadAbusiveFile") {
			if o.fs.opt.AcknowledgeAbuse {
				// Retry acknowledging abuse
				if strings.ContainsRune(url, '?') {
					url += "&"
				} else {
					url += "?"
				}
				url += "acknowledgeAbuse=true"
				_, res, err = o.httpResponse(ctx, url, "GET", options)
			} else {
				err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
			}
		}
		if err != nil {
			return nil, errors.Wrap(err, "open file failed")
		}
	}
	return res.Body, nil
}
  2902  
// Open an object for read.
//
// When v2Download is set the (v2 API) downloadUrl is fetched first and
// cached in o.url, since the v3 API does not expose it; the flag is
// cleared after a successful lookup so the URL is only fetched once.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	if o.v2Download {
		var v2File *drive_v2.File
		err = o.fs.pacer.Call(func() (bool, error) {
			v2File, err = o.fs.v2Svc.Files.Get(o.id).
				Fields("downloadUrl").
				SupportsAllDrives(true).
				Do()
			return o.fs.shouldRetry(err)
		})
		if err == nil {
			fs.Debugf(o, "Using v2 download: %v", v2File.DownloadUrl)
			o.url = v2File.DownloadUrl
			o.v2Download = false
		}
	}
	return o.baseObject.open(ctx, o.url, options...)
}
// Open an exported Google document for read.
//
// Wraps the stream in an openDocumentFile so the object size is
// updated after a successful full read.
func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// Update the size with what we are reading as it can change from
	// the HEAD in the listing to this GET. This stops rclone marking
	// the transfer as corrupted.
	var offset, end int64 = 0, -1
	// Filter out range/seek options, reusing the options backing array.
	var newOptions = options[:0]
	for _, o := range options {
		// Note that Range requests don't work on Google docs:
		// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
		// So do a subset of them manually
		switch x := o.(type) {
		case *fs.RangeOption:
			offset, end = x.Start, x.End
		case *fs.SeekOption:
			offset, end = x.Offset, -1
		default:
			newOptions = append(newOptions, o)
		}
	}
	options = newOptions
	if offset != 0 {
		return nil, errors.New("partial downloads are not supported while exporting Google Documents")
	}
	in, err = o.baseObject.open(ctx, o.url, options...)
	if in != nil {
		in = &openDocumentFile{o: o, in: in}
	}
	// Emulate a bounded range request (offset is 0 here) by limiting
	// how much of the stream the caller can read.
	if end >= 0 {
		in = readers.NewLimitedReadCloser(in, end-offset+1)
	}
	return
}
  2954  func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  2955  	var offset, limit int64 = 0, -1
  2956  	var data = o.content
  2957  	for _, option := range options {
  2958  		switch x := option.(type) {
  2959  		case *fs.SeekOption:
  2960  			offset = x.Offset
  2961  		case *fs.RangeOption:
  2962  			offset, limit = x.Decode(int64(len(data)))
  2963  		default:
  2964  			if option.Mandatory() {
  2965  				fs.Logf(o, "Unsupported mandatory option: %v", option)
  2966  			}
  2967  		}
  2968  	}
  2969  	if l := int64(len(data)); offset > l {
  2970  		offset = l
  2971  	}
  2972  	data = data[offset:]
  2973  	if limit != -1 && limit < int64(len(data)) {
  2974  		data = data[:limit]
  2975  	}
  2976  
  2977  	return ioutil.NopCloser(bytes.NewReader(data)), nil
  2978  }
  2979  
// update uploads new metadata and file data for an existing object.
//
// Files smaller than opt.UploadCutoff (and of known size) are sent in
// a single request; larger or unknown-size data goes through the
// chunked uploader which handles its own retries.
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
	src fs.ObjectInfo) (info *drive.File, err error) {
	// Make the API request to upload metadata and file data.
	size := src.Size()
	if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
		// Don't retry, return a retry error instead: the reader has been
		// consumed so the caller must restart the transfer.
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			info, err = o.fs.svc.Files.Update(o.id, updateInfo).
				Media(in, googleapi.ContentType(uploadMimeType)).
				Fields(partialFields).
				SupportsAllDrives(true).
				KeepRevisionForever(o.fs.opt.KeepRevisionForever).
				Do()
			return o.fs.shouldRetry(err)
		})
		return
	}
	// Upload the file in chunks
	return o.fs.Upload(ctx, in, size, uploadMimeType, o.id, o.remote, updateInfo)
}
  3000  
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	srcMimeType := fs.MimeType(ctx, src)
	updateInfo := &drive.File{
		MimeType:     srcMimeType,
		ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
	}
	info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
	if err != nil {
		return err
	}
	// Replace our contents with the freshly uploaded object, making
	// sure the upload didn't turn it into a different object kind
	// (e.g. a Google document).
	newO, err := o.fs.newObjectWithInfo(src.Remote(), info)
	if err != nil {
		return err
	}
	switch newO := newO.(type) {
	case *Object:
		*o = *newO
	default:
		return errors.New("object type changed by update")
	}

	return nil
}
  3029  func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  3030  	srcMimeType := fs.MimeType(ctx, src)
  3031  	importMimeType := ""
  3032  	updateInfo := &drive.File{
  3033  		MimeType:     srcMimeType,
  3034  		ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
  3035  	}
  3036  
  3037  	if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
  3038  		return errors.Errorf("can't update google document type without --drive-import-formats")
  3039  	}
  3040  	importMimeType = o.fs.findImportFormat(updateInfo.MimeType)
  3041  	if importMimeType == "" {
  3042  		return errors.Errorf("no import format found for %q", srcMimeType)
  3043  	}
  3044  	if importMimeType != o.documentMimeType {
  3045  		return errors.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
  3046  	}
  3047  	updateInfo.MimeType = importMimeType
  3048  
  3049  	info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
  3050  	if err != nil {
  3051  		return err
  3052  	}
  3053  
  3054  	remote := src.Remote()
  3055  	remote = remote[:len(remote)-o.extLen]
  3056  
  3057  	newO, err := o.fs.newObjectWithInfo(remote, info)
  3058  	if err != nil {
  3059  		return err
  3060  	}
  3061  	switch newO := newO.(type) {
  3062  	case *documentObject:
  3063  		*o = *newO
  3064  	default:
  3065  		return errors.New("object type changed by update")
  3066  	}
  3067  
  3068  	return nil
  3069  }
  3070  
// Update is not supported for link files - their content is
// synthesised in memory (see linkObject.Open) and cannot be uploaded.
func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return errors.New("cannot update link files")
}
  3074  
  3075  // Remove an object
  3076  func (o *baseObject) Remove(ctx context.Context) error {
  3077  	var err error
  3078  	err = o.fs.pacer.Call(func() (bool, error) {
  3079  		if o.fs.opt.UseTrash {
  3080  			info := drive.File{
  3081  				Trashed: true,
  3082  			}
  3083  			_, err = o.fs.svc.Files.Update(o.id, &info).
  3084  				Fields("").
  3085  				SupportsAllDrives(true).
  3086  				Do()
  3087  		} else {
  3088  			err = o.fs.svc.Files.Delete(o.id).
  3089  				Fields("").
  3090  				SupportsAllDrives(true).
  3091  				Do()
  3092  		}
  3093  		return o.fs.shouldRetry(err)
  3094  	})
  3095  	return err
  3096  }
  3097  
// MimeType of an Object if known, "" otherwise
func (o *baseObject) MimeType(ctx context.Context) string {
	return o.mimeType
}
  3102  
// ID returns the ID of the Object if known, or "" if not
func (o *baseObject) ID() string {
	return o.id
}
  3107  
  3108  func (o *documentObject) ext() string {
  3109  	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
  3110  }
  3111  func (o *linkObject) ext() string {
  3112  	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
  3113  }
  3114  
// templates for document link files
//
// Each template is filled in with .URL (and .Title where used) for the
// link being exported - presumably via text/template, which is imported
// above; confirm at the call site.
const (
	// Windows .url internet shortcut ({{"\r"}} emits the CR for CRLF endings)
	urlTemplate = `[InternetShortcut]{{"\r"}}
URL={{ .URL }}{{"\r"}}
`
	// macOS .webloc property list
	weblocTemplate = `<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
  <dict>
    <key>URL</key>
    <string>{{ .URL }}</string>
  </dict>
</plist>
`
	// freedesktop .desktop link entry
	desktopTemplate = `[Desktop Entry]
Encoding=UTF-8
Name={{ .Title }}
URL={{ .URL }}
Icon=text-html
Type=Link
`
	// HTML page that redirects to the target URL
	htmlTemplate = `<html>
<head>
  <meta http-equiv="refresh" content="0; url={{ .URL }}" />
  <title>{{ .Title }}</title>
</head>
<body>
  Loading <a href="{{ .URL }}">{{ .Title }}</a>
</body>
</html>
`
)
  3147  
// Compile-time checks that the Fs and the three object kinds satisfy
// the optional rclone interfaces they claim to implement.
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.CleanUpper      = (*Fs)(nil)
	_ fs.PutStreamer     = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier  = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.ListRer         = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.MimeTyper       = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
	_ fs.Object          = (*documentObject)(nil)
	_ fs.MimeTyper       = (*documentObject)(nil)
	_ fs.IDer            = (*documentObject)(nil)
	_ fs.Object          = (*linkObject)(nil)
	_ fs.MimeTyper       = (*linkObject)(nil)
	_ fs.IDer            = (*linkObject)(nil)
)