github.com/10XDev/rclone@v1.52.3-0.20200626220027-16af9ab76b2a/backend/drive/drive.go (about)

     1  // Package drive interfaces with the Google Drive object storage system
     2  package drive
     3  
     4  // FIXME need to deal with some corner cases
     5  // * multiple files with the same name
     6  // * files can be in multiple directories
     7  // * can have directory loops
     8  // * files with / in name
     9  
    10  import (
    11  	"bytes"
    12  	"context"
    13  	"crypto/tls"
    14  	"fmt"
    15  	"io"
    16  	"io/ioutil"
    17  	"log"
    18  	"mime"
    19  	"net/http"
    20  	"net/url"
    21  	"os"
    22  	"path"
    23  	"sort"
    24  	"strconv"
    25  	"strings"
    26  	"sync"
    27  	"sync/atomic"
    28  	"text/template"
    29  	"time"
    30  
    31  	"github.com/pkg/errors"
    32  	"github.com/rclone/rclone/fs"
    33  	"github.com/rclone/rclone/fs/cache"
    34  	"github.com/rclone/rclone/fs/config"
    35  	"github.com/rclone/rclone/fs/config/configmap"
    36  	"github.com/rclone/rclone/fs/config/configstruct"
    37  	"github.com/rclone/rclone/fs/config/obscure"
    38  	"github.com/rclone/rclone/fs/fserrors"
    39  	"github.com/rclone/rclone/fs/fshttp"
    40  	"github.com/rclone/rclone/fs/hash"
    41  	"github.com/rclone/rclone/fs/walk"
    42  	"github.com/rclone/rclone/lib/dircache"
    43  	"github.com/rclone/rclone/lib/encoder"
    44  	"github.com/rclone/rclone/lib/oauthutil"
    45  	"github.com/rclone/rclone/lib/pacer"
    46  	"github.com/rclone/rclone/lib/readers"
    47  	"golang.org/x/oauth2"
    48  	"golang.org/x/oauth2/google"
    49  	drive_v2 "google.golang.org/api/drive/v2"
    50  	drive "google.golang.org/api/drive/v3"
    51  	"google.golang.org/api/googleapi"
    52  )
    53  
// Constants
const (
	// OAuth client used when the user hasn't configured their own.
	rcloneClientID              = "202264815644.apps.googleusercontent.com"
	rcloneEncryptedClientSecret = "eX8GpZTVx3vxMWVkuuBdDWmAUE6rGhTwVrvG9GhllYccSdj2-mvHVg"
	driveFolderType             = "application/vnd.google-apps.folder"   // MIME type Drive uses for folders
	shortcutMimeType            = "application/vnd.google-apps.shortcut" // MIME type Drive uses for shortcut files
	timeFormatIn                = time.RFC3339
	timeFormatOut               = "2006-01-02T15:04:05.000000000Z07:00" // RFC3339 with nanosecond precision for writing mod times
	defaultMinSleep             = fs.Duration(100 * time.Millisecond)
	defaultBurst                = 100
	defaultExportExtensions     = "docx,xlsx,pptx,svg"
	scopePrefix                 = "https://www.googleapis.com/auth/" // prepended to every requested OAuth scope
	defaultScope                = "drive"
	// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
	// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
	minChunkSize     = 256 * fs.KibiByte
	defaultChunkSize = 8 * fs.MebiByte
	// partialFields is the set of file fields requested from the Drive API on listings and gets.
	partialFields    = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails"
	listRGrouping    = 50   // number of IDs to search at once when using ListR
	listRInputBuffer = 1000 // size of input buffer when using ListR
)
    75  
// Globals
var (
	// Description of how to auth for this app
	driveConfig = &oauth2.Config{
		Scopes:       []string{scopePrefix + "drive"},
		Endpoint:     google.Endpoint,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.TitleBarRedirectURL,
	}
	// Non-canonical MIME types that map to the same extensions as entries in
	// _mimeTypeToExtension. Registered first so mime.TypeByExtension returns
	// the canonical type (see init below).
	_mimeTypeToExtensionDuplicates = map[string]string{
		"application/x-vnd.oasis.opendocument.presentation": ".odp",
		"application/x-vnd.oasis.opendocument.spreadsheet":  ".ods",
		"application/x-vnd.oasis.opendocument.text":         ".odt",
		"image/jpg":   ".jpg",
		"image/x-bmp": ".bmp",
		"image/x-png": ".png",
		"text/rtf":    ".rtf",
	}
	// Canonical MIME type -> file extension mapping for exported Google docs.
	_mimeTypeToExtension = map[string]string{
		"application/epub+zip":                            ".epub",
		"application/json":                                ".json",
		"application/msword":                              ".doc",
		"application/pdf":                                 ".pdf",
		"application/rtf":                                 ".rtf",
		"application/vnd.ms-excel":                        ".xls",
		"application/vnd.oasis.opendocument.presentation": ".odp",
		"application/vnd.oasis.opendocument.spreadsheet":  ".ods",
		"application/vnd.oasis.opendocument.text":         ".odt",
		"application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
		"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":         ".xlsx",
		"application/vnd.openxmlformats-officedocument.wordprocessingml.document":   ".docx",
		"application/x-msmetafile":  ".wmf",
		"application/zip":           ".zip",
		"image/bmp":                 ".bmp",
		"image/jpeg":                ".jpg",
		"image/pjpeg":               ".pjpeg",
		"image/png":                 ".png",
		"image/svg+xml":             ".svg",
		"text/csv":                  ".csv",
		"text/html":                 ".html",
		"text/plain":                ".txt",
		"text/tab-separated-values": ".tsv",
	}
	// Synthetic MIME types used for exporting Drive links as shortcut files.
	_mimeTypeToExtensionLinks = map[string]string{
		"application/x-link-desktop": ".desktop",
		"application/x-link-html":    ".link.html",
		"application/x-link-url":     ".url",
		"application/x-link-webloc":  ".webloc",
	}
	// MIME types rewritten before lookup (e.g. Apps Script exports as JSON).
	_mimeTypeCustomTransform = map[string]string{
		"application/vnd.google-apps.script+json": "application/json",
	}
	fetchFormatsOnce sync.Once                     // make sure we fetch the export/import formats only once
	_exportFormats   map[string][]string           // allowed export MIME type conversions
	_importFormats   map[string][]string           // allowed import MIME type conversions
	templatesOnce    sync.Once                     // parse link templates only once
	_linkTemplates   map[string]*template.Template // available link types
)
   135  
   136  // Parse the scopes option returning a slice of scopes
   137  func driveScopes(scopesString string) (scopes []string) {
   138  	if scopesString == "" {
   139  		scopesString = defaultScope
   140  	}
   141  	for _, scope := range strings.Split(scopesString, ",") {
   142  		scope = strings.TrimSpace(scope)
   143  		scopes = append(scopes, scopePrefix+scope)
   144  	}
   145  	return scopes
   146  }
   147  
   148  // Returns true if one of the scopes was "drive.appfolder"
   149  func driveScopesContainsAppFolder(scopes []string) bool {
   150  	for _, scope := range scopes {
   151  		if scope == scopePrefix+"drive.appfolder" {
   152  			return true
   153  		}
   154  
   155  	}
   156  	return false
   157  }
   158  
   159  // Register with Fs
   160  func init() {
   161  	fs.Register(&fs.RegInfo{
   162  		Name:        "drive",
   163  		Description: "Google Drive",
   164  		NewFs:       NewFs,
   165  		CommandHelp: commandHelp,
   166  		Config: func(name string, m configmap.Mapper) {
   167  			ctx := context.TODO()
   168  			// Parse config into Options struct
   169  			opt := new(Options)
   170  			err := configstruct.Set(m, opt)
   171  			if err != nil {
   172  				fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
   173  				return
   174  			}
   175  
   176  			// Fill in the scopes
   177  			driveConfig.Scopes = driveScopes(opt.Scope)
   178  			// Set the root_folder_id if using drive.appfolder
   179  			if driveScopesContainsAppFolder(driveConfig.Scopes) {
   180  				m.Set("root_folder_id", "appDataFolder")
   181  			}
   182  
   183  			if opt.ServiceAccountFile == "" {
   184  				err = oauthutil.Config("drive", name, m, driveConfig, nil)
   185  				if err != nil {
   186  					log.Fatalf("Failed to configure token: %v", err)
   187  				}
   188  			}
   189  			err = configTeamDrive(ctx, opt, m, name)
   190  			if err != nil {
   191  				log.Fatalf("Failed to configure team drive: %v", err)
   192  			}
   193  		},
   194  		Options: []fs.Option{{
   195  			Name: config.ConfigClientID,
   196  			Help: "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance.",
   197  		}, {
   198  			Name: config.ConfigClientSecret,
   199  			Help: "Google Application Client Secret\nSetting your own is recommended.",
   200  		}, {
   201  			Name: "scope",
   202  			Help: "Scope that rclone should use when requesting access from drive.",
   203  			Examples: []fs.OptionExample{{
   204  				Value: "drive",
   205  				Help:  "Full access all files, excluding Application Data Folder.",
   206  			}, {
   207  				Value: "drive.readonly",
   208  				Help:  "Read-only access to file metadata and file contents.",
   209  			}, {
   210  				Value: "drive.file",
   211  				Help:  "Access to files created by rclone only.\nThese are visible in the drive website.\nFile authorization is revoked when the user deauthorizes the app.",
   212  			}, {
   213  				Value: "drive.appfolder",
   214  				Help:  "Allows read and write access to the Application Data folder.\nThis is not visible in the drive website.",
   215  			}, {
   216  				Value: "drive.metadata.readonly",
   217  				Help:  "Allows read-only access to file metadata but\ndoes not allow any access to read or download file content.",
   218  			}},
   219  		}, {
   220  			Name: "root_folder_id",
   221  			Help: `ID of the root folder
   222  Leave blank normally.
   223  
   224  Fill in to access "Computers" folders (see docs), or for rclone to use
   225  a non root folder as its starting point.
   226  
   227  Note that if this is blank, the first time rclone runs it will fill it
   228  in with the ID of the root folder.
   229  `,
   230  		}, {
   231  			Name: "service_account_file",
   232  			Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
   233  		}, {
   234  			Name:     "service_account_credentials",
   235  			Help:     "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
   236  			Hide:     fs.OptionHideConfigurator,
   237  			Advanced: true,
   238  		}, {
   239  			Name:     "team_drive",
   240  			Help:     "ID of the Team Drive",
   241  			Hide:     fs.OptionHideConfigurator,
   242  			Advanced: true,
   243  		}, {
   244  			Name:     "auth_owner_only",
   245  			Default:  false,
   246  			Help:     "Only consider files owned by the authenticated user.",
   247  			Advanced: true,
   248  		}, {
   249  			Name:     "use_trash",
   250  			Default:  true,
   251  			Help:     "Send files to the trash instead of deleting permanently.\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
   252  			Advanced: true,
   253  		}, {
   254  			Name:     "skip_gdocs",
   255  			Default:  false,
   256  			Help:     "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.",
   257  			Advanced: true,
   258  		}, {
   259  			Name:    "skip_checksum_gphotos",
   260  			Default: false,
   261  			Help: `Skip MD5 checksum on Google photos and videos only.
   262  
   263  Use this if you get checksum errors when transferring Google photos or
   264  videos.
   265  
   266  Setting this flag will cause Google photos and videos to return a
   267  blank MD5 checksum.
   268  
   269  Google photos are identified by being in the "photos" space.
   270  
   271  Corrupted checksums are caused by Google modifying the image/video but
   272  not updating the checksum.`,
   273  			Advanced: true,
   274  		}, {
   275  			Name:    "shared_with_me",
   276  			Default: false,
   277  			Help: `Only show files that are shared with me.
   278  
   279  Instructs rclone to operate on your "Shared with me" folder (where
   280  Google Drive lets you access the files and folders others have shared
   281  with you).
   282  
   283  This works both with the "list" (lsd, lsl, etc) and the "copy"
   284  commands (copy, sync, etc), and with all other commands too.`,
   285  			Advanced: true,
   286  		}, {
   287  			Name:     "trashed_only",
   288  			Default:  false,
   289  			Help:     "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
   290  			Advanced: true,
   291  		}, {
   292  			Name:     "formats",
   293  			Default:  "",
   294  			Help:     "Deprecated: see export_formats",
   295  			Advanced: true,
   296  			Hide:     fs.OptionHideConfigurator,
   297  		}, {
   298  			Name:     "export_formats",
   299  			Default:  defaultExportExtensions,
   300  			Help:     "Comma separated list of preferred formats for downloading Google docs.",
   301  			Advanced: true,
   302  		}, {
   303  			Name:     "import_formats",
   304  			Default:  "",
   305  			Help:     "Comma separated list of preferred formats for uploading Google docs.",
   306  			Advanced: true,
   307  		}, {
   308  			Name:     "allow_import_name_change",
   309  			Default:  false,
   310  			Help:     "Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.",
   311  			Advanced: true,
   312  		}, {
   313  			Name:    "use_created_date",
   314  			Default: false,
   315  			Help: `Use file created date instead of modified date.,
   316  
   317  Useful when downloading data and you want the creation date used in
   318  place of the last modified date.
   319  
   320  **WARNING**: This flag may have some unexpected consequences.
   321  
   322  When uploading to your drive all files will be overwritten unless they
   323  haven't been modified since their creation. And the inverse will occur
   324  while downloading.  This side effect can be avoided by using the
   325  "--checksum" flag.
   326  
   327  This feature was implemented to retain photos capture date as recorded
   328  by google photos. You will first need to check the "Create a Google
   329  Photos folder" option in your google drive settings. You can then copy
   330  or move the photos locally and use the date the image was taken
   331  (created) set as the modification date.`,
   332  			Advanced: true,
   333  			Hide:     fs.OptionHideConfigurator,
   334  		}, {
   335  			Name:    "use_shared_date",
   336  			Default: false,
   337  			Help: `Use date file was shared instead of modified date.
   338  
   339  Note that, as with "--drive-use-created-date", this flag may have
   340  unexpected consequences when uploading/downloading files.
   341  
   342  If both this flag and "--drive-use-created-date" are set, the created
   343  date is used.`,
   344  			Advanced: true,
   345  			Hide:     fs.OptionHideConfigurator,
   346  		}, {
   347  			Name:     "list_chunk",
   348  			Default:  1000,
   349  			Help:     "Size of listing chunk 100-1000. 0 to disable.",
   350  			Advanced: true,
   351  		}, {
   352  			Name:    "impersonate",
   353  			Default: "",
   354  			Help: `Impersonate this user when using a service account.
   355  
   356  Note that if this is used then "root_folder_id" will be ignored.
   357  `,
   358  			Advanced: true,
   359  		}, {
   360  			Name:    "alternate_export",
   361  			Default: false,
   362  			Help: `Use alternate export URLs for google documents export.,
   363  
   364  If this option is set this instructs rclone to use an alternate set of
   365  export URLs for drive documents.  Users have reported that the
   366  official export URLs can't export large documents, whereas these
   367  unofficial ones can.
   368  
   369  See rclone issue [#2243](https://github.com/rclone/rclone/issues/2243) for background,
   370  [this google drive issue](https://issuetracker.google.com/issues/36761333) and
   371  [this helpful post](https://www.labnol.org/internet/direct-links-for-google-drive/28356/).`,
   372  			Advanced: true,
   373  		}, {
   374  			Name:     "upload_cutoff",
   375  			Default:  defaultChunkSize,
   376  			Help:     "Cutoff for switching to chunked upload",
   377  			Advanced: true,
   378  		}, {
   379  			Name:    "chunk_size",
   380  			Default: defaultChunkSize,
   381  			Help: `Upload chunk size. Must a power of 2 >= 256k.
   382  
   383  Making this larger will improve performance, but note that each chunk
   384  is buffered in memory one per transfer.
   385  
   386  Reducing this will reduce memory usage but decrease performance.`,
   387  			Advanced: true,
   388  		}, {
   389  			Name:    "acknowledge_abuse",
   390  			Default: false,
   391  			Help: `Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
   392  
   393  If downloading a file returns the error "This file has been identified
   394  as malware or spam and cannot be downloaded" with the error code
   395  "cannotDownloadAbusiveFile" then supply this flag to rclone to
   396  indicate you acknowledge the risks of downloading the file and rclone
   397  will download it anyway.`,
   398  			Advanced: true,
   399  		}, {
   400  			Name:     "keep_revision_forever",
   401  			Default:  false,
   402  			Help:     "Keep new head revision of each file forever.",
   403  			Advanced: true,
   404  		}, {
   405  			Name:    "size_as_quota",
   406  			Default: false,
   407  			Help: `Show sizes as storage quota usage, not actual size.
   408  
   409  Show the size of a file as the storage quota used. This is the
   410  current version plus any older versions that have been set to keep
   411  forever.
   412  
   413  **WARNING**: This flag may have some unexpected consequences.
   414  
   415  It is not recommended to set this flag in your config - the
   416  recommended usage is using the flag form --drive-size-as-quota when
   417  doing rclone ls/lsl/lsf/lsjson/etc only.
   418  
   419  If you do use this flag for syncing (not recommended) then you will
   420  need to use --ignore size also.`,
   421  			Advanced: true,
   422  			Hide:     fs.OptionHideConfigurator,
   423  		}, {
   424  			Name:     "v2_download_min_size",
   425  			Default:  fs.SizeSuffix(-1),
   426  			Help:     "If Object's are greater, use drive v2 API to download.",
   427  			Advanced: true,
   428  		}, {
   429  			Name:     "pacer_min_sleep",
   430  			Default:  defaultMinSleep,
   431  			Help:     "Minimum time to sleep between API calls.",
   432  			Advanced: true,
   433  		}, {
   434  			Name:     "pacer_burst",
   435  			Default:  defaultBurst,
   436  			Help:     "Number of API calls to allow without sleeping.",
   437  			Advanced: true,
   438  		}, {
   439  			Name:    "server_side_across_configs",
   440  			Default: false,
   441  			Help: `Allow server side operations (eg copy) to work across different drive configs.
   442  
   443  This can be useful if you wish to do a server side copy between two
   444  different Google drives.  Note that this isn't enabled by default
   445  because it isn't easy to tell if it will work between any two
   446  configurations.`,
   447  			Advanced: true,
   448  		}, {
   449  			Name:    "disable_http2",
   450  			Default: true,
   451  			Help: `Disable drive using http2
   452  
   453  There is currently an unsolved issue with the google drive backend and
   454  HTTP/2.  HTTP/2 is therefore disabled by default for the drive backend
   455  but can be re-enabled here.  When the issue is solved this flag will
   456  be removed.
   457  
   458  See: https://github.com/rclone/rclone/issues/3631
   459  
   460  `,
   461  			Advanced: true,
   462  		}, {
   463  			Name:    "stop_on_upload_limit",
   464  			Default: false,
   465  			Help: `Make upload limit errors be fatal
   466  
   467  At the time of writing it is only possible to upload 750GB of data to
   468  Google Drive a day (this is an undocumented limit). When this limit is
   469  reached Google Drive produces a slightly different error message. When
   470  this flag is set it causes these errors to be fatal.  These will stop
   471  the in-progress sync.
   472  
   473  Note that this detection is relying on error message strings which
   474  Google don't document so it may break in the future.
   475  
   476  See: https://github.com/rclone/rclone/issues/3857
   477  `,
   478  			Advanced: true,
   479  		}, {
   480  			Name: "skip_shortcuts",
   481  			Help: `If set skip shortcut files
   482  
   483  Normally rclone dereferences shortcut files making them appear as if
   484  they are the original file (see [the shortcuts section](#shortcuts)).
   485  If this flag is set then rclone will ignore shortcut files completely.
   486  `,
   487  			Advanced: true,
   488  			Default:  false,
   489  		}, {
   490  			Name:     config.ConfigEncoding,
   491  			Help:     config.ConfigEncodingHelp,
   492  			Advanced: true,
   493  			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
   494  			// Don't encode / as it's a valid name character in drive.
   495  			Default: encoder.EncodeInvalidUtf8,
   496  		}},
   497  	})
   498  
   499  	// register duplicate MIME types first
   500  	// this allows them to be used with mime.ExtensionsByType() but
   501  	// mime.TypeByExtension() will return the later registered MIME type
   502  	for _, m := range []map[string]string{
   503  		_mimeTypeToExtensionDuplicates, _mimeTypeToExtension, _mimeTypeToExtensionLinks,
   504  	} {
   505  		for mimeType, extension := range m {
   506  			if err := mime.AddExtensionType(extension, mimeType); err != nil {
   507  				log.Fatalf("Failed to register MIME type %q: %v", mimeType, err)
   508  			}
   509  		}
   510  	}
   511  }
   512  
// Options defines the configuration for this backend.
//
// Each field maps to a config key via its `config` tag; see the option
// declarations in init for the user-facing help text.
type Options struct {
	Scope                     string               `config:"scope"`
	RootFolderID              string               `config:"root_folder_id"`
	ServiceAccountFile        string               `config:"service_account_file"`
	ServiceAccountCredentials string               `config:"service_account_credentials"`
	TeamDriveID               string               `config:"team_drive"`
	AuthOwnerOnly             bool                 `config:"auth_owner_only"`
	UseTrash                  bool                 `config:"use_trash"`
	SkipGdocs                 bool                 `config:"skip_gdocs"`
	SkipChecksumGphotos       bool                 `config:"skip_checksum_gphotos"`
	SharedWithMe              bool                 `config:"shared_with_me"`
	TrashedOnly               bool                 `config:"trashed_only"`
	Extensions                string               `config:"formats"` // deprecated - superseded by ExportExtensions
	ExportExtensions          string               `config:"export_formats"`
	ImportExtensions          string               `config:"import_formats"`
	AllowImportNameChange     bool                 `config:"allow_import_name_change"`
	UseCreatedDate            bool                 `config:"use_created_date"`
	UseSharedDate             bool                 `config:"use_shared_date"`
	ListChunk                 int64                `config:"list_chunk"`
	Impersonate               string               `config:"impersonate"`
	AlternateExport           bool                 `config:"alternate_export"`
	UploadCutoff              fs.SizeSuffix        `config:"upload_cutoff"`
	ChunkSize                 fs.SizeSuffix        `config:"chunk_size"`
	AcknowledgeAbuse          bool                 `config:"acknowledge_abuse"`
	KeepRevisionForever       bool                 `config:"keep_revision_forever"`
	SizeAsQuota               bool                 `config:"size_as_quota"`
	V2DownloadMinSize         fs.SizeSuffix        `config:"v2_download_min_size"`
	PacerMinSleep             fs.Duration          `config:"pacer_min_sleep"`
	PacerBurst                int                  `config:"pacer_burst"`
	ServerSideAcrossConfigs   bool                 `config:"server_side_across_configs"`
	DisableHTTP2              bool                 `config:"disable_http2"`
	StopOnUploadLimit         bool                 `config:"stop_on_upload_limit"`
	SkipShortcuts             bool                 `config:"skip_shortcuts"`
	Enc                       encoder.MultiEncoder `config:"encoding"`
}
   549  
// Fs represents a remote drive server
type Fs struct {
	name             string             // name of this remote
	root             string             // the path we are working on
	opt              Options            // parsed options
	features         *fs.Features       // optional features
	svc              *drive.Service     // the connection to the drive server
	v2Svc            *drive_v2.Service  // used to create download links for the v2 api
	client           *http.Client       // authorized client
	rootFolderID     string             // the id of the root folder
	dirCache         *dircache.DirCache // Map of directory path to directory id
	pacer            *fs.Pacer          // To pace the API calls
	exportExtensions []string           // preferred extensions to download docs
	importMimeTypes  []string           // MIME types to convert to docs
	isTeamDrive      bool               // true if this is a team drive
	fileFields       googleapi.Field    // fields to fetch file info with
	m                configmap.Mapper   // config map this remote was created from — NOTE(review): presumably used to persist discovered config; confirm against NewFs
	grouping         int32               // number of IDs to search at once in ListR - read with atomic
	listRmu          *sync.Mutex         // protects listRempties
	listRempties     map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable
}
   571  
// baseObject holds the metadata common to all drive object variants
// (regular files, exported documents and link files).
type baseObject struct {
	fs           *Fs    // what this object is part of
	remote       string // The remote path
	id           string // Drive Id of this object
	modifiedDate string // RFC3339 time it was last modified
	mimeType     string // The object MIME type
	bytes        int64  // size of the object
	parents      int    // number of parents
}

// documentObject is a Google doc which is downloaded via an export
// conversion, gaining an extra extension on its remote name.
type documentObject struct {
	baseObject
	url              string // Download URL of this object
	documentMimeType string // the original document MIME type
	extLen           int    // The length of the added export extension
}

// linkObject is a Google doc exported as a small generated link file
// (.url/.desktop/.webloc/.link.html).
type linkObject struct {
	baseObject
	content []byte // The file content generated by a link template
	extLen  int    // The length of the added export extension
}
   592  
// Object describes a drive object (a regular binary file, as opposed to
// an exported document or link file)
type Object struct {
	baseObject
	url        string // Download URL of this object
	md5sum     string // md5sum of the object
	v2Download bool   // generate v2 download link ondemand
}
   600  
   601  // ------------------------------------------------------------
   602  
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
   607  
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}
   612  
   613  // String converts this Fs to a string
   614  func (f *Fs) String() string {
   615  	return fmt.Sprintf("Google drive root '%s'", f.root)
   616  }
   617  
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}
   622  
   623  // shouldRetry determines whether a given err rates being retried
   624  func (f *Fs) shouldRetry(err error) (bool, error) {
   625  	if err == nil {
   626  		return false, nil
   627  	}
   628  	if fserrors.ShouldRetry(err) {
   629  		return true, err
   630  	}
   631  	switch gerr := err.(type) {
   632  	case *googleapi.Error:
   633  		if gerr.Code >= 500 && gerr.Code < 600 {
   634  			// All 5xx errors should be retried
   635  			return true, err
   636  		}
   637  		if len(gerr.Errors) > 0 {
   638  			reason := gerr.Errors[0].Reason
   639  			if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
   640  				if f.opt.StopOnUploadLimit && gerr.Errors[0].Message == "User rate limit exceeded." {
   641  					fs.Errorf(f, "Received upload limit error: %v", err)
   642  					return false, fserrors.FatalError(err)
   643  				}
   644  				return true, err
   645  			} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
   646  				fs.Errorf(f, "Received team drive file limit error: %v", err)
   647  				return false, fserrors.FatalError(err)
   648  			}
   649  		}
   650  	}
   651  	return false, err
   652  }
   653  
   654  // parseParse parses a drive 'url'
   655  func parseDrivePath(path string) (root string, err error) {
   656  	root = strings.Trim(path, "/")
   657  	return
   658  }
   659  
// listFn is the user callback invoked for each File item found by list.
//
// Returning true stops the listing early.
type listFn func(*drive.File) bool
   664  
   665  func containsString(slice []string, s string) bool {
   666  	for _, e := range slice {
   667  		if e == s {
   668  			return true
   669  		}
   670  	}
   671  	return false
   672  }
   673  
// getFile returns drive.File for the ID passed and fields passed in.
//
// The call is paced via CallNoRetry — a single attempt is made here;
// shouldRetry still classifies the error for the pacer's bookkeeping.
// NOTE(review): callers wanting retries appear to provide them
// themselves — confirm against call sites.
func (f *Fs) getFile(ID string, fields googleapi.Field) (info *drive.File, err error) {
	err = f.pacer.CallNoRetry(func() (bool, error) {
		info, err = f.svc.Files.Get(ID).
			Fields(fields).
			SupportsAllDrives(true). // so IDs on shared (team) drives resolve too
			Do()
		return f.shouldRetry(err)
	})
	return info, err
}
   685  
// getRootID returns the canonical ID for the "root" alias by asking the
// Drive API to resolve it to a real file ID.
func (f *Fs) getRootID() (string, error) {
	info, err := f.getFile("root", "id")
	if err != nil {
		return "", errors.Wrap(err, "couldn't find root directory ID")
	}
	return info.Id, nil
}
   694  
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// dirIDs is the set of parent directory IDs to search, title (optional)
// restricts the search to items with that name, and includeAll also
// includes trashed items.
//
// Search params: https://developers.google.com/drive/search-parameters
func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) {
	var query []string
	if !includeAll {
		// Filter on trashed state. When listing only trashed items,
		// folders are still included (presumably so the directory tree
		// remains traversable).
		q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly)
		if f.opt.TrashedOnly {
			q = fmt.Sprintf("(mimeType='%s' or %s)", driveFolderType, q)
		}
		query = append(query, q)
	}
	// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents)
	// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
	// If we need to list file inside those shared folders, we must search it without sharedWithMe
	parentsQuery := bytes.NewBufferString("(")
	for _, dirID := range dirIDs {
		if dirID == "" {
			continue
		}
		if parentsQuery.Len() > 1 {
			_, _ = parentsQuery.WriteString(" or ")
		}
		if f.opt.SharedWithMe && dirID == f.rootFolderID {
			_, _ = parentsQuery.WriteString("sharedWithMe=true")
		} else {
			_, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
		}
	}
	if parentsQuery.Len() > 1 {
		_ = parentsQuery.WriteByte(')')
		query = append(query, parentsQuery.String())
	}
	// stems holds the title with each matching export extension removed;
	// used below to match google documents by their exported name
	var stems []string
	if title != "" {
		searchTitle := f.opt.Enc.FromStandardName(title)
		// Escaping the backslash isn't documented but seems to work
		searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
		searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)

		var titleQuery bytes.Buffer
		_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
		if !directoriesOnly && !f.opt.SkipGdocs {
			// If the search title has an extension that is in the export extensions add a search
			// for the filename without the extension.
			// Assume that export extensions don't contain escape sequences.
			for _, ext := range f.exportExtensions {
				if strings.HasSuffix(searchTitle, ext) {
					stems = append(stems, title[:len(title)-len(ext)])
					_, _ = fmt.Fprintf(&titleQuery, " or name='%s'", searchTitle[:len(searchTitle)-len(ext)])
				}
			}
		}
		_ = titleQuery.WriteByte(')')
		query = append(query, titleQuery.String())
	}
	if directoriesOnly {
		// Shortcuts are included here since they may point at directories
		query = append(query, fmt.Sprintf("(mimeType='%s' or mimeType='%s')", driveFolderType, shortcutMimeType))
	}
	if filesOnly {
		query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
	}
	list := f.svc.Files.List()
	if len(query) > 0 {
		list.Q(strings.Join(query, " and "))
		// fmt.Printf("list Query = %q\n", query)
	}
	if f.opt.ListChunk > 0 {
		list.PageSize(f.opt.ListChunk)
	}
	list.SupportsAllDrives(true)
	list.IncludeItemsFromAllDrives(true)
	if f.isTeamDrive {
		list.DriveId(f.opt.TeamDriveID)
		list.Corpora("drive")
	}
	// If using appDataFolder then need to add Spaces
	if f.rootFolderID == "appDataFolder" {
		list.Spaces("appDataFolder")
	}

	fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields)

	// Page through the results, stopping early if fn returns true
OUTER:
	for {
		var files *drive.FileList
		err = f.pacer.Call(func() (bool, error) {
			files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
			return f.shouldRetry(err)
		})
		if err != nil {
			return false, errors.Wrap(err, "couldn't list directory")
		}
		if files.IncompleteSearch {
			fs.Errorf(f, "search result INCOMPLETE")
		}
		for _, item := range files.Files {
			item.Name = f.opt.Enc.ToStandardName(item.Name)
			if isShortcut(item) {
				// ignore shortcuts if directed
				if f.opt.SkipShortcuts {
					continue
				}
				// skip file shortcuts if directory only
				if directoriesOnly && item.ShortcutDetails.TargetMimeType != driveFolderType {
					continue
				}
				// skip directory shortcuts if file only
				if filesOnly && item.ShortcutDetails.TargetMimeType == driveFolderType {
					continue
				}
				item, err = f.resolveShortcut(item)
				if err != nil {
					return false, errors.Wrap(err, "list")
				}
			}
			// Check the case of items is correct since
			// the `=` operator is case insensitive.
			if title != "" && title != item.Name {
				// The raw name didn't match - it may still be a google
				// document whose exported name (stem + extension) matches
				found := false
				for _, stem := range stems {
					if stem == item.Name {
						found = true
						break
					}
				}
				if !found {
					continue
				}
				_, exportName, _, _ := f.findExportFormat(item)
				if exportName == "" || exportName != title {
					continue
				}
			}
			if fn(item) {
				found = true
				break OUTER
			}
		}
		if files.NextPageToken == "" {
			break
		}
		list.PageToken(files.NextPageToken)
	}
	return
}
   843  
   844  // Returns true of x is a power of 2 or zero
   845  func isPowerOfTwo(x int64) bool {
   846  	switch {
   847  	case x == 0:
   848  		return true
   849  	case x < 0:
   850  		return false
   851  	default:
   852  		return (x & (x - 1)) == 0
   853  	}
   854  }
   855  
   856  // add a charset parameter to all text/* MIME types
   857  func fixMimeType(mimeTypeIn string) string {
   858  	if mimeTypeIn == "" {
   859  		return ""
   860  	}
   861  	mediaType, param, err := mime.ParseMediaType(mimeTypeIn)
   862  	if err != nil {
   863  		return mimeTypeIn
   864  	}
   865  	mimeTypeOut := mimeTypeIn
   866  	if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
   867  		param["charset"] = "utf-8"
   868  		mimeTypeOut = mime.FormatMediaType(mediaType, param)
   869  	}
   870  	if mimeTypeOut == "" {
   871  		panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
   872  	}
   873  	return mimeTypeOut
   874  }
   875  func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
   876  	out = make(map[string][]string, len(in))
   877  	for k, v := range in {
   878  		for i, mt := range v {
   879  			v[i] = fixMimeType(mt)
   880  		}
   881  		out[fixMimeType(k)] = v
   882  	}
   883  	return out
   884  }
   885  func isInternalMimeType(mimeType string) bool {
   886  	return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
   887  }
   888  func isLinkMimeType(mimeType string) bool {
   889  	return strings.HasPrefix(mimeType, "application/x-link-")
   890  }
   891  
   892  // parseExtensions parses a list of comma separated extensions
   893  // into a list of unique extensions with leading "." and a list of associated MIME types
   894  func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
   895  	for _, extensionText := range extensionsIn {
   896  		for _, extension := range strings.Split(extensionText, ",") {
   897  			extension = strings.ToLower(strings.TrimSpace(extension))
   898  			if extension == "" {
   899  				continue
   900  			}
   901  			if len(extension) > 0 && extension[0] != '.' {
   902  				extension = "." + extension
   903  			}
   904  			mt := mime.TypeByExtension(extension)
   905  			if mt == "" {
   906  				return extensions, mimeTypes, errors.Errorf("couldn't find MIME type for extension %q", extension)
   907  			}
   908  			if !containsString(extensions, extension) {
   909  				extensions = append(extensions, extension)
   910  				mimeTypes = append(mimeTypes, mt)
   911  			}
   912  		}
   913  	}
   914  	return
   915  }
   916  
   917  // Figure out if the user wants to use a team drive
   918  func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
   919  	// Stop if we are running non-interactive config
   920  	if fs.Config.AutoConfirm {
   921  		return nil
   922  	}
   923  	if opt.TeamDriveID == "" {
   924  		fmt.Printf("Configure this as a team drive?\n")
   925  	} else {
   926  		fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
   927  	}
   928  	if !config.Confirm(false) {
   929  		return nil
   930  	}
   931  	client, err := createOAuthClient(opt, name, m)
   932  	if err != nil {
   933  		return errors.Wrap(err, "config team drive failed to create oauth client")
   934  	}
   935  	svc, err := drive.New(client)
   936  	if err != nil {
   937  		return errors.Wrap(err, "config team drive failed to make drive client")
   938  	}
   939  	fmt.Printf("Fetching team drive list...\n")
   940  	var driveIDs, driveNames []string
   941  	listTeamDrives := svc.Teamdrives.List().PageSize(100)
   942  	listFailed := false
   943  	var defaultFs Fs // default Fs with default Options
   944  	for {
   945  		var teamDrives *drive.TeamDriveList
   946  		err = newPacer(opt).Call(func() (bool, error) {
   947  			teamDrives, err = listTeamDrives.Context(ctx).Do()
   948  			return defaultFs.shouldRetry(err)
   949  		})
   950  		if err != nil {
   951  			fmt.Printf("Listing team drives failed: %v\n", err)
   952  			listFailed = true
   953  			break
   954  		}
   955  		for _, drive := range teamDrives.TeamDrives {
   956  			driveIDs = append(driveIDs, drive.Id)
   957  			driveNames = append(driveNames, drive.Name)
   958  		}
   959  		if teamDrives.NextPageToken == "" {
   960  			break
   961  		}
   962  		listTeamDrives.PageToken(teamDrives.NextPageToken)
   963  	}
   964  	var driveID string
   965  	if !listFailed && len(driveIDs) == 0 {
   966  		fmt.Printf("No team drives found in your account")
   967  	} else {
   968  		driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
   969  	}
   970  	m.Set("team_drive", driveID)
   971  	opt.TeamDriveID = driveID
   972  	return nil
   973  }
   974  
   975  // newPacer makes a pacer configured for drive
   976  func newPacer(opt *Options) *fs.Pacer {
   977  	return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
   978  }
   979  
   980  // getClient makes an http client according to the options
   981  func getClient(opt *Options) *http.Client {
   982  	t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
   983  		if opt.DisableHTTP2 {
   984  			t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
   985  		}
   986  	})
   987  	return &http.Client{
   988  		Transport: t,
   989  	}
   990  }
   991  
   992  func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
   993  	scopes := driveScopes(opt.Scope)
   994  	conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
   995  	if err != nil {
   996  		return nil, errors.Wrap(err, "error processing credentials")
   997  	}
   998  	if opt.Impersonate != "" {
   999  		conf.Subject = opt.Impersonate
  1000  	}
  1001  	ctxWithSpecialClient := oauthutil.Context(getClient(opt))
  1002  	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
  1003  }
  1004  
  1005  func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
  1006  	var oAuthClient *http.Client
  1007  	var err error
  1008  
  1009  	// try loading service account credentials from env variable, then from a file
  1010  	if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
  1011  		loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
  1012  		if err != nil {
  1013  			return nil, errors.Wrap(err, "error opening service account credentials file")
  1014  		}
  1015  		opt.ServiceAccountCredentials = string(loadedCreds)
  1016  	}
  1017  	if opt.ServiceAccountCredentials != "" {
  1018  		oAuthClient, err = getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials))
  1019  		if err != nil {
  1020  			return nil, errors.Wrap(err, "failed to create oauth client from service account")
  1021  		}
  1022  	} else {
  1023  		oAuthClient, _, err = oauthutil.NewClientWithBaseClient(name, m, driveConfig, getClient(opt))
  1024  		if err != nil {
  1025  			return nil, errors.Wrap(err, "failed to create oauth client")
  1026  		}
  1027  	}
  1028  
  1029  	return oAuthClient, nil
  1030  }
  1031  
  1032  func checkUploadChunkSize(cs fs.SizeSuffix) error {
  1033  	if !isPowerOfTwo(int64(cs)) {
  1034  		return errors.Errorf("%v isn't a power of two", cs)
  1035  	}
  1036  	if cs < minChunkSize {
  1037  		return errors.Errorf("%s is less than %s", cs, minChunkSize)
  1038  	}
  1039  	return nil
  1040  }
  1041  
  1042  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
  1043  	err = checkUploadChunkSize(cs)
  1044  	if err == nil {
  1045  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
  1046  	}
  1047  	return
  1048  }
  1049  
// checkUploadCutoff validates the upload cutoff.
//
// Any cutoff is valid for drive so this always returns nil; it exists
// for symmetry with checkUploadChunkSize and is called from NewFs and
// setUploadCutoff.
func checkUploadCutoff(cs fs.SizeSuffix) error {
	return nil
}
  1053  
  1054  func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
  1055  	err = checkUploadCutoff(cs)
  1056  	if err == nil {
  1057  		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
  1058  	}
  1059  	return
  1060  }
  1061  
// NewFs constructs an Fs from the path, container:path
//
// If the path turns out to point at a file rather than a directory the
// returned Fs points at the parent directory and the error is
// fs.ErrorIsFile.
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
	ctx := context.Background()
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = checkUploadCutoff(opt.UploadCutoff)
	if err != nil {
		return nil, errors.Wrap(err, "drive: upload cutoff")
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, errors.Wrap(err, "drive: chunk size")
	}

	oAuthClient, err := createOAuthClient(opt, name, m)
	if err != nil {
		return nil, errors.Wrap(err, "drive: failed when making oauth client")
	}

	root, err := parseDrivePath(path)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:         name,
		root:         root,
		opt:          *opt,
		pacer:        newPacer(opt),
		m:            m,
		grouping:     listRGrouping,
		listRmu:      new(sync.Mutex),
		listRempties: make(map[string]struct{}),
	}
	f.isTeamDrive = opt.TeamDriveID != ""
	f.fileFields = f.getFileFields()
	f.features = (&fs.Features{
		DuplicateFiles:          true,
		ReadMimeType:            true,
		WriteMimeType:           true,
		CanHaveEmptyDirectories: true,
		ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
	}).Fill(f)

	// Create a new authorized Drive client.
	f.client = oAuthClient
	f.svc, err = drive.New(f.client)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create Drive client")
	}

	// The v2 client is only created when v2 downloads may be used
	// (v2_download_min_size >= 0)
	if f.opt.V2DownloadMinSize >= 0 {
		f.v2Svc, err = drive_v2.New(f.client)
		if err != nil {
			return nil, errors.Wrap(err, "couldn't create Drive v2 client")
		}
	}

	// If impersonating warn about root_folder_id if set and unset it
	//
	// This is because rclone v1.51 and v1.52 cached root_folder_id when
	// using impersonate which they shouldn't have done. It is possible
	// someone is using impersonate and root_folder_id in which case this
	// breaks their workflow. There isn't an easy way around that.
	if opt.RootFolderID != "" && opt.Impersonate != "" {
		fs.Logf(f, "Ignoring cached root_folder_id when using --drive-impersonate")
		opt.RootFolderID = ""
	}

	// set root folder for a team drive or query the user root folder
	if opt.RootFolderID != "" {
		// override root folder if set or cached in the config and not impersonating
		f.rootFolderID = opt.RootFolderID
	} else if f.isTeamDrive {
		f.rootFolderID = f.opt.TeamDriveID
	} else {
		// Look up the root ID and cache it in the config
		rootID, err := f.getRootID()
		if err != nil {
			if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
				// 404 means that this scope does not have permission to get the
				// root so just use "root"
				rootID = "root"
			} else {
				return nil, err
			}
		}
		f.rootFolderID = rootID
		// Don't cache the root folder ID if impersonating
		if opt.Impersonate == "" {
			m.Set("root_folder_id", rootID)
		}
	}

	f.dirCache = dircache.New(root, f.rootFolderID, f)

	// Parse extensions
	// 'formats' (Extensions) is the old name for 'export_formats'
	// (ExportExtensions) - only one may be set
	if opt.Extensions != "" {
		if opt.ExportExtensions != defaultExportExtensions {
			return nil, errors.New("only one of 'formats' and 'export_formats' can be specified")
		}
		opt.Extensions, opt.ExportExtensions = "", opt.Extensions
	}
	f.exportExtensions, _, err = parseExtensions(opt.ExportExtensions, defaultExportExtensions)
	if err != nil {
		return nil, err
	}

	_, f.importMimeTypes, err = parseExtensions(opt.ImportExtensions)
	if err != nil {
		return nil, err
	}

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.NewObject(ctx, remote)
		if err != nil {
			// unable to list folder so return old f
			return f, nil
		}
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		return f, fs.ErrorIsFile
	}
	// fmt.Printf("Root id %s", f.dirCache.RootID())
	return f, nil
}
  1208  
  1209  func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
  1210  	modifiedDate := info.ModifiedTime
  1211  	if f.opt.UseCreatedDate {
  1212  		modifiedDate = info.CreatedTime
  1213  	} else if f.opt.UseSharedDate && info.SharedWithMeTime != "" {
  1214  		modifiedDate = info.SharedWithMeTime
  1215  	}
  1216  	size := info.Size
  1217  	if f.opt.SizeAsQuota {
  1218  		size = info.QuotaBytesUsed
  1219  	}
  1220  	return baseObject{
  1221  		fs:           f,
  1222  		remote:       remote,
  1223  		id:           info.Id,
  1224  		modifiedDate: modifiedDate,
  1225  		mimeType:     info.MimeType,
  1226  		bytes:        size,
  1227  		parents:      len(info.Parents),
  1228  	}
  1229  }
  1230  
  1231  // getFileFields gets the fields for a normal file Get or List
  1232  func (f *Fs) getFileFields() (fields googleapi.Field) {
  1233  	fields = partialFields
  1234  	if f.opt.AuthOwnerOnly {
  1235  		fields += ",owners"
  1236  	}
  1237  	if f.opt.UseSharedDate {
  1238  		fields += ",sharedWithMeTime"
  1239  	}
  1240  	if f.opt.SkipChecksumGphotos {
  1241  		fields += ",spaces"
  1242  	}
  1243  	if f.opt.SizeAsQuota {
  1244  		fields += ",quotaBytesUsed"
  1245  	}
  1246  	return fields
  1247  }
  1248  
  1249  // newRegularObject creates an fs.Object for a normal drive.File
  1250  func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
  1251  	// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
  1252  	if f.opt.SkipChecksumGphotos {
  1253  		for _, space := range info.Spaces {
  1254  			if space == "photos" {
  1255  				info.Md5Checksum = ""
  1256  				break
  1257  			}
  1258  		}
  1259  	}
  1260  	return &Object{
  1261  		baseObject: f.newBaseObject(remote, info),
  1262  		url:        fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
  1263  		md5sum:     strings.ToLower(info.Md5Checksum),
  1264  		v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
  1265  	}
  1266  }
  1267  
// newDocumentObject creates an fs.Object for a google docs drive.File
//
// extension is the export extension (with leading ".") appended to
// remote, and exportMimeType is the MIME type the document will be
// exported as.
func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
	mediaType, _, err := mime.ParseMediaType(exportMimeType)
	if err != nil {
		return nil, err
	}
	// NOTE(review): actualID appears to map a shortcut ID to the real
	// file ID - confirm against its definition
	id := actualID(info.Id)
	url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, id, url.QueryEscape(mediaType))
	if f.opt.AlternateExport {
		// Alternate export fetches directly from the docs frontend
		// instead of the Drive API export endpoint
		switch info.MimeType {
		case "application/vnd.google-apps.drawing":
			url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", id, extension[1:])
		case "application/vnd.google-apps.document":
			url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", id, extension[1:])
		case "application/vnd.google-apps.spreadsheet":
			url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", id, extension[1:])
		case "application/vnd.google-apps.presentation":
			url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", id, extension[1:])
		}
	}
	baseObject := f.newBaseObject(remote+extension, info)
	// The exported size isn't known until the document is downloaded
	baseObject.bytes = -1
	baseObject.mimeType = exportMimeType
	return &documentObject{
		baseObject:       baseObject,
		url:              url,
		documentMimeType: info.MimeType,
		extLen:           len(extension),
	}, nil
}
  1298  
  1299  // newLinkObject creates an fs.Object that represents a link a google docs drive.File
  1300  func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
  1301  	t := linkTemplate(exportMimeType)
  1302  	if t == nil {
  1303  		return nil, errors.Errorf("unsupported link type %s", exportMimeType)
  1304  	}
  1305  	var buf bytes.Buffer
  1306  	err := t.Execute(&buf, struct {
  1307  		URL, Title string
  1308  	}{
  1309  		info.WebViewLink, info.Name,
  1310  	})
  1311  	if err != nil {
  1312  		return nil, errors.Wrap(err, "executing template failed")
  1313  	}
  1314  
  1315  	baseObject := f.newBaseObject(remote+extension, info)
  1316  	baseObject.bytes = int64(buf.Len())
  1317  	baseObject.mimeType = exportMimeType
  1318  	return &linkObject{
  1319  		baseObject: baseObject,
  1320  		content:    buf.Bytes(),
  1321  		extLen:     len(extension),
  1322  	}, nil
  1323  }
  1324  
// newObjectWithInfo creates an fs.Object for any drive.File
//
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
	// If item has MD5 sum or a length it is a file stored on drive
	if info.Md5Checksum != "" || info.Size > 0 {
		return f.newRegularObject(remote, info), nil
	}

	// Otherwise work out whether it is an exportable google document
	extension, exportName, exportMimeType, isDocument := f.findExportFormat(info)
	return f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
}
  1337  
// newObjectWithExportInfo creates an fs.Object for any drive.File and the result of findExportFormat
//
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
	remote string, info *drive.File,
	extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
	// Note that resolveShortcut will have been called already if
	// we are being called from a listing. However the drive.Item
	// will have been resolved so this will do nothing.
	info, err = f.resolveShortcut(info)
	if err != nil {
		return nil, errors.Wrap(err, "new object")
	}
	switch {
	case info.MimeType == driveFolderType:
		// Folders can never be files
		return nil, fs.ErrorNotAFile
	case info.MimeType == shortcutMimeType:
		// We can only get here if f.opt.SkipShortcuts is set
		// and not from a listing. This is unlikely.
		fs.Debugf(remote, "Ignoring shortcut as skip shortcuts is set")
		return nil, fs.ErrorObjectNotFound
	case info.Md5Checksum != "" || info.Size > 0:
		// If item has MD5 sum or a length it is a file stored on drive
		return f.newRegularObject(remote, info), nil
	case f.opt.SkipGdocs:
		fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
		return nil, fs.ErrorObjectNotFound
	default:
		// If item MimeType is in the ExportFormats then it is a google doc
		if !isDocument {
			fs.Debugf(remote, "Ignoring unknown document type %q", info.MimeType)
			return nil, fs.ErrorObjectNotFound
		}
		if extension == "" {
			fs.Debugf(remote, "No export formats found for %q", info.MimeType)
			return nil, fs.ErrorObjectNotFound
		}
		// Links are rendered from a template; other documents are exported
		if isLinkMimeType(exportMimeType) {
			return f.newLinkObject(remote, info, extension, exportMimeType)
		}
		return f.newDocumentObject(remote, info, extension, exportMimeType)
	}
}
  1381  
  1382  // NewObject finds the Object at remote.  If it can't be found
  1383  // it returns the error fs.ErrorObjectNotFound.
  1384  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
  1385  	info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
  1386  	if err != nil {
  1387  		return nil, err
  1388  	}
  1389  
  1390  	remote = remote[:len(remote)-len(extension)]
  1391  	obj, err := f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
  1392  	switch {
  1393  	case err != nil:
  1394  		return nil, err
  1395  	case obj == nil:
  1396  		return nil, fs.ErrorObjectNotFound
  1397  	default:
  1398  		return obj, nil
  1399  	}
  1400  }
  1401  
  1402  // FindLeaf finds a directory of name leaf in the folder with ID pathID
  1403  func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
  1404  	// Find the leaf in pathID
  1405  	pathID = actualID(pathID)
  1406  	found, err = f.list(ctx, []string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
  1407  		if !f.opt.SkipGdocs {
  1408  			_, exportName, _, isDocument := f.findExportFormat(item)
  1409  			if exportName == leaf {
  1410  				pathIDOut = item.Id
  1411  				return true
  1412  			}
  1413  			if isDocument {
  1414  				return false
  1415  			}
  1416  		}
  1417  		if item.Name == leaf {
  1418  			pathIDOut = item.Id
  1419  			return true
  1420  		}
  1421  		return false
  1422  	})
  1423  	return pathIDOut, found, err
  1424  }
  1425  
  1426  // CreateDir makes a directory with pathID as parent and name leaf
  1427  func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
  1428  	leaf = f.opt.Enc.FromStandardName(leaf)
  1429  	// fmt.Println("Making", path)
  1430  	// Define the metadata for the directory we are going to create.
  1431  	pathID = actualID(pathID)
  1432  	createInfo := &drive.File{
  1433  		Name:        leaf,
  1434  		Description: leaf,
  1435  		MimeType:    driveFolderType,
  1436  		Parents:     []string{pathID},
  1437  	}
  1438  	var info *drive.File
  1439  	err = f.pacer.Call(func() (bool, error) {
  1440  		info, err = f.svc.Files.Create(createInfo).
  1441  			Fields("id").
  1442  			SupportsAllDrives(true).
  1443  			Do()
  1444  		return f.shouldRetry(err)
  1445  	})
  1446  	if err != nil {
  1447  		return "", err
  1448  	}
  1449  	return info.Id, nil
  1450  }
  1451  
  1452  // isAuthOwned checks if any of the item owners is the authenticated owner
  1453  func isAuthOwned(item *drive.File) bool {
  1454  	for _, owner := range item.Owners {
  1455  		if owner.Me {
  1456  			return true
  1457  		}
  1458  	}
  1459  	return false
  1460  }
  1461  
// linkTemplate returns the Template for a MIME type or nil if the
// MIME type does not represent a link
func linkTemplate(mt string) *template.Template {
	// Build the map lazily, exactly once.  template.Must only panics if
	// the compiled-in template sources are invalid, which would be a
	// programming error.
	templatesOnce.Do(func() {
		_linkTemplates = map[string]*template.Template{
			"application/x-link-desktop": template.Must(
				template.New("application/x-link-desktop").Parse(desktopTemplate)),
			"application/x-link-html": template.Must(
				template.New("application/x-link-html").Parse(htmlTemplate)),
			"application/x-link-url": template.Must(
				template.New("application/x-link-url").Parse(urlTemplate)),
			"application/x-link-webloc": template.Must(
				template.New("application/x-link-webloc").Parse(weblocTemplate)),
		}
	})
	return _linkTemplates[mt]
}
// fetchFormats fetches the Drive export and import format tables into
// the package level _exportFormats and _importFormats maps.
//
// Guarded by fetchFormatsOnce so the About call happens at most once.
// On failure both maps are set to empty so no conversions will be done.
func (f *Fs) fetchFormats() {
	fetchFormatsOnce.Do(func() {
		var about *drive.About
		var err error
		err = f.pacer.Call(func() (bool, error) {
			about, err = f.svc.About.Get().
				Fields("exportFormats,importFormats").
				Do()
			return f.shouldRetry(err)
		})
		if err != nil {
			fs.Errorf(f, "Failed to get Drive exportFormats and importFormats: %v", err)
			_exportFormats = map[string][]string{}
			_importFormats = map[string][]string{}
			return
		}
		// Normalise the MIME types (adds a charset to text/* types)
		_exportFormats = fixMimeTypeMap(about.ExportFormats)
		_importFormats = fixMimeTypeMap(about.ImportFormats)
	})
}
  1499  
// exportFormats returns the export formats from drive, fetching them
// if necessary.
//
// if the fetch fails then it will not export any drive formats
func (f *Fs) exportFormats() map[string][]string {
	// fetchFormats is guarded by a sync.Once so the API is hit at most once
	f.fetchFormats()
	return _exportFormats
}
  1508  
// importFormats returns the import formats from drive, fetching them
// if necessary.
//
// if the fetch fails then it will not import any drive formats
func (f *Fs) importFormats() map[string][]string {
	// fetchFormats is guarded by a sync.Once so the API is hit at most once
	f.fetchFormats()
	return _importFormats
}
  1517  
// findExportFormatByMimeType works out the optimum export settings
// for the given MIME type.
//
// Look through the exportExtensions and find the first format that can be
// converted.  If none found then return ("", "", false)
func (f *Fs) findExportFormatByMimeType(itemMimeType string) (
	extension, mimeType string, isDocument bool) {
	// presence in the export formats map is what marks this MIME type
	// as a Google document
	exportMimeTypes, isDocument := f.exportFormats()[itemMimeType]
	if isDocument {
		// exportExtensions are in the user's preference order, so the
		// first extension with a usable MIME type wins
		for _, _extension := range f.exportExtensions {
			_mimeType := mime.TypeByExtension(_extension)
			// link formats (eg .url, .desktop) can be generated for any
			// document so they always match
			if isLinkMimeType(_mimeType) {
				return _extension, _mimeType, true
			}
			for _, emt := range exportMimeTypes {
				if emt == _mimeType {
					return _extension, emt, true
				}
				// also accept a match via the custom MIME type
				// transformation table
				if _mimeType == _mimeTypeCustomTransform[emt] {
					return _extension, emt, true
				}
			}
		}
	}

	// else return empty
	return "", "", isDocument
}
  1546  
// findExportFormat works out the optimum export settings
// for the given drive.File.
//
// Look through the exportExtensions and find the first format that can be
// converted.  If none found then return ("", "", "", false)
func (f *Fs) findExportFormat(item *drive.File) (extension, filename, mimeType string, isDocument bool) {
	extension, mimeType, isDocument = f.findExportFormatByMimeType(item.MimeType)
	if extension != "" {
		// the exported file keeps the drive name with the export
		// extension appended
		filename = item.Name + extension
	}
	return
}
  1559  
  1560  // findImportFormat finds the matching upload MIME type for a file
  1561  // If the given MIME type is in importMimeTypes, the matching upload
  1562  // MIME type is returned
  1563  //
  1564  // When no match is found "" is returned.
  1565  func (f *Fs) findImportFormat(mimeType string) string {
  1566  	mimeType = fixMimeType(mimeType)
  1567  	ifs := f.importFormats()
  1568  	for _, mt := range f.importMimeTypes {
  1569  		if mt == mimeType {
  1570  			importMimeTypes := ifs[mimeType]
  1571  			if l := len(importMimeTypes); l > 0 {
  1572  				if l > 1 {
  1573  					fs.Infof(f, "found %d import formats for %q: %q", l, mimeType, importMimeTypes)
  1574  				}
  1575  				return importMimeTypes[0]
  1576  			}
  1577  		}
  1578  	}
  1579  	return ""
  1580  }
  1581  
// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		return nil, err
	}
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}
	// strip any shortcut ID off the composite directory ID before use
	directoryID = actualID(directoryID)

	var iErr error
	_, err = f.list(ctx, []string{directoryID}, "", false, false, false, func(item *drive.File) bool {
		entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item)
		if err != nil {
			iErr = err
			return true // stop the listing on the first conversion error
		}
		// entry is nil for items which can't be represented (eg
		// ignored objects) - skip those
		if entry != nil {
			entries = append(entries, entry)
		}
		return false
	})
	if err != nil {
		return nil, err
	}
	if iErr != nil {
		return nil, iErr
	}
	// If listing the root of a teamdrive and got no entries,
	// double check we have access
	if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
		err = f.teamDriveOK(ctx)
		if err != nil {
			return nil, err
		}
	}
	return entries, nil
}
  1630  
// listREntry is a task to be executed by a listRRunner
type listREntry struct {
	id, path string // directory ID and its remote path
}
  1635  
  1636  // listRSlices is a helper struct to sort two slices at once
  1637  type listRSlices struct {
  1638  	dirs  []string
  1639  	paths []string
  1640  }
  1641  
  1642  func (s listRSlices) Sort() {
  1643  	sort.Sort(s)
  1644  }
  1645  
  1646  func (s listRSlices) Len() int {
  1647  	return len(s.dirs)
  1648  }
  1649  
  1650  func (s listRSlices) Swap(i, j int) {
  1651  	s.dirs[i], s.dirs[j] = s.dirs[j], s.dirs[i]
  1652  	s.paths[i], s.paths[j] = s.paths[j], s.paths[i]
  1653  }
  1654  
  1655  func (s listRSlices) Less(i, j int) bool {
  1656  	return s.dirs[i] < s.dirs[j]
  1657  }
  1658  
// listRRunner will read dirIDs from the in channel, perform the file listing and call cb with each DirEntry.
//
// In each cycle it will read up to grouping entries from the in channel without blocking.
// If an error occurs it will be sent to the out channel and then return. Once the in channel is closed,
// nil is sent to the out channel and the function returns.
func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listREntry, out chan<- error, cb func(fs.DirEntry) error) {
	var dirs []string
	var paths []string
	var grouping int32

	for dir := range in {
		// start a new batch with this entry, reusing the backing arrays
		dirs = append(dirs[:0], dir.id)
		paths = append(paths[:0], dir.path)
		grouping = atomic.LoadInt32(&f.grouping)
	waitloop:
		// drain up to grouping-1 more entries without blocking so we
		// can list several directories in one API call
		for i := int32(1); i < grouping; i++ {
			select {
			case d, ok := <-in:
				if !ok {
					break waitloop
				}
				dirs = append(dirs, d.id)
				paths = append(paths, d.path)
			default:
			}
		}
		// sort so sort.SearchStrings below can binary search dirs
		listRSlices{dirs, paths}.Sort()
		var iErr error
		foundItems := false
		_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
			// shared with me items have no parents when at the root
			if f.opt.SharedWithMe && len(item.Parents) == 0 && len(paths) == 1 && paths[0] == "" {
				item.Parents = dirs
			}
			for _, parent := range item.Parents {
				var i int
				foundItems = true
				earlyExit := false
				// If only one item in paths then no need to search for the ID
				// assuming google drive is doing its job properly.
				//
				// Note that we are at the root when len(paths) == 1 && paths[0] == ""
				if len(paths) == 1 {
					// don't check parents at root because
					// - shared with me items have no parents at the root
					// - if using a root alias, eg "root" or "appDataFolder" the ID won't match
					i = 0
					// items at root can have more than one parent so we need to put
					// the item in just once.
					earlyExit = true
				} else {
					// only handle parents that are in the requested dirs list if not at root
					i = sort.SearchStrings(dirs, parent)
					if i == len(dirs) || dirs[i] != parent {
						continue
					}
				}
				remote := path.Join(paths[i], item.Name)
				entry, err := f.itemToDirEntry(remote, item)
				if err != nil {
					iErr = err
					return true
				}

				err = cb(entry)
				if err != nil {
					iErr = err
					return true
				}

				// If didn't check parents then insert only once
				if earlyExit {
					break
				}
			}
			return false
		})
		// Found no items in more than one directory. Retry these as
		// individual directories This is to work around a bug in google
		// drive where (A in parents) or (B in parents) returns nothing
		// sometimes. See #3114, #4289 and
		// https://issuetracker.google.com/issues/149522397
		if len(dirs) > 1 && !foundItems {
			if atomic.SwapInt32(&f.grouping, 1) != 1 {
				fs.Logf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
			}
			var recycled = make([]listREntry, len(dirs))
			f.listRmu.Lock()
			for i := range dirs {
				recycled[i] = listREntry{id: dirs[i], path: paths[i]}
				// Make a note of these dirs - if they all turn
				// out to be empty then we can re-enable grouping
				f.listRempties[dirs[i]] = struct{}{}
			}
			f.listRmu.Unlock()
			// recycle these in the background so we don't deadlock
			// the listR runners if they all get here
			wg.Add(len(recycled))
			go func() {
				for _, entry := range recycled {
					in <- entry
				}
				fs.Debugf(f, "Recycled %d entries", len(recycled))
			}()
		}
		// If using a grouping of 1 and dir was empty then check to see if it
		// is part of the group that caused grouping to be disabled.
		if grouping == 1 && len(dirs) == 1 && !foundItems {
			f.listRmu.Lock()
			if _, found := f.listRempties[dirs[0]]; found {
				// Remove the ID
				delete(f.listRempties, dirs[0])
				// If no empties left => all the directories that
				// triggered the grouping being set to 1 were actually
				// empty so must have made a mistake
				if len(f.listRempties) == 0 {
					if atomic.SwapInt32(&f.grouping, listRGrouping) != listRGrouping {
						fs.Logf(f, "Re-enabling ListR as previous detection was in error")
					}
				}
			}
			f.listRmu.Unlock()
		}

		// mark every directory in this batch as done
		for range dirs {
			wg.Done()
		}

		if iErr != nil {
			out <- iErr
			return
		}

		if err != nil {
			out <- err
			return
		}
	}
	out <- nil
}
  1799  
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order.  If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		return err
	}
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}
	// strip any shortcut ID off the composite directory ID before use
	directoryID = actualID(directoryID)

	mu := sync.Mutex{} // protects in and overflow
	wg := sync.WaitGroup{}
	in := make(chan listREntry, listRInputBuffer)
	out := make(chan error, fs.Config.Checkers)
	list := walk.NewListRHelper(callback)
	overflow := []listREntry{} // directories which wouldn't fit in the in channel
	listed := 0                // number of entries sent to the callback

	// cb is called by the runners for every entry found - it queues
	// newly discovered directories for listing and passes everything
	// on to the ListR helper
	cb := func(entry fs.DirEntry) error {
		mu.Lock()
		defer mu.Unlock()
		if d, isDir := entry.(*fs.Dir); isDir && in != nil {
			job := listREntry{actualID(d.ID()), d.Remote()}
			select {
			case in <- job:
				// Adding the wg after we've entered the item is
				// safe here because we know when the callback
				// is called we are holding a waitgroup.
				wg.Add(1)
			default:
				// in is full - stash the job for the draining
				// goroutine below to re-queue
				overflow = append(overflow, job)
			}
		}
		listed++
		return list.Add(entry)
	}

	// seed the workers with the starting directory
	wg.Add(1)
	in <- listREntry{directoryID, dir}

	for i := 0; i < fs.Config.Checkers; i++ {
		go f.listRRunner(ctx, &wg, in, out, cb)
	}
	go func() {
		// wait until all directories are processed
		wg.Wait()
		// if the input channel overflowed add the collected entries to the channel now
		for len(overflow) > 0 {
			mu.Lock()
			l := len(overflow)
			// only fill half of the channel to prevent entries being put into overflow again
			if l > listRInputBuffer/2 {
				l = listRInputBuffer / 2
			}
			wg.Add(l)
			for _, d := range overflow[:l] {
				in <- d
			}
			overflow = overflow[l:]
			mu.Unlock()

			// wait again for the completion of all directories
			wg.Wait()
		}
		mu.Lock()
		if in != nil {
			// notify all workers to exit
			close(in)
			in = nil
		}
		mu.Unlock()
	}()
	// wait until all workers finish
	for i := 0; i < fs.Config.Checkers; i++ {
		e := <-out
		mu.Lock()
		// if one worker returns an error early, close the input so all other workers exit
		if e != nil && in != nil {
			err = e
			close(in)
			in = nil
		}
		mu.Unlock()
	}

	close(out)
	if err != nil {
		return err
	}

	err = list.Flush()
	if err != nil {
		return err
	}

	// If listing the root of a teamdrive and got no entries,
	// double check we have access
	if f.isTeamDrive && listed == 0 && f.root == "" && dir == "" {
		err = f.teamDriveOK(ctx)
		if err != nil {
			return err
		}
	}

	return nil
}
  1923  
  1924  const shortcutSeparator = '\t'
  1925  
  1926  // joinID adds an actual drive ID to the shortcut ID it came from
  1927  //
  1928  // directoryIDs in the dircache are these composite directory IDs so
  1929  // we must always unpack them before use.
  1930  func joinID(actual, shortcut string) string {
  1931  	return actual + string(shortcutSeparator) + shortcut
  1932  }
  1933  
  1934  // splitID separates an actual ID and a shortcut ID from a composite
  1935  // ID. If there was no shortcut ID then it will return "" for it.
  1936  func splitID(compositeID string) (actualID, shortcutID string) {
  1937  	i := strings.IndexRune(compositeID, shortcutSeparator)
  1938  	if i < 0 {
  1939  		return compositeID, ""
  1940  	}
  1941  	return compositeID[:i], compositeID[i+1:]
  1942  }
  1943  
  1944  // isShortcutID returns true if compositeID refers to a shortcut
  1945  func isShortcutID(compositeID string) bool {
  1946  	return strings.IndexRune(compositeID, shortcutSeparator) >= 0
  1947  }
  1948  
  1949  // actualID returns an actual ID from a composite ID
  1950  func actualID(compositeID string) (actualID string) {
  1951  	actualID, _ = splitID(compositeID)
  1952  	return actualID
  1953  }
  1954  
  1955  // shortcutID returns a shortcut ID from a composite ID if available,
  1956  // or the actual ID if not.
  1957  func shortcutID(compositeID string) (shortcutID string) {
  1958  	actualID, shortcutID := splitID(compositeID)
  1959  	if shortcutID != "" {
  1960  		return shortcutID
  1961  	}
  1962  	return actualID
  1963  }
  1964  
  1965  // isShortcut returns true of the item is a shortcut
  1966  func isShortcut(item *drive.File) bool {
  1967  	return item.MimeType == shortcutMimeType && item.ShortcutDetails != nil
  1968  }
  1969  
// Dereference shortcut if required. It returns the newItem (which may
// be just item).
//
// If we return a new item then the ID will be adjusted to be a
// composite of the actual ID and the shortcut ID. This is to make
// sure that we have decided in all use places what we are doing with
// the ID.
//
// Note that we assume shortcuts can't point to shortcuts. Google
// drive web interface doesn't offer the option to create a shortcut
// to a shortcut. The documentation is silent on the issue.
func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error) {
	// pass non-shortcut items (or all items if shortcut resolution is
	// disabled) straight through
	if f.opt.SkipShortcuts || item.MimeType != shortcutMimeType {
		return item, nil
	}
	if item.ShortcutDetails == nil {
		// shouldn't happen for a shortcut MIME type - log and carry on
		// with the unresolved item
		fs.Errorf(nil, "Expecting shortcutDetails in %v", item)
		return item, nil
	}
	// fetch the target of the shortcut
	newItem, err = f.getFile(item.ShortcutDetails.TargetId, f.fileFields)
	if err != nil {
		return nil, errors.Wrap(err, "failed to resolve shortcut")
	}
	// make sure we use the Name, Parents and Trashed from the original item
	newItem.Name = item.Name
	newItem.Parents = item.Parents
	newItem.Trashed = item.Trashed
	// the new ID is a composite ID
	newItem.Id = joinID(newItem.Id, item.Id)
	return newItem, nil
}
  2001  
  2002  // itemToDirEntry converts a drive.File to an fs.DirEntry.
  2003  // When the drive.File cannot be represented as an fs.DirEntry
  2004  // (nil, nil) is returned.
  2005  func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry, err error) {
  2006  	switch {
  2007  	case item.MimeType == driveFolderType:
  2008  		// cache the directory ID for later lookups
  2009  		f.dirCache.Put(remote, item.Id)
  2010  		when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
  2011  		d := fs.NewDir(remote, when).SetID(item.Id)
  2012  		return d, nil
  2013  	case f.opt.AuthOwnerOnly && !isAuthOwned(item):
  2014  		// ignore object
  2015  	default:
  2016  		entry, err = f.newObjectWithInfo(remote, item)
  2017  		if err == fs.ErrorObjectNotFound {
  2018  			return nil, nil
  2019  		}
  2020  		return entry, err
  2021  	}
  2022  	return nil, nil
  2023  }
  2024  
  2025  // Creates a drive.File info from the parameters passed in.
  2026  //
  2027  // Used to create new objects
  2028  func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Time) (*drive.File, error) {
  2029  	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
  2030  	if err != nil {
  2031  		return nil, err
  2032  	}
  2033  	directoryID = actualID(directoryID)
  2034  
  2035  	leaf = f.opt.Enc.FromStandardName(leaf)
  2036  	// Define the metadata for the file we are going to create.
  2037  	createInfo := &drive.File{
  2038  		Name:         leaf,
  2039  		Description:  leaf,
  2040  		Parents:      []string{directoryID},
  2041  		ModifiedTime: modTime.Format(timeFormatOut),
  2042  	}
  2043  	return createInfo, nil
  2044  }
  2045  
  2046  // Put the object
  2047  //
  2048  // Copy the reader in to the new object which is returned
  2049  //
  2050  // The new object may have been created if an error is returned
  2051  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  2052  	exisitingObj, err := f.NewObject(ctx, src.Remote())
  2053  	switch err {
  2054  	case nil:
  2055  		return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
  2056  	case fs.ErrorObjectNotFound:
  2057  		// Not found so create it
  2058  		return f.PutUnchecked(ctx, in, src, options...)
  2059  	default:
  2060  		return nil, err
  2061  	}
  2062  }
  2063  
  2064  // PutStream uploads to the remote path with the modTime given of indeterminate size
  2065  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  2066  	return f.Put(ctx, in, src, options...)
  2067  }
  2068  
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime(ctx)
	srcMimeType := fs.MimeTypeFromName(remote)
	srcExt := path.Ext(remote)
	exportExt := ""
	importMimeType := ""

	// work out whether this upload should be converted into a Google doc
	if f.importMimeTypes != nil && !f.opt.SkipGdocs {
		importMimeType = f.findImportFormat(srcMimeType)

		if isInternalMimeType(importMimeType) {
			// uploading as a Google doc - strip the source extension
			// from the remote name
			remote = remote[:len(remote)-len(srcExt)]

			exportExt, _, _ = f.findExportFormatByMimeType(importMimeType)
			if exportExt == "" {
				return nil, errors.Errorf("No export format found for %q", importMimeType)
			}
			// refuse if the doc would be downloaded with a different
			// extension than it was uploaded with, unless allowed
			if exportExt != srcExt && !f.opt.AllowImportNameChange {
				return nil, errors.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
			}
		}
	}

	createInfo, err := f.createFileInfo(ctx, remote, modTime)
	if err != nil {
		return nil, err
	}
	if importMimeType != "" {
		createInfo.MimeType = importMimeType
	} else {
		createInfo.MimeType = fs.MimeTypeFromName(remote)
	}

	var info *drive.File
	if size >= 0 && size < int64(f.opt.UploadCutoff) {
		// Make the API request to upload metadata and file data.
		// Don't retry, return a retry error instead
		err = f.pacer.CallNoRetry(func() (bool, error) {
			info, err = f.svc.Files.Create(createInfo).
				Media(in, googleapi.ContentType(srcMimeType)).
				Fields(partialFields).
				SupportsAllDrives(true).
				KeepRevisionForever(f.opt.KeepRevisionForever).
				Do()
			return f.shouldRetry(err)
		})
		if err != nil {
			return nil, err
		}
	} else {
		// Upload the file in chunks
		info, err = f.Upload(ctx, in, size, srcMimeType, "", remote, createInfo)
		if err != nil {
			return nil, err
		}
	}
	return f.newObjectWithInfo(remote, info)
}
  2133  
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	if len(dirs) < 2 {
		return nil
	}
	// filter out shortcut directories - we can't merge through them
	// (reuses the dirs backing array)
	newDirs := dirs[:0]
	for _, dir := range dirs {
		if isShortcutID(dir.ID()) {
			fs.Infof(dir, "skipping shortcut directory")
			continue
		}
		newDirs = append(newDirs, dir)
	}
	dirs = newDirs
	if len(dirs) < 2 {
		return nil
	}
	dstDir := dirs[0]
	for _, srcDir := range dirs[1:] {
		// list the objects
		infos := []*drive.File{}
		_, err := f.list(ctx, []string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool {
			infos = append(infos, info)
			return false
		})
		if err != nil {
			return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
		}
		// move them into place
		for _, info := range infos {
			fs.Infof(srcDir, "merging %q", info.Name)
			// Move the file into the destination by swapping its parent
			err = f.pacer.Call(func() (bool, error) {
				_, err = f.svc.Files.Update(info.Id, nil).
					RemoveParents(srcDir.ID()).
					AddParents(dstDir.ID()).
					Fields("").
					SupportsAllDrives(true).
					Do()
				return f.shouldRetry(err)
			})
			if err != nil {
				return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
			}
		}
		// rmdir (into trash) the now empty source directory
		fs.Infof(srcDir, "removing empty directory")
		err = f.delete(ctx, srcDir.ID(), true)
		if err != nil {
			return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
		}
	}
	return nil
}
  2189  
  2190  // Mkdir creates the container if it doesn't exist
  2191  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
  2192  	err := f.dirCache.FindRoot(ctx, true)
  2193  	if err != nil {
  2194  		return err
  2195  	}
  2196  	if dir != "" {
  2197  		_, err = f.dirCache.FindDir(ctx, dir, true)
  2198  	}
  2199  	return err
  2200  }
  2201  
  2202  // delete a file or directory unconditionally by ID
  2203  func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
  2204  	return f.pacer.Call(func() (bool, error) {
  2205  		var err error
  2206  		if useTrash {
  2207  			info := drive.File{
  2208  				Trashed: true,
  2209  			}
  2210  			_, err = f.svc.Files.Update(id, &info).
  2211  				Fields("").
  2212  				SupportsAllDrives(true).
  2213  				Do()
  2214  		} else {
  2215  			err = f.svc.Files.Delete(id).
  2216  				Fields("").
  2217  				SupportsAllDrives(true).
  2218  				Do()
  2219  		}
  2220  		return f.shouldRetry(err)
  2221  	})
  2222  }
  2223  
  2224  // Rmdir deletes a directory
  2225  //
  2226  // Returns an error if it isn't empty
  2227  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
  2228  	root := path.Join(f.root, dir)
  2229  	dc := f.dirCache
  2230  	directoryID, err := dc.FindDir(ctx, dir, false)
  2231  	if err != nil {
  2232  		return err
  2233  	}
  2234  	directoryID, shortcutID := splitID(directoryID)
  2235  	// if directory is a shortcut remove it regardless
  2236  	if shortcutID != "" {
  2237  		return f.delete(ctx, shortcutID, f.opt.UseTrash)
  2238  	}
  2239  	var trashedFiles = false
  2240  	found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
  2241  		if !item.Trashed {
  2242  			fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
  2243  			return true
  2244  		}
  2245  		fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name)
  2246  		trashedFiles = true
  2247  		return false
  2248  	})
  2249  	if err != nil {
  2250  		return err
  2251  	}
  2252  	if found {
  2253  		return errors.Errorf("directory not empty")
  2254  	}
  2255  	if root != "" {
  2256  		// trash the directory if it had trashed files
  2257  		// in or the user wants to trash, otherwise
  2258  		// delete it.
  2259  		err = f.delete(ctx, directoryID, trashedFiles || f.opt.UseTrash)
  2260  		if err != nil {
  2261  			return err
  2262  		}
  2263  	}
  2264  	f.dirCache.FlushDir(dir)
  2265  	if err != nil {
  2266  		return err
  2267  	}
  2268  	return nil
  2269  }
  2270  
  2271  // Precision of the object storage system
  2272  func (f *Fs) Precision() time.Duration {
  2273  	return time.Millisecond
  2274  }
  2275  
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	var srcObj *baseObject
	ext := ""
	readDescription := false
	switch src := src.(type) {
	case *Object:
		srcObj = &src.baseObject
	case *documentObject:
		srcObj, ext = &src.baseObject, src.ext()
		readDescription = true
	case *linkObject:
		srcObj, ext = &src.baseObject, src.ext()
	default:
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	// for exported docs the remote name carries the export extension -
	// strip it to get the real drive name
	if ext != "" {
		if !strings.HasSuffix(remote, ext) {
			fs.Debugf(src, "Can't copy - not same document type")
			return nil, fs.ErrorCantCopy
		}
		remote = remote[:len(remote)-len(ext)]
	}

	// Look to see if there is an existing object
	existingObject, _ := f.NewObject(ctx, remote)

	createInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
	if err != nil {
		return nil, err
	}

	if readDescription {
		// preserve the description on copy for docs
		info, err := f.getFile(actualID(srcObj.id), "description")
		if err != nil {
			return nil, errors.Wrap(err, "failed to read description for Google Doc")
		}
		createInfo.Description = info.Description
	} else {
		// don't overwrite the description on copy for files
		// this should work for docs but it doesn't - it is probably a bug in Google Drive
		createInfo.Description = ""
	}

	// get the ID of the thing to copy - this is the shortcut if available
	id := shortcutID(srcObj.id)

	var info *drive.File
	err = f.pacer.Call(func() (bool, error) {
		info, err = f.svc.Files.Copy(id, createInfo).
			Fields(partialFields).
			SupportsAllDrives(true).
			KeepRevisionForever(f.opt.KeepRevisionForever).
			Do()
		return f.shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	newObject, err := f.newObjectWithInfo(remote, info)
	if err != nil {
		return nil, err
	}
	// the copy created a duplicate - remove the object that was there
	// before, best effort
	if existingObject != nil {
		err = existingObject.Remove(ctx)
		if err != nil {
			fs.Errorf(existingObject, "Failed to remove existing object after copy: %v", err)
		}
	}
	return newObject, nil
}
  2358  
  2359  // Purge deletes all the files and the container
  2360  //
  2361  // Optional interface: Only implement this if you have a way of
  2362  // deleting all the files quicker than just running Remove() on the
  2363  // result of List()
  2364  func (f *Fs) Purge(ctx context.Context) error {
  2365  	if f.root == "" {
  2366  		return errors.New("can't purge root directory")
  2367  	}
  2368  	if f.opt.TrashedOnly {
  2369  		return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
  2370  	}
  2371  	err := f.dirCache.FindRoot(ctx, false)
  2372  	if err != nil {
  2373  		return err
  2374  	}
  2375  	err = f.delete(ctx, shortcutID(f.dirCache.RootID()), f.opt.UseTrash)
  2376  	f.dirCache.ResetRoot()
  2377  	if err != nil {
  2378  		return err
  2379  	}
  2380  	return nil
  2381  }
  2382  
  2383  // CleanUp empties the trash
  2384  func (f *Fs) CleanUp(ctx context.Context) error {
  2385  	err := f.pacer.Call(func() (bool, error) {
  2386  		err := f.svc.Files.EmptyTrash().Context(ctx).Do()
  2387  		return f.shouldRetry(err)
  2388  	})
  2389  
  2390  	if err != nil {
  2391  		return err
  2392  	}
  2393  	return nil
  2394  }
  2395  
  2396  // teamDriveOK checks to see if we can access the team drive
  2397  func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
  2398  	if !f.isTeamDrive {
  2399  		return nil
  2400  	}
  2401  	var td *drive.Drive
  2402  	err = f.pacer.Call(func() (bool, error) {
  2403  		td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
  2404  		return f.shouldRetry(err)
  2405  	})
  2406  	if err != nil {
  2407  		return errors.Wrap(err, "failed to get Team/Shared Drive info")
  2408  	}
  2409  	fs.Debugf(f, "read info from team drive %q", td.Name)
  2410  	return err
  2411  }
  2412  
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	if f.isTeamDrive {
		err := f.teamDriveOK(ctx)
		if err != nil {
			return nil, err
		}
		// Teamdrives don't appear to have a usage API so just return empty
		return &fs.Usage{}, nil
	}
	var about *drive.About
	var err error
	err = f.pacer.Call(func() (bool, error) {
		about, err = f.svc.About.Get().Fields("storageQuota").Context(ctx).Do()
		return f.shouldRetry(err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to get Drive storageQuota")
	}
	q := about.StorageQuota
	usage := &fs.Usage{
		Used:    fs.NewUsageValue(q.UsageInDrive),           // bytes in use
		Trashed: fs.NewUsageValue(q.UsageInDriveTrash),      // bytes in trash
		Other:   fs.NewUsageValue(q.Usage - q.UsageInDrive), // other usage eg gmail in drive
	}
	// q.Limit is only set for accounts with a quota - unlimited
	// accounts get no Total/Free
	if q.Limit > 0 {
		usage.Total = fs.NewUsageValue(q.Limit)          // quota of bytes that can be used
		usage.Free = fs.NewUsageValue(q.Limit - q.Usage) // bytes which can be uploaded before reaching the quota
	}
	return usage, nil
}
  2444  
  2445  // Move src to this remote using server side move operations.
  2446  //
  2447  // This is stored with the remote path given
  2448  //
  2449  // It returns the destination Object and a possible error
  2450  //
  2451  // Will only be called if src.Fs().Name() == f.Name()
  2452  //
  2453  // If it isn't possible then return fs.ErrorCantMove
  2454  func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  2455  	var srcObj *baseObject
  2456  	ext := ""
  2457  	switch src := src.(type) {
  2458  	case *Object:
  2459  		srcObj = &src.baseObject
  2460  	case *documentObject:
  2461  		srcObj, ext = &src.baseObject, src.ext()
  2462  	case *linkObject:
  2463  		srcObj, ext = &src.baseObject, src.ext()
  2464  	default:
  2465  		fs.Debugf(src, "Can't move - not same remote type")
  2466  		return nil, fs.ErrorCantMove
  2467  	}
  2468  
  2469  	if ext != "" {
  2470  		if !strings.HasSuffix(remote, ext) {
  2471  			fs.Debugf(src, "Can't move - not same document type")
  2472  			return nil, fs.ErrorCantMove
  2473  		}
  2474  		remote = remote[:len(remote)-len(ext)]
  2475  	}
  2476  
  2477  	_, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, src.Remote(), false)
  2478  	if err != nil {
  2479  		return nil, err
  2480  	}
  2481  	srcParentID = actualID(srcParentID)
  2482  
  2483  	// Temporary Object under construction
  2484  	dstInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
  2485  	if err != nil {
  2486  		return nil, err
  2487  	}
  2488  	dstParents := strings.Join(dstInfo.Parents, ",")
  2489  	dstInfo.Parents = nil
  2490  
  2491  	// Do the move
  2492  	var info *drive.File
  2493  	err = f.pacer.Call(func() (bool, error) {
  2494  		info, err = f.svc.Files.Update(shortcutID(srcObj.id), dstInfo).
  2495  			RemoveParents(srcParentID).
  2496  			AddParents(dstParents).
  2497  			Fields(partialFields).
  2498  			SupportsAllDrives(true).
  2499  			Do()
  2500  		return f.shouldRetry(err)
  2501  	})
  2502  	if err != nil {
  2503  		return nil, err
  2504  	}
  2505  
  2506  	return f.newObjectWithInfo(remote, info)
  2507  }
  2508  
  2509  // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
  2510  func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
  2511  	id, err := f.dirCache.FindDir(ctx, remote, false)
  2512  	if err == nil {
  2513  		fs.Debugf(f, "attempting to share directory '%s'", remote)
  2514  		id = shortcutID(id)
  2515  	} else {
  2516  		fs.Debugf(f, "attempting to share single file '%s'", remote)
  2517  		o, err := f.NewObject(ctx, remote)
  2518  		if err != nil {
  2519  			return "", err
  2520  		}
  2521  		id = shortcutID(o.(fs.IDer).ID())
  2522  	}
  2523  
  2524  	permission := &drive.Permission{
  2525  		AllowFileDiscovery: false,
  2526  		Role:               "reader",
  2527  		Type:               "anyone",
  2528  	}
  2529  
  2530  	err = f.pacer.Call(func() (bool, error) {
  2531  		// TODO: On TeamDrives this might fail if lacking permissions to change ACLs.
  2532  		// Need to either check `canShare` attribute on the object or see if a sufficient permission is already present.
  2533  		_, err = f.svc.Permissions.Create(id, permission).
  2534  			Fields("").
  2535  			SupportsAllDrives(true).
  2536  			Do()
  2537  		return f.shouldRetry(err)
  2538  	})
  2539  	if err != nil {
  2540  		return "", err
  2541  	}
  2542  	return fmt.Sprintf("https://drive.google.com/open?id=%s", id), nil
  2543  }
  2544  
  2545  // DirMove moves src, srcRemote to this remote at dstRemote
  2546  // using server side move operations.
  2547  //
  2548  // Will only be called if src.Fs().Name() == f.Name()
  2549  //
  2550  // If it isn't possible then return fs.ErrorCantDirMove
  2551  //
  2552  // If destination exists then return fs.ErrorDirExists
  2553  func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
  2554  	srcFs, ok := src.(*Fs)
  2555  	if !ok {
  2556  		fs.Debugf(srcFs, "Can't move directory - not same remote type")
  2557  		return fs.ErrorCantDirMove
  2558  	}
  2559  	srcPath := path.Join(srcFs.root, srcRemote)
  2560  	dstPath := path.Join(f.root, dstRemote)
  2561  
  2562  	// Refuse to move to or from the root
  2563  	if srcPath == "" || dstPath == "" {
  2564  		fs.Debugf(src, "DirMove error: Can't move root")
  2565  		return errors.New("can't move root directory")
  2566  	}
  2567  
  2568  	// find the root src directory
  2569  	err := srcFs.dirCache.FindRoot(ctx, false)
  2570  	if err != nil {
  2571  		return err
  2572  	}
  2573  
  2574  	// find the root dst directory
  2575  	if dstRemote != "" {
  2576  		err = f.dirCache.FindRoot(ctx, true)
  2577  		if err != nil {
  2578  			return err
  2579  		}
  2580  	} else {
  2581  		if f.dirCache.FoundRoot() {
  2582  			return fs.ErrorDirExists
  2583  		}
  2584  	}
  2585  
  2586  	// Find ID of dst parent, creating subdirs if necessary
  2587  	var leaf, dstDirectoryID string
  2588  	findPath := dstRemote
  2589  	if dstRemote == "" {
  2590  		findPath = f.root
  2591  	}
  2592  	leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
  2593  	if err != nil {
  2594  		return err
  2595  	}
  2596  	dstDirectoryID = actualID(dstDirectoryID)
  2597  
  2598  	// Check destination does not exist
  2599  	if dstRemote != "" {
  2600  		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
  2601  		if err == fs.ErrorDirNotFound {
  2602  			// OK
  2603  		} else if err != nil {
  2604  			return err
  2605  		} else {
  2606  			return fs.ErrorDirExists
  2607  		}
  2608  	}
  2609  
  2610  	// Find ID of src parent
  2611  	var srcDirectoryID string
  2612  	if srcRemote == "" {
  2613  		srcDirectoryID, err = srcFs.dirCache.RootParentID()
  2614  	} else {
  2615  		_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, srcRemote, false)
  2616  	}
  2617  	if err != nil {
  2618  		return err
  2619  	}
  2620  	srcDirectoryID = actualID(srcDirectoryID)
  2621  
  2622  	// Find ID of src
  2623  	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
  2624  	if err != nil {
  2625  		return err
  2626  	}
  2627  	// Do the move
  2628  	patch := drive.File{
  2629  		Name: leaf,
  2630  	}
  2631  	err = f.pacer.Call(func() (bool, error) {
  2632  		_, err = f.svc.Files.Update(shortcutID(srcID), &patch).
  2633  			RemoveParents(srcDirectoryID).
  2634  			AddParents(dstDirectoryID).
  2635  			Fields("").
  2636  			SupportsAllDrives(true).
  2637  			Do()
  2638  		return f.shouldRetry(err)
  2639  	})
  2640  	if err != nil {
  2641  		return err
  2642  	}
  2643  	srcFs.dirCache.FlushDir(srcRemote)
  2644  	return nil
  2645  }
  2646  
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close pollIntervalChan to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
	go func() {
		// get the StartPageToken early so all changes from now on get processed
		startPageToken, err := f.changeNotifyStartPageToken()
		if err != nil {
			fs.Infof(f, "Failed to get StartPageToken: %s", err)
		}
		var ticker *time.Ticker
		var tickerC <-chan time.Time
		for {
			select {
			case pollInterval, ok := <-pollIntervalChan:
				if !ok {
					// Channel closed - stop polling and exit the goroutine
					if ticker != nil {
						ticker.Stop()
					}
					return
				}
				// Replace any existing ticker with one at the new interval
				if ticker != nil {
					ticker.Stop()
					ticker, tickerC = nil, nil
				}
				// A zero interval disables polling: tickerC stays nil so
				// the tick case below never fires
				if pollInterval != 0 {
					ticker = time.NewTicker(pollInterval)
					tickerC = ticker.C
				}
			case <-tickerC:
				// Retry fetching the start token if it failed at startup
				if startPageToken == "" {
					startPageToken, err = f.changeNotifyStartPageToken()
					if err != nil {
						fs.Infof(f, "Failed to get StartPageToken: %s", err)
						continue
					}
				}
				fs.Debugf(f, "Checking for changes on remote")
				// On error keep the old token and try again on the next tick
				startPageToken, err = f.changeNotifyRunner(ctx, notifyFunc, startPageToken)
				if err != nil {
					fs.Infof(f, "Change notify listener failure: %s", err)
				}
			}
		}
	}()
}
  2696  func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
  2697  	var startPageToken *drive.StartPageToken
  2698  	err = f.pacer.Call(func() (bool, error) {
  2699  		changes := f.svc.Changes.GetStartPageToken().SupportsAllDrives(true)
  2700  		if f.isTeamDrive {
  2701  			changes.DriveId(f.opt.TeamDriveID)
  2702  		}
  2703  		startPageToken, err = changes.Do()
  2704  		return f.shouldRetry(err)
  2705  	})
  2706  	if err != nil {
  2707  		return
  2708  	}
  2709  	return startPageToken.StartPageToken, nil
  2710  }
  2711  
// changeNotifyRunner fetches the changes starting at startPageToken,
// calls notifyFunc once for each affected path, and returns the token
// to use for the next poll.
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
	pageToken := startPageToken
	for {
		var changeList *drive.ChangeList

		err = f.pacer.Call(func() (bool, error) {
			changesCall := f.svc.Changes.List(pageToken).
				Fields("nextPageToken,newStartPageToken,changes(fileId,file(name,parents,mimeType))")
			if f.opt.ListChunk > 0 {
				changesCall.PageSize(f.opt.ListChunk)
			}
			changesCall.SupportsAllDrives(true)
			changesCall.IncludeItemsFromAllDrives(true)
			if f.isTeamDrive {
				changesCall.DriveId(f.opt.TeamDriveID)
			}
			// If using appDataFolder then need to add Spaces
			if f.rootFolderID == "appDataFolder" {
				changesCall.Spaces("appDataFolder")
			}
			changeList, err = changesCall.Context(ctx).Do()
			return f.shouldRetry(err)
		})
		if err != nil {
			return
		}

		// Collect the (path, entry type) pairs to notify for this page
		type entryType struct {
			path      string
			entryType fs.EntryType
		}
		var pathsToClear []entryType
		for _, change := range changeList.Changes {
			// find the previous path
			if path, ok := f.dirCache.GetInv(change.FileId); ok {
				if change.File != nil && change.File.MimeType != driveFolderType {
					pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
				} else {
					// change.File == nil (eg a deletion) is treated as a
					// directory change
					pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory})
				}
			}

			// find the new path
			if change.File != nil {
				change.File.Name = f.opt.Enc.ToStandardName(change.File.Name)
				changeType := fs.EntryDirectory
				if change.File.MimeType != driveFolderType {
					changeType = fs.EntryObject
				}

				// translate the parent dir of this object
				if len(change.File.Parents) > 0 {
					// only parents which are in the dir cache can be
					// translated to a path
					for _, parent := range change.File.Parents {
						if parentPath, ok := f.dirCache.GetInv(parent); ok {
							// and append the drive file name to compute the full file name
							newPath := path.Join(parentPath, change.File.Name)
							// this will now clear the actual file too
							pathsToClear = append(pathsToClear, entryType{path: newPath, entryType: changeType})
						}
					}
				} else { // a true root object that is changed
					pathsToClear = append(pathsToClear, entryType{path: change.File.Name, entryType: changeType})
				}
			}
		}

		// Notify each distinct path once only
		visitedPaths := make(map[string]struct{})
		for _, entry := range pathsToClear {
			if _, ok := visitedPaths[entry.path]; ok {
				continue
			}
			visitedPaths[entry.path] = struct{}{}
			notifyFunc(entry.path, entry.entryType)
		}

		// NewStartPageToken marks the end of the changes - hand it back
		// for the next poll; NextPageToken means there are more pages
		// of this batch to fetch
		switch {
		case changeList.NewStartPageToken != "":
			return changeList.NewStartPageToken, nil
		case changeList.NextPageToken != "":
			pageToken = changeList.NextPageToken
		default:
			return
		}
	}
}
  2797  
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	// Dropping the root forces all paths to be looked up again
	f.dirCache.ResetRoot()
}
  2803  
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	// Only MD5 checksums are supported by this backend
	return hash.Set(hash.MD5)
}
  2808  
  2809  func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
  2810  	chunkSizeInt, err := strconv.ParseInt(chunkSizeString, 10, 64)
  2811  	if err != nil {
  2812  		return errors.Wrap(err, "couldn't convert chunk size to int")
  2813  	}
  2814  	chunkSize := fs.SizeSuffix(chunkSizeInt)
  2815  	if chunkSize == f.opt.ChunkSize {
  2816  		return nil
  2817  	}
  2818  	err = checkUploadChunkSize(chunkSize)
  2819  	if err == nil {
  2820  		f.opt.ChunkSize = chunkSize
  2821  	}
  2822  	return err
  2823  }
  2824  
// changeServiceAccountFile switches the Fs over to the given service
// account file, rebuilding the oauth client and drive services.
//
// On any error all the changes are rolled back so the Fs keeps using
// its previous credentials.
func (f *Fs) changeServiceAccountFile(file string) (err error) {
	fs.Debugf(nil, "Changing Service Account File from %s to %s", f.opt.ServiceAccountFile, file)
	if file == f.opt.ServiceAccountFile {
		return nil
	}
	// Remember the old state so the deferred rollback can restore it
	oldSvc := f.svc
	oldv2Svc := f.v2Svc
	oldOAuthClient := f.client
	oldFile := f.opt.ServiceAccountFile
	oldCredentials := f.opt.ServiceAccountCredentials
	defer func() {
		// Undo all the changes instead of doing selective undo's
		if err != nil {
			f.svc = oldSvc
			f.v2Svc = oldv2Svc
			f.client = oldOAuthClient
			f.opt.ServiceAccountFile = oldFile
			f.opt.ServiceAccountCredentials = oldCredentials
		}
	}()
	// Clear the inline credentials so the new file takes effect
	f.opt.ServiceAccountFile = file
	f.opt.ServiceAccountCredentials = ""
	oAuthClient, err := createOAuthClient(&f.opt, f.name, f.m)
	if err != nil {
		return errors.Wrap(err, "drive: failed when making oauth client")
	}
	f.client = oAuthClient
	f.svc, err = drive.New(f.client)
	if err != nil {
		return errors.Wrap(err, "couldn't create Drive client")
	}
	// Only rebuild the v2 service if v2 downloads are enabled
	if f.opt.V2DownloadMinSize >= 0 {
		f.v2Svc, err = drive_v2.New(f.client)
		if err != nil {
			return errors.Wrap(err, "couldn't create Drive v2 client")
		}
	}
	return nil
}
  2864  
// Create a shortcut from (f, srcPath) to (dstFs, dstPath)
//
// Will not overwrite existing files
//
// If the source is a directory no Object can be returned, so the
// result is (nil, nil) on success.
func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPath string) (o fs.Object, err error) {
	srcFs := f
	srcPath = strings.Trim(srcPath, "/")
	dstPath = strings.Trim(dstPath, "/")
	if dstPath == "" {
		return nil, errors.New("shortcut destination can't be root directory")
	}

	// Find source
	var srcID string
	isDir := false
	if srcPath == "" {
		// source is root directory
		err = f.dirCache.FindRoot(ctx, false)
		if err != nil {
			return nil, err
		}
		srcID = f.dirCache.RootID()
		isDir = true
	} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
		// NOTE: err in this branch shadows the outer named return
		if err != fs.ErrorNotAFile {
			return nil, errors.Wrap(err, "can't find source")
		}
		// source was a directory
		srcID, err = srcFs.dirCache.FindDir(ctx, srcPath, false)
		if err != nil {
			return nil, errors.Wrap(err, "failed to find source dir")
		}
		isDir = true
	} else {
		// source was a file
		srcID = srcObj.(*Object).id
	}
	srcID = actualID(srcID) // link to underlying object not to shortcut

	// Find destination - it must not exist as a file or directory
	_, err = dstFs.NewObject(ctx, dstPath)
	if err != fs.ErrorObjectNotFound {
		if err == nil {
			err = errors.New("existing file")
		} else if err == fs.ErrorNotAFile {
			err = errors.New("existing directory")
		}
		return nil, errors.Wrap(err, "not overwriting shortcut target")
	}

	// Create destination shortcut
	createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now())
	if err != nil {
		return nil, errors.Wrap(err, "shortcut destination failed")
	}
	createInfo.MimeType = shortcutMimeType
	createInfo.ShortcutDetails = &drive.FileShortcutDetails{
		TargetId: srcID,
	}

	var info *drive.File
	// NOTE(review): CallNoRetry - presumably to avoid creating
	// duplicate shortcuts if a create succeeds but the response is
	// lost; confirm
	err = dstFs.pacer.CallNoRetry(func() (bool, error) {
		info, err = dstFs.svc.Files.Create(createInfo).
			Fields(partialFields).
			SupportsAllDrives(true).
			KeepRevisionForever(dstFs.opt.KeepRevisionForever).
			Do()
		return dstFs.shouldRetry(err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "shortcut creation failed")
	}
	if isDir {
		return nil, nil
	}
	return dstFs.newObjectWithInfo(dstPath, info)
}
  2941  
// commandHelp describes the backend specific commands served by
// (*Fs).Command below.
var commandHelp = []fs.CommandHelp{{
	Name:  "get",
	Short: "Get command for fetching the drive config parameters",
	Long: `This is a get command which will be used to fetch the various drive config parameters

Usage Examples:

    rclone backend get drive: [-o service_account_file] [-o chunk_size]
    rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
`,
	Opts: map[string]string{
		"chunk_size":           "show the current upload chunk size",
		"service_account_file": "show the current service account file",
	},
}, {
	Name:  "set",
	Short: "Set command for updating the drive config parameters",
	Long: `This is a set command which will be used to update the various drive config parameters

Usage Examples:

    rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
    rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
`,
	Opts: map[string]string{
		"chunk_size":           "update the current upload chunk size",
		"service_account_file": "update the current service account file",
	},
}, {
	Name:  "shortcut",
	Short: "Create shortcuts from files or directories",
	Long: `This command creates shortcuts from files or directories.

Usage:

    rclone backend shortcut drive: source_item destination_shortcut
    rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut

In the first example this creates a shortcut from the "source_item"
which can be a file or a directory to the "destination_shortcut". The
"source_item" and the "destination_shortcut" should be relative paths
from "drive:"

In the second example this creates a shortcut from the "source_item"
relative to "drive:" to the "destination_shortcut" relative to
"drive2:". This may fail with a permission error if the user
authenticated with "drive2:" can't read files from "drive:".
`,
	Opts: map[string]string{
		"target": "optional target remote for the shortcut destination",
	},
}}
  2994  
  2995  // Command the backend to run a named command
  2996  //
  2997  // The command run is name
  2998  // args may be used to read arguments from
  2999  // opts may be used to read optional arguments from
  3000  //
  3001  // The result should be capable of being JSON encoded
  3002  // If it is a string or a []string it will be shown to the user
  3003  // otherwise it will be JSON encoded and shown to the user like that
  3004  func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
  3005  	switch name {
  3006  	case "get":
  3007  		out := make(map[string]string)
  3008  		if _, ok := opt["service_account_file"]; ok {
  3009  			out["service_account_file"] = f.opt.ServiceAccountFile
  3010  		}
  3011  		if _, ok := opt["chunk_size"]; ok {
  3012  			out["chunk_size"] = fmt.Sprintf("%s", f.opt.ChunkSize)
  3013  		}
  3014  		return out, nil
  3015  	case "set":
  3016  		out := make(map[string]map[string]string)
  3017  		if serviceAccountFile, ok := opt["service_account_file"]; ok {
  3018  			serviceAccountMap := make(map[string]string)
  3019  			serviceAccountMap["previous"] = f.opt.ServiceAccountFile
  3020  			if err = f.changeServiceAccountFile(serviceAccountFile); err != nil {
  3021  				return out, err
  3022  			}
  3023  			f.m.Set("service_account_file", serviceAccountFile)
  3024  			serviceAccountMap["current"] = f.opt.ServiceAccountFile
  3025  			out["service_account_file"] = serviceAccountMap
  3026  		}
  3027  		if chunkSize, ok := opt["chunk_size"]; ok {
  3028  			chunkSizeMap := make(map[string]string)
  3029  			chunkSizeMap["previous"] = fmt.Sprintf("%s", f.opt.ChunkSize)
  3030  			if err = f.changeChunkSize(chunkSize); err != nil {
  3031  				return out, err
  3032  			}
  3033  			chunkSizeString := fmt.Sprintf("%s", f.opt.ChunkSize)
  3034  			f.m.Set("chunk_size", chunkSizeString)
  3035  			chunkSizeMap["current"] = chunkSizeString
  3036  			out["chunk_size"] = chunkSizeMap
  3037  		}
  3038  		return out, nil
  3039  	case "shortcut":
  3040  		if len(arg) != 2 {
  3041  			return nil, errors.New("need exactly 2 arguments")
  3042  		}
  3043  		dstFs := f
  3044  		target, ok := opt["target"]
  3045  		if ok {
  3046  			targetFs, err := cache.Get(target)
  3047  			if err != nil {
  3048  				return nil, errors.Wrap(err, "couldn't find target")
  3049  			}
  3050  			dstFs, ok = targetFs.(*Fs)
  3051  			if !ok {
  3052  				return nil, errors.New("target is not a drive backend")
  3053  			}
  3054  		}
  3055  		return f.makeShortcut(ctx, arg[0], dstFs, arg[1])
  3056  	default:
  3057  		return nil, fs.ErrorCommandNotFound
  3058  	}
  3059  }
  3060  
  3061  // ------------------------------------------------------------
  3062  
// Fs returns the parent Fs of this object
func (o *baseObject) Fs() fs.Info {
	return o.fs
}
  3067  
// String returns a description of the object - its remote path
func (o *baseObject) String() string {
	return o.remote
}
  3072  
  3073  // Return a string version
  3074  func (o *Object) String() string {
  3075  	if o == nil {
  3076  		return "<nil>"
  3077  	}
  3078  	return o.remote
  3079  }
  3080  
// Remote returns the remote path
func (o *baseObject) Remote() string {
	return o.remote
}
  3085  
  3086  // Hash returns the Md5sum of an object returning a lowercase hex string
  3087  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  3088  	if t != hash.MD5 {
  3089  		return "", hash.ErrUnsupported
  3090  	}
  3091  	return o.md5sum, nil
  3092  }
  3093  func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
  3094  	if t != hash.MD5 {
  3095  		return "", hash.ErrUnsupported
  3096  	}
  3097  	return "", nil
  3098  }
  3099  
// Size returns the size of an object in bytes as cached on the object
func (o *baseObject) Size() int64 {
	return o.bytes
}
  3104  
// getRemoteInfo returns a drive.File for the remote
//
// It is a convenience wrapper around getRemoteInfoWithExport which
// discards the export settings.
func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File, err error) {
	info, _, _, _, _, err = f.getRemoteInfoWithExport(ctx, remote)
	return
}
  3110  
// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
//
// It looks the leaf up in its parent directory listing.  Unless
// SkipGdocs is set, a leaf matching the export name of a google
// document wins and the export settings are filled in; otherwise a
// plain name match is used.
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
	info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
	if err != nil {
		// A missing parent directory means the object can't exist
		if err == fs.ErrorDirNotFound {
			return nil, "", "", "", false, fs.ErrorObjectNotFound
		}
		return nil, "", "", "", false, err
	}
	directoryID = actualID(directoryID)

	// The callback fills in the named results which the naked return
	// at the bottom delivers
	found, err := f.list(ctx, []string{directoryID}, leaf, false, false, false, func(item *drive.File) bool {
		if !f.opt.SkipGdocs {
			extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
			if exportName == leaf {
				info = item
				return true
			}
			// Documents can only match via their export name
			if isDocument {
				return false
			}
		}
		if item.Name == leaf {
			info = item
			return true
		}
		return false
	})
	if err != nil {
		return nil, "", "", "", false, err
	}
	if !found {
		return nil, "", "", "", false, fs.ErrorObjectNotFound
	}
	return
}
  3148  
  3149  // ModTime returns the modification time of the object
  3150  //
  3151  //
  3152  // It attempts to read the objects mtime and if that isn't present the
  3153  // LastModified returned in the http headers
  3154  func (o *baseObject) ModTime(ctx context.Context) time.Time {
  3155  	modTime, err := time.Parse(timeFormatIn, o.modifiedDate)
  3156  	if err != nil {
  3157  		fs.Debugf(o, "Failed to read mtime from object: %v", err)
  3158  		return time.Now()
  3159  	}
  3160  	return modTime
  3161  }
  3162  
  3163  // SetModTime sets the modification time of the drive fs object
  3164  func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
  3165  	// New metadata
  3166  	updateInfo := &drive.File{
  3167  		ModifiedTime: modTime.Format(timeFormatOut),
  3168  	}
  3169  	// Set modified date
  3170  	var info *drive.File
  3171  	err := o.fs.pacer.Call(func() (bool, error) {
  3172  		var err error
  3173  		info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
  3174  			Fields(partialFields).
  3175  			SupportsAllDrives(true).
  3176  			Do()
  3177  		return o.fs.shouldRetry(err)
  3178  	})
  3179  	if err != nil {
  3180  		return err
  3181  	}
  3182  	// Update info from read data
  3183  	o.modifiedDate = info.ModifiedTime
  3184  	return nil
  3185  }
  3186  
// Storable returns a boolean as to whether this object is storable -
// always true for drive objects
func (o *baseObject) Storable() bool {
	return true
}
  3191  
// httpResponse gets an http.Response object for the object
// using the url and method passed in
//
// On success the caller is responsible for closing res.Body.
func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
	// An empty URL means drive didn't give us a download link
	if url == "" {
		return nil, nil, errors.New("forbidden to download - check sharing permission")
	}
	req, err = http.NewRequest(method, url, nil)
	if err != nil {
		return req, nil, err
	}
	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	if o.bytes == 0 {
		// Don't supply range requests for 0 length objects as they always fail
		delete(req.Header, "Range")
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.client.Do(req)
		if err == nil {
			// Turn non-2xx statuses into an error so the pacer can
			// decide whether to retry
			err = googleapi.CheckResponse(res)
			if err != nil {
				_ = res.Body.Close() // ignore error
			}
		}
		return o.fs.shouldRetry(err)
	})
	if err != nil {
		return req, nil, err
	}
	return req, res, nil
}
  3223  
// openDocumentFile represents a documentObject open for reading.
// Updates the object size after read successfully.
type openDocumentFile struct {
	o       *documentObject // Object we are reading for
	in      io.ReadCloser   // reading from here
	bytes   int64           // number of bytes read on this connection
	eof     bool            // whether we have read end of file
	errored bool            // whether we have encountered an error during reading
}
  3233  
  3234  // Read bytes from the object - see io.Reader
  3235  func (file *openDocumentFile) Read(p []byte) (n int, err error) {
  3236  	n, err = file.in.Read(p)
  3237  	file.bytes += int64(n)
  3238  	if err != nil && err != io.EOF {
  3239  		file.errored = true
  3240  	}
  3241  	if err == io.EOF {
  3242  		file.eof = true
  3243  	}
  3244  	return
  3245  }
  3246  
  3247  // Close the object and update bytes read
  3248  func (file *openDocumentFile) Close() (err error) {
  3249  	// If end of file, update bytes read
  3250  	if file.eof && !file.errored {
  3251  		fs.Debugf(file.o, "Updating size of doc after download to %v", file.bytes)
  3252  		file.o.bytes = file.bytes
  3253  	}
  3254  	return file.in.Close()
  3255  }
  3256  
// Check it satisfies the interfaces at compile time
var _ io.ReadCloser = (*openDocumentFile)(nil)
  3259  
  3260  // Checks to see if err is a googleapi.Error with of type what
  3261  func isGoogleError(err error, what string) bool {
  3262  	if gerr, ok := err.(*googleapi.Error); ok {
  3263  		for _, error := range gerr.Errors {
  3264  			if error.Reason == what {
  3265  				return true
  3266  			}
  3267  		}
  3268  	}
  3269  	return false
  3270  }
  3271  
// open a url for reading
//
// If the download is refused with cannotDownloadAbusiveFile and the
// AcknowledgeAbuse option is set, the request is retried with
// acknowledgeAbuse=true added to the URL.
func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	_, res, err := o.httpResponse(ctx, url, "GET", options)
	if err != nil {
		if isGoogleError(err, "cannotDownloadAbusiveFile") {
			if o.fs.opt.AcknowledgeAbuse {
				// Retry acknowledging abuse
				if strings.ContainsRune(url, '?') {
					url += "&"
				} else {
					url += "?"
				}
				url += "acknowledgeAbuse=true"
				_, res, err = o.httpResponse(ctx, url, "GET", options)
			} else {
				err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
			}
		}
		if err != nil {
			return nil, errors.Wrap(err, "open file failed")
		}
	}
	return res.Body, nil
}
  3296  
  3297  // Open an object for read
  3298  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  3299  	if o.v2Download {
  3300  		var v2File *drive_v2.File
  3301  		err = o.fs.pacer.Call(func() (bool, error) {
  3302  			v2File, err = o.fs.v2Svc.Files.Get(actualID(o.id)).
  3303  				Fields("downloadUrl").
  3304  				SupportsAllDrives(true).
  3305  				Do()
  3306  			return o.fs.shouldRetry(err)
  3307  		})
  3308  		if err == nil {
  3309  			fs.Debugf(o, "Using v2 download: %v", v2File.DownloadUrl)
  3310  			o.url = v2File.DownloadUrl
  3311  			o.v2Download = false
  3312  		}
  3313  	}
  3314  	return o.baseObject.open(ctx, o.url, options...)
  3315  }
  3316  func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  3317  	// Update the size with what we are reading as it can change from
  3318  	// the HEAD in the listing to this GET. This stops rclone marking
  3319  	// the transfer as corrupted.
  3320  	var offset, end int64 = 0, -1
  3321  	var newOptions = options[:0]
  3322  	for _, o := range options {
  3323  		// Note that Range requests don't work on Google docs:
  3324  		// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
  3325  		// So do a subset of them manually
  3326  		switch x := o.(type) {
  3327  		case *fs.RangeOption:
  3328  			offset, end = x.Start, x.End
  3329  		case *fs.SeekOption:
  3330  			offset, end = x.Offset, -1
  3331  		default:
  3332  			newOptions = append(newOptions, o)
  3333  		}
  3334  	}
  3335  	options = newOptions
  3336  	if offset != 0 {
  3337  		return nil, errors.New("partial downloads are not supported while exporting Google Documents")
  3338  	}
  3339  	in, err = o.baseObject.open(ctx, o.url, options...)
  3340  	if in != nil {
  3341  		in = &openDocumentFile{o: o, in: in}
  3342  	}
  3343  	if end >= 0 {
  3344  		in = readers.NewLimitedReadCloser(in, end-offset+1)
  3345  	}
  3346  	return
  3347  }
  3348  func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  3349  	var offset, limit int64 = 0, -1
  3350  	var data = o.content
  3351  	for _, option := range options {
  3352  		switch x := option.(type) {
  3353  		case *fs.SeekOption:
  3354  			offset = x.Offset
  3355  		case *fs.RangeOption:
  3356  			offset, limit = x.Decode(int64(len(data)))
  3357  		default:
  3358  			if option.Mandatory() {
  3359  				fs.Logf(o, "Unsupported mandatory option: %v", option)
  3360  			}
  3361  		}
  3362  	}
  3363  	if l := int64(len(data)); offset > l {
  3364  		offset = l
  3365  	}
  3366  	data = data[offset:]
  3367  	if limit != -1 && limit < int64(len(data)) {
  3368  		data = data[:limit]
  3369  	}
  3370  
  3371  	return ioutil.NopCloser(bytes.NewReader(data)), nil
  3372  }
  3373  
  3374  func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
  3375  	src fs.ObjectInfo) (info *drive.File, err error) {
  3376  	// Make the API request to upload metadata and file data.
  3377  	size := src.Size()
  3378  	if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
  3379  		// Don't retry, return a retry error instead
  3380  		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  3381  			info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
  3382  				Media(in, googleapi.ContentType(uploadMimeType)).
  3383  				Fields(partialFields).
  3384  				SupportsAllDrives(true).
  3385  				KeepRevisionForever(o.fs.opt.KeepRevisionForever).
  3386  				Do()
  3387  			return o.fs.shouldRetry(err)
  3388  		})
  3389  		return
  3390  	}
  3391  	// Upload the file in chunks
  3392  	return o.fs.Upload(ctx, in, size, uploadMimeType, o.id, o.remote, updateInfo)
  3393  }
  3394  
  3395  // Update the already existing object
  3396  //
  3397  // Copy the reader into the object updating modTime and size
  3398  //
  3399  // The new object may have been created if an error is returned
  3400  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  3401  	// If o is a shortcut
  3402  	if isShortcutID(o.id) {
  3403  		// Delete it first
  3404  		err := o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash)
  3405  		if err != nil {
  3406  			return err
  3407  		}
  3408  		// Then put the file as a new file
  3409  		newObj, err := o.fs.PutUnchecked(ctx, in, src, options...)
  3410  		if err != nil {
  3411  			return err
  3412  		}
  3413  		// Update the object
  3414  		if newO, ok := newObj.(*Object); ok {
  3415  			*o = *newO
  3416  		} else {
  3417  			fs.Debugf(newObj, "Failed to update object %T from new object %T", o, newObj)
  3418  		}
  3419  		return nil
  3420  	}
  3421  	srcMimeType := fs.MimeType(ctx, src)
  3422  	updateInfo := &drive.File{
  3423  		MimeType:     srcMimeType,
  3424  		ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
  3425  	}
  3426  	info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
  3427  	if err != nil {
  3428  		return err
  3429  	}
  3430  	newO, err := o.fs.newObjectWithInfo(src.Remote(), info)
  3431  	if err != nil {
  3432  		return err
  3433  	}
  3434  	switch newO := newO.(type) {
  3435  	case *Object:
  3436  		*o = *newO
  3437  	default:
  3438  		return errors.New("object type changed by update")
  3439  	}
  3440  
  3441  	return nil
  3442  }
  3443  func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  3444  	srcMimeType := fs.MimeType(ctx, src)
  3445  	importMimeType := ""
  3446  	updateInfo := &drive.File{
  3447  		MimeType:     srcMimeType,
  3448  		ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
  3449  	}
  3450  
  3451  	if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
  3452  		return errors.Errorf("can't update google document type without --drive-import-formats")
  3453  	}
  3454  	importMimeType = o.fs.findImportFormat(updateInfo.MimeType)
  3455  	if importMimeType == "" {
  3456  		return errors.Errorf("no import format found for %q", srcMimeType)
  3457  	}
  3458  	if importMimeType != o.documentMimeType {
  3459  		return errors.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
  3460  	}
  3461  	updateInfo.MimeType = importMimeType
  3462  
  3463  	info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
  3464  	if err != nil {
  3465  		return err
  3466  	}
  3467  
  3468  	remote := src.Remote()
  3469  	remote = remote[:len(remote)-o.extLen]
  3470  
  3471  	newO, err := o.fs.newObjectWithInfo(remote, info)
  3472  	if err != nil {
  3473  		return err
  3474  	}
  3475  	switch newO := newO.(type) {
  3476  	case *documentObject:
  3477  		*o = *newO
  3478  	default:
  3479  		return errors.New("object type changed by update")
  3480  	}
  3481  
  3482  	return nil
  3483  }
  3484  
// Update is not supported for link files - their contents are
// generated from the link target, not from uploaded data.
func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return errors.New("cannot update link files")
}
  3488  
// Remove an object
//
// The delete is refused if the file has more than one parent, since a
// Drive file removed here would disappear from all of the directories
// it is linked into. The id is wrapped with shortcutID so that, for a
// shortcut, the shortcut itself is deleted (see Update above for the
// same pattern).
func (o *baseObject) Remove(ctx context.Context) error {
	if o.parents > 1 {
		return errors.New("can't delete safely - has multiple parents")
	}
	return o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash)
}
  3496  
// MimeType of an Object if known, "" otherwise
func (o *baseObject) MimeType(ctx context.Context) string {
	return o.mimeType
}
  3501  
// ID returns the ID of the Object if known, or "" if not
func (o *baseObject) ID() string {
	return o.id
}
  3506  
  3507  func (o *documentObject) ext() string {
  3508  	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
  3509  }
  3510  func (o *linkObject) ext() string {
  3511  	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
  3512  }
  3513  
// templates for document link files
//
// Each template is rendered with a value exposing .URL (and, where
// used, .Title) fields describing the link target.
const (
	// Windows .url InternetShortcut format (needs CRLF line endings,
	// hence the explicit {{"\r"}} insertions)
	urlTemplate = `[InternetShortcut]{{"\r"}}
URL={{ .URL }}{{"\r"}}
`
	// macOS .webloc property list format
	weblocTemplate = `<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
  <dict>
    <key>URL</key>
    <string>{{ .URL }}</string>
  </dict>
</plist>
`
	// freedesktop.org .desktop link format (Linux desktops)
	desktopTemplate = `[Desktop Entry]
Encoding=UTF-8
Name={{ .Title }}
URL={{ .URL }}
Icon=text-html
Type=Link
`
	// HTML page that immediately redirects to the target URL
	htmlTemplate = `<html>
<head>
  <meta http-equiv="refresh" content="0; url={{ .URL }}" />
  <title>{{ .Title }}</title>
</head>
<body>
  Loading <a href="{{ .URL }}">{{ .Title }}</a>
</body>
</html>
`
)
  3546  
// Check the interfaces are satisfied at compile time
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.CleanUpper      = (*Fs)(nil)
	_ fs.PutStreamer     = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.Commander       = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier  = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.ListRer         = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.MimeTyper       = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
	_ fs.Object          = (*documentObject)(nil)
	_ fs.MimeTyper       = (*documentObject)(nil)
	_ fs.IDer            = (*documentObject)(nil)
	_ fs.Object          = (*linkObject)(nil)
	_ fs.MimeTyper       = (*linkObject)(nil)
	_ fs.IDer            = (*linkObject)(nil)
)