github.com/rclone/rclone@v1.66.1-0.20240517100346-7b89735ae726/backend/hidrive/helpers.go

     1  package hidrive
     2  
     3  // This file is for helper-functions which may provide more general and
     4  // specialized functionality than the generic interfaces.
     5  // There are two sections:
     6  // 1. methods bound to Fs
     7  // 2. other functions independent from Fs used throughout the package
     8  
     9  // NOTE: Functions accessing paths expect any relative paths
    10  // to be resolved prior to execution with resolvePath(...).
    11  
    12  import (
    13  	"bytes"
    14  	"context"
    15  	"errors"
    16  	"io"
    17  	"net/http"
    18  	"path"
    19  	"strconv"
    20  	"sync"
    21  	"time"
    22  
    23  	"github.com/rclone/rclone/backend/hidrive/api"
    24  	"github.com/rclone/rclone/fs"
    25  	"github.com/rclone/rclone/fs/accounting"
    26  	"github.com/rclone/rclone/fs/fserrors"
    27  	"github.com/rclone/rclone/lib/ranges"
    28  	"github.com/rclone/rclone/lib/readers"
    29  	"github.com/rclone/rclone/lib/rest"
    30  	"golang.org/x/sync/errgroup"
    31  	"golang.org/x/sync/semaphore"
    32  )
    33  
    34  const (
    35  	// MaximumUploadBytes represents the maximum amount of bytes
    36  	// a single upload-operation will support.
    37  	MaximumUploadBytes = 2147483647 // = 2GiB - 1
    38  	// iterationChunkSize represents the chunk size used to iterate directory contents.
    39  	iterationChunkSize = 5000
    40  )
    41  
    42  var (
    43  	// retryErrorCodes is a slice of error codes that we will always retry.
    44  	retryErrorCodes = []int{
    45  		429, // Too Many Requests
    46  		500, // Internal Server Error
    47  		502, // Bad Gateway
    48  		503, // Service Unavailable
    49  		504, // Gateway Timeout
    50  		509, // Bandwidth Limit Exceeded
    51  	}
    52  	// ErrorFileExists is returned when a query tries to create a file
    53  	// that already exists.
    54  	ErrorFileExists = errors.New("destination file already exists")
    55  )
    56  
    57  // MemberType represents the possible types of entries a directory can contain.
    58  type MemberType string
    59  
    60  // possible values for MemberType
    61  const (
    62  	AllMembers       MemberType = "all"
    63  	NoMembers        MemberType = "none"
    64  	DirectoryMembers MemberType = api.HiDriveObjectTypeDirectory
    65  	FileMembers      MemberType = api.HiDriveObjectTypeFile
    66  	SymlinkMembers   MemberType = api.HiDriveObjectTypeSymlink
    67  )
    68  
    69  // SortByField represents possible fields to sort entries of a directory by.
    70  type SortByField string
    71  
    72  // possible values for SortByField
    73  const (
    74  	descendingSort             string      = "-"
    75  	SortByName                 SortByField = "name"
    76  	SortByModTime              SortByField = "mtime"
    77  	SortByObjectType           SortByField = "type"
    78  	SortBySize                 SortByField = "size"
    79  	SortByNameDescending       SortByField = SortByField(descendingSort) + SortByName
    80  	SortByModTimeDescending    SortByField = SortByField(descendingSort) + SortByModTime
    81  	SortByObjectTypeDescending SortByField = SortByField(descendingSort) + SortByObjectType
    82  	SortBySizeDescending       SortByField = SortByField(descendingSort) + SortBySize
    83  )
    84  
    85  var (
    86  	// Unsorted disables sorting and can therefore not be combined with other values.
    87  	Unsorted = []SortByField{"none"}
    88  	// DefaultSorted does not specify how to sort and
    89  	// therefore implies the default sort order.
    90  	DefaultSorted = []SortByField{}
    91  )
    92  
    93  // CopyOrMoveOperationType represents the possible types of copy- and move-operations.
    94  type CopyOrMoveOperationType int
    95  
    96  // possible values for CopyOrMoveOperationType
    97  const (
    98  	MoveOriginal CopyOrMoveOperationType = iota
    99  	CopyOriginal
   100  	CopyOriginalPreserveModTime
   101  )
   102  
    103  // OnExistAction represents possible actions the API should take
   104  // when a request tries to create a path that already exists.
   105  type OnExistAction string
   106  
   107  // possible values for OnExistAction
   108  const (
   109  	// IgnoreOnExist instructs the API not to execute
   110  	// the request in case of a conflict, but to return an error.
   111  	IgnoreOnExist OnExistAction = "ignore"
   112  	// AutoNameOnExist instructs the API to automatically rename
   113  	// any conflicting request-objects.
   114  	AutoNameOnExist OnExistAction = "autoname"
   115  	// OverwriteOnExist instructs the API to overwrite any conflicting files.
    116  	// This can only be used if the request operates on files directly.
   117  	// (For example when moving/copying a file.)
   118  	// For most requests this action will simply be ignored.
   119  	OverwriteOnExist OnExistAction = "overwrite"
   120  )
   121  
   122  // shouldRetry returns a boolean as to whether this resp and err deserve to be retried.
   123  // It tries to expire/invalidate the token, if necessary.
   124  // It returns the err as a convenience.
   125  func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
   126  	if fserrors.ContextError(ctx, &err) {
   127  		return false, err
   128  	}
   129  	if resp != nil && (resp.StatusCode == 401 || isHTTPError(err, 401)) && len(resp.Header["Www-Authenticate"]) > 0 {
   130  		fs.Debugf(f, "Token might be invalid: %v", err)
   131  		if f.tokenRenewer != nil {
   132  			iErr := f.tokenRenewer.Expire()
   133  			if iErr == nil {
   134  				return true, err
   135  			}
   136  		}
   137  	}
   138  	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
   139  }
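
        // The calls below all follow the same retry pattern (a sketch using the
        // same fields this file already uses, not an addition to the backend):
        //
        //	err = f.pacer.Call(func() (bool, error) {
        //		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
        //		return f.shouldRetry(ctx, resp, err)
        //	})
        //
        // so the pacer repeats exactly those requests that shouldRetry classifies
        // as retryable (invalidating the OAuth token first if it looks expired).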
   140  
   141  // resolvePath resolves the given (relative) path and
   142  // returns a path suitable for API-calls.
   143  // This will consider the root-path of the fs and any needed prefixes.
   144  //
   145  // Any relative paths passed to functions that access these paths should
   146  // be resolved with this first!
   147  func (f *Fs) resolvePath(objectPath string) string {
   148  	resolved := path.Join(f.opt.RootPrefix, f.root, f.opt.Enc.FromStandardPath(objectPath))
   149  	return resolved
   150  }
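
        // A minimal illustration (hypothetical option values, assuming an encoding
        // that leaves the names unchanged):
        //
        //	// f.opt.RootPrefix == "root/public", f.root == "backups"
        //	f.resolvePath("photos/a.jpg") // -> "root/public/backups/photos/a.jpg"
        //	f.resolvePath("")             // -> "root/public/backups"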
   151  
   152  // iterateOverDirectory calls the given function callback
   153  // on each item found in a given directory.
   154  //
   155  // If callback ever returns true then this exits early with found = true.
   156  func (f *Fs) iterateOverDirectory(ctx context.Context, directory string, searchOnly MemberType, callback func(*api.HiDriveObject) bool, fields []string, sortBy []SortByField) (found bool, err error) {
   157  	parameters := api.NewQueryParameters()
   158  	parameters.SetPath(directory)
   159  	parameters.AddFields("members.", fields...)
   160  	parameters.AddFields("", api.DirectoryContentFields...)
   161  	parameters.Set("members", string(searchOnly))
   162  	for _, v := range sortBy {
   163  		// The explicit conversion is necessary for each element.
   164  		parameters.AddList("sort", ",", string(v))
   165  	}
   166  
   167  	opts := rest.Opts{
   168  		Method:     "GET",
   169  		Path:       "/dir",
   170  		Parameters: parameters.Values,
   171  	}
   172  
   173  	iterateContent := func(result *api.DirectoryContent, err error) (bool, error) {
   174  		if err != nil {
   175  			return false, err
   176  		}
   177  		for _, item := range result.Entries {
   178  			item.Name = f.opt.Enc.ToStandardName(item.Name)
   179  			if callback(&item) {
   180  				return true, nil
   181  			}
   182  		}
   183  		return false, nil
   184  	}
   185  	return f.paginateDirectoryAccess(ctx, &opts, iterationChunkSize, 0, iterateContent)
   186  }
   187  
   188  // paginateDirectoryAccess executes requests specified via ctx and opts
   189  // which should produce api.DirectoryContent.
   190  // This will paginate the requests using limit starting at the given offset.
   191  //
   192  // The given function callback is called on each api.DirectoryContent found
   193  // along with any errors that occurred.
   194  // If callback ever returns true then this exits early with found = true.
   195  // If callback ever returns an error then this exits early with that error.
   196  func (f *Fs) paginateDirectoryAccess(ctx context.Context, opts *rest.Opts, limit int64, offset int64, callback func(*api.DirectoryContent, error) (bool, error)) (found bool, err error) {
   197  	for {
   198  		opts.Parameters.Set("limit", strconv.FormatInt(offset, 10)+","+strconv.FormatInt(limit, 10))
   199  
   200  		var result api.DirectoryContent
   201  		var resp *http.Response
   202  		err = f.pacer.Call(func() (bool, error) {
   203  			resp, err = f.srv.CallJSON(ctx, opts, nil, &result)
   204  			return f.shouldRetry(ctx, resp, err)
   205  		})
   206  
   207  		found, err = callback(&result, err)
   208  		if found || err != nil {
   209  			return found, err
   210  		}
   211  
   212  		offset += int64(len(result.Entries))
   213  		if offset >= result.TotalCount || limit > int64(len(result.Entries)) {
   214  			break
   215  		}
   216  	}
   217  	return false, nil
   218  }
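
        // Illustrative pagination (hypothetical numbers): with limit = 5000 the
        // requests use the HiDrive "limit=<offset>,<limit>" form and advance by
        // the number of entries actually returned, e.g. for 12345 entries:
        //
        //	limit=0,5000      // 5000 entries
        //	limit=5000,5000   // 5000 entries
        //	limit=10000,5000  // 2345 entries -> fewer than limit, loop ends
        //
        // The loop also ends as soon as callback reports found or returns an error.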
   219  
   220  // fetchMetadataForPath reads the metadata from the path.
   221  func (f *Fs) fetchMetadataForPath(ctx context.Context, path string, fields []string) (*api.HiDriveObject, error) {
   222  	parameters := api.NewQueryParameters()
   223  	parameters.SetPath(path)
   224  	parameters.AddFields("", fields...)
   225  
   226  	opts := rest.Opts{
   227  		Method:     "GET",
   228  		Path:       "/meta",
   229  		Parameters: parameters.Values,
   230  	}
   231  
   232  	var result api.HiDriveObject
   233  	var resp *http.Response
   234  	var err error
   235  	err = f.pacer.Call(func() (bool, error) {
   236  		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
   237  		return f.shouldRetry(ctx, resp, err)
   238  	})
   239  	if err != nil {
   240  		return nil, err
   241  	}
   242  	return &result, nil
   243  }
   244  
   245  // copyOrMove copies or moves a directory or file
   246  // from the source-path to the destination-path.
   247  //
   248  // The operation will only be successful
   249  // if the parent-directory of the destination-path exists.
   250  //
   251  // NOTE: Use the explicit methods instead of directly invoking this method.
   252  // (Those are: copyDirectory, moveDirectory, copyFile, moveFile.)
   253  func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType CopyOrMoveOperationType, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
   254  	parameters := api.NewQueryParameters()
   255  	parameters.Set("src", source)
   256  	parameters.Set("dst", destination)
   257  	if onExist == AutoNameOnExist ||
   258  		(onExist == OverwriteOnExist && !isDirectory) {
   259  		parameters.Set("on_exist", string(onExist))
   260  	}
   261  
   262  	endpoint := "/"
   263  	if isDirectory {
   264  		endpoint += "dir"
   265  	} else {
   266  		endpoint += "file"
   267  	}
   268  	switch operationType {
   269  	case MoveOriginal:
   270  		endpoint += "/move"
   271  	case CopyOriginalPreserveModTime:
   272  		parameters.Set("preserve_mtime", strconv.FormatBool(true))
   273  		fallthrough
   274  	case CopyOriginal:
   275  		endpoint += "/copy"
   276  	}
   277  
   278  	opts := rest.Opts{
   279  		Method:     "POST",
   280  		Path:       endpoint,
   281  		Parameters: parameters.Values,
   282  	}
   283  
   284  	var result api.HiDriveObject
   285  	var resp *http.Response
   286  	var err error
   287  	err = f.pacer.Call(func() (bool, error) {
   288  		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
   289  		return f.shouldRetry(ctx, resp, err)
   290  	})
   291  	if err != nil {
   292  		return nil, err
   293  	}
   294  	return &result, nil
   295  }
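
        // For illustration (hypothetical paths): moving a file translates to
        //
        //	POST /file/move?src=<source>&dst=<destination>
        //
        // and copying a directory while preserving modification times translates to
        //
        //	POST /dir/copy?src=<source>&dst=<destination>&preserve_mtime=true
        //
        // The on_exist parameter is only sent for AutoNameOnExist, or for
        // OverwriteOnExist when the operation works on files.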
   296  
   297  // moveDirectory moves the directory at the source-path to the destination-path and
   298  // returns the resulting api-object if successful.
   299  //
   300  // The operation will only be successful
   301  // if the parent-directory of the destination-path exists.
   302  func (f *Fs) moveDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
   303  	return f.copyOrMove(ctx, true, MoveOriginal, source, destination, onExist)
   304  }
   305  
   306  // copyFile copies the file at the source-path to the destination-path and
   307  // returns the resulting api-object if successful.
   308  //
   309  // The operation will only be successful
   310  // if the parent-directory of the destination-path exists.
   311  //
   312  // NOTE: This operation will expand sparse areas in the content of the source-file
   313  // to blocks of 0-bytes in the destination-file.
   314  func (f *Fs) copyFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
   315  	return f.copyOrMove(ctx, false, CopyOriginalPreserveModTime, source, destination, onExist)
   316  }
   317  
   318  // moveFile moves the file at the source-path to the destination-path and
   319  // returns the resulting api-object if successful.
   320  //
   321  // The operation will only be successful
   322  // if the parent-directory of the destination-path exists.
   323  //
   324  // NOTE: This operation may expand sparse areas in the content of the source-file
   325  // to blocks of 0-bytes in the destination-file.
   326  func (f *Fs) moveFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
   327  	return f.copyOrMove(ctx, false, MoveOriginal, source, destination, onExist)
   328  }
   329  
   330  // createDirectory creates the directory at the given path and
   331  // returns the resulting api-object if successful.
   332  //
   333  // The directory will only be created if its parent-directory exists.
   334  // This returns fs.ErrorDirNotFound if the parent-directory is not found.
   335  // This returns fs.ErrorDirExists if the directory already exists.
   336  func (f *Fs) createDirectory(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) {
   337  	parameters := api.NewQueryParameters()
   338  	parameters.SetPath(directory)
   339  	if onExist == AutoNameOnExist {
   340  		parameters.Set("on_exist", string(onExist))
   341  	}
   342  
   343  	opts := rest.Opts{
   344  		Method:     "POST",
   345  		Path:       "/dir",
   346  		Parameters: parameters.Values,
   347  	}
   348  
   349  	var result api.HiDriveObject
   350  	var resp *http.Response
   351  	var err error
   352  	err = f.pacer.Call(func() (bool, error) {
   353  		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
   354  		return f.shouldRetry(ctx, resp, err)
   355  	})
   356  
   357  	switch {
   358  	case err == nil:
   359  		return &result, nil
   360  	case isHTTPError(err, 404):
   361  		return nil, fs.ErrorDirNotFound
   362  	case isHTTPError(err, 409):
   363  		return nil, fs.ErrorDirExists
   364  	}
   365  	return nil, err
   366  }
   367  
   368  // createDirectories creates the directory at the given path
   369  // along with any missing parent directories and
   370  // returns the resulting api-object (of the created directory) if successful.
   371  //
   372  // This returns fs.ErrorDirExists if the directory already exists.
   373  //
   374  // If an error occurs while the parent directories are being created,
   375  // any directories already created will NOT be deleted again.
   376  func (f *Fs) createDirectories(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) {
   377  	result, err := f.createDirectory(ctx, directory, onExist)
   378  	if err == nil {
   379  		return result, nil
   380  	}
   381  	if err != fs.ErrorDirNotFound {
   382  		return nil, err
   383  	}
   384  	parentDirectory := path.Dir(directory)
   385  	_, err = f.createDirectories(ctx, parentDirectory, onExist)
   386  	if err != nil && err != fs.ErrorDirExists {
   387  		return nil, err
   388  	}
   389  	// NOTE: Ignoring fs.ErrorDirExists does no harm,
   390  	// since it does not mean the child directory cannot be created.
   391  	return f.createDirectory(ctx, directory, onExist)
   392  }
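
        // Worked example (hypothetical paths): creating "a/b/c" when only "a"
        // exists proceeds roughly as follows:
        //
        //	createDirectory("a/b/c")  // fs.ErrorDirNotFound, "a/b" is missing
        //	createDirectories("a/b")  // recursive call for the parent
        //	  createDirectory("a/b")  // succeeds, "a" exists
        //	createDirectory("a/b/c")  // succeeds
        //
        // An fs.ErrorDirExists from the recursive call is deliberately ignored,
        // since a parent created concurrently does not prevent creating the child.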
   393  
   394  // deleteDirectory deletes the directory at the given path.
   395  //
   396  // If recursive is false, the directory will only be deleted if it is empty.
   397  // If recursive is true, the directory will be deleted regardless of its content.
   398  // This returns fs.ErrorDirNotFound if the directory is not found.
   399  // This returns fs.ErrorDirectoryNotEmpty if the directory is not empty and
   400  // recursive is false.
   401  func (f *Fs) deleteDirectory(ctx context.Context, directory string, recursive bool) error {
   402  	parameters := api.NewQueryParameters()
   403  	parameters.SetPath(directory)
   404  	parameters.Set("recursive", strconv.FormatBool(recursive))
   405  
   406  	opts := rest.Opts{
   407  		Method:     "DELETE",
   408  		Path:       "/dir",
   409  		Parameters: parameters.Values,
   410  		NoResponse: true,
   411  	}
   412  
   413  	var resp *http.Response
   414  	var err error
   415  	err = f.pacer.Call(func() (bool, error) {
   416  		resp, err = f.srv.Call(ctx, &opts)
   417  		return f.shouldRetry(ctx, resp, err)
   418  	})
   419  
   420  	switch {
   421  	case isHTTPError(err, 404):
   422  		return fs.ErrorDirNotFound
   423  	case isHTTPError(err, 409):
   424  		return fs.ErrorDirectoryNotEmpty
   425  	}
   426  	return err
   427  }
   428  
   429  // deleteObject deletes the object/file at the given path.
   430  //
   431  // This returns fs.ErrorObjectNotFound if the object is not found.
   432  func (f *Fs) deleteObject(ctx context.Context, path string) error {
   433  	parameters := api.NewQueryParameters()
   434  	parameters.SetPath(path)
   435  
   436  	opts := rest.Opts{
   437  		Method:     "DELETE",
   438  		Path:       "/file",
   439  		Parameters: parameters.Values,
   440  		NoResponse: true,
   441  	}
   442  
   443  	var resp *http.Response
   444  	var err error
   445  	err = f.pacer.Call(func() (bool, error) {
   446  		resp, err = f.srv.Call(ctx, &opts)
   447  		return f.shouldRetry(ctx, resp, err)
   448  	})
   449  
   450  	if isHTTPError(err, 404) {
   451  		return fs.ErrorObjectNotFound
   452  	}
   453  	return err
   454  }
   455  
   456  // createFile creates a file at the given path
   457  // with the content of the io.ReadSeeker.
   458  // This guarantees that existing files will not be overwritten.
   459  // The maximum size of the content is limited by MaximumUploadBytes.
   460  // The io.ReadSeeker should be resettable by seeking to its start.
   461  // If modTime is not the zero time instant,
   462  // it will be set as the file's modification time after the operation.
   463  //
   464  // This returns fs.ErrorDirNotFound
   465  // if the parent directory of the file is not found.
   466  // This returns ErrorFileExists if a file already exists at the specified path.
   467  func (f *Fs) createFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time, onExist OnExistAction) (*api.HiDriveObject, error) {
   468  	parameters := api.NewQueryParameters()
   469  	parameters.SetFileInDirectory(path)
   470  	if onExist == AutoNameOnExist {
   471  		parameters.Set("on_exist", string(onExist))
   472  	}
   473  
   474  	var err error
   475  	if !modTime.IsZero() {
   476  		err = parameters.SetTime("mtime", modTime)
   477  		if err != nil {
   478  			return nil, err
   479  		}
   480  	}
   481  
   482  	opts := rest.Opts{
   483  		Method:      "POST",
   484  		Path:        "/file",
   485  		Body:        content,
   486  		ContentType: "application/octet-stream",
   487  		Parameters:  parameters.Values,
   488  	}
   489  
   490  	var result api.HiDriveObject
   491  	var resp *http.Response
   492  	err = f.pacer.Call(func() (bool, error) {
   493  		// Reset the reading index (in case this is a retry).
   494  		if _, err = content.Seek(0, io.SeekStart); err != nil {
   495  			return false, err
   496  		}
   497  		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
   498  		return f.shouldRetry(ctx, resp, err)
   499  	})
   500  
   501  	switch {
   502  	case err == nil:
   503  		return &result, nil
   504  	case isHTTPError(err, 404):
   505  		return nil, fs.ErrorDirNotFound
   506  	case isHTTPError(err, 409):
   507  		return nil, ErrorFileExists
   508  	}
   509  	return nil, err
   510  }
   511  
   512  // overwriteFile updates the content of the file at the given path
   513  // with the content of the io.ReadSeeker.
   514  // If the file does not exist it will be created.
   515  // The maximum size of the content is limited by MaximumUploadBytes.
   516  // The io.ReadSeeker should be resettable by seeking to its start.
   517  // If modTime is not the zero time instant,
   518  // it will be set as the file's modification time after the operation.
   519  //
   520  // This returns fs.ErrorDirNotFound
   521  // if the parent directory of the file is not found.
   522  func (f *Fs) overwriteFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time) (*api.HiDriveObject, error) {
   523  	parameters := api.NewQueryParameters()
   524  	parameters.SetFileInDirectory(path)
   525  
   526  	var err error
   527  	if !modTime.IsZero() {
   528  		err = parameters.SetTime("mtime", modTime)
   529  		if err != nil {
   530  			return nil, err
   531  		}
   532  	}
   533  
   534  	opts := rest.Opts{
   535  		Method:      "PUT",
   536  		Path:        "/file",
   537  		Body:        content,
   538  		ContentType: "application/octet-stream",
   539  		Parameters:  parameters.Values,
   540  	}
   541  
   542  	var result api.HiDriveObject
   543  	var resp *http.Response
   544  	err = f.pacer.Call(func() (bool, error) {
   545  		// Reset the reading index (in case this is a retry).
   546  		if _, err = content.Seek(0, io.SeekStart); err != nil {
   547  			return false, err
   548  		}
   549  		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
   550  		return f.shouldRetry(ctx, resp, err)
   551  	})
   552  
   553  	switch {
   554  	case err == nil:
   555  		return &result, nil
   556  	case isHTTPError(err, 404):
   557  		return nil, fs.ErrorDirNotFound
   558  	}
   559  	return nil, err
   560  }
   561  
   562  // uploadFileChunked updates the content of the existing file at the given path
   563  // with the content of the io.Reader.
    564  // Returns the size of the contiguous prefix that was written successfully, stopping before the first failed write.
   565  // If nothing was written this will be 0.
   566  // Returns the resulting api-object if successful.
   567  //
   568  // Replaces the file contents by uploading multiple chunks of the given size in parallel.
    569  // Therefore this can be used to upload files of any size efficiently.
    570  // The number of parallel transfers is limited by transferLimit which should be larger than 0.
   571  // If modTime is not the zero time instant,
   572  // it will be set as the file's modification time after the operation.
   573  //
    574  // NOTE: This method uses updateFileChunked and may create sparse files
    575  // if the upload of a chunk fails unexpectedly.
   576  // See note about sparse files in patchFile.
   577  // If any of the uploads fail, the process will be aborted and
   578  // the first error that occurred will be returned.
   579  // This is not an atomic operation,
   580  // therefore if the upload fails the file may be partially modified.
   581  //
   582  // This returns fs.ErrorObjectNotFound if the object is not found.
   583  func (f *Fs) uploadFileChunked(ctx context.Context, path string, content io.Reader, modTime time.Time, chunkSize int, transferLimit int64) (okSize uint64, info *api.HiDriveObject, err error) {
   584  	okSize, err = f.updateFileChunked(ctx, path, content, 0, chunkSize, transferLimit)
   585  
   586  	if err == nil {
   587  		info, err = f.resizeFile(ctx, path, okSize, modTime)
   588  	}
   589  	return okSize, info, err
   590  }
   591  
   592  // updateFileChunked updates the content of the existing file at the given path
   593  // starting at the given offset.
    594  // Returns the size of the contiguous prefix that was written successfully, stopping before the first failed write.
   595  // If nothing was written this will be 0.
   596  //
   597  // Replaces the file contents starting from the given byte offset
   598  // with the content of the io.Reader.
   599  // If the offset is beyond the file end, the file is extended up to the offset.
   600  //
    601  // The upload is done in multiple chunks of the given size in parallel.
    602  // Therefore this can be used to upload files of any size efficiently.
    603  // The number of parallel transfers is limited by transferLimit which should be larger than 0.
   604  //
   605  // NOTE: Because it is inefficient to set the modification time with every chunk,
   606  // setting it to a specific value must be done in a separate request
   607  // after this operation finishes.
   608  //
   609  // NOTE: This method uses patchFile and may create sparse files,
   610  // especially if the upload of a chunk fails unexpectedly.
   611  // See note about sparse files in patchFile.
   612  // If any of the uploads fail, the process will be aborted and
   613  // the first error that occurred will be returned.
   614  // This is not an atomic operation,
   615  // therefore if the upload fails the file may be partially modified.
   616  //
   617  // This returns fs.ErrorObjectNotFound if the object is not found.
   618  func (f *Fs) updateFileChunked(ctx context.Context, path string, content io.Reader, offset uint64, chunkSize int, transferLimit int64) (okSize uint64, err error) {
   619  	var (
   620  		okChunksMu sync.Mutex // protects the variables below
   621  		okChunks   []ranges.Range
   622  	)
   623  	g, gCtx := errgroup.WithContext(ctx)
   624  	transferSemaphore := semaphore.NewWeighted(transferLimit)
   625  
   626  	var readErr error
   627  	startMoreTransfers := true
   628  	zeroTime := time.Time{}
   629  	for chunk := uint64(0); startMoreTransfers; chunk++ {
   630  		// Acquire semaphore to limit number of transfers in parallel.
   631  		readErr = transferSemaphore.Acquire(gCtx, 1)
   632  		if readErr != nil {
   633  			break
   634  		}
   635  
    636  		// Read a chunk of data.
        		// Assign to the outer readErr (not := which would shadow it) so a
        		// failed read is still reported after the loop.
        		var chunkReader io.Reader
        		var bytesRead int
        		chunkReader, bytesRead, readErr = readerForChunk(content, chunkSize)
   638  		if bytesRead < chunkSize {
   639  			startMoreTransfers = false
   640  		}
   641  		if readErr != nil || bytesRead <= 0 {
   642  			break
   643  		}
   644  
   645  		// Transfer the chunk.
   646  		chunkOffset := uint64(chunkSize)*chunk + offset
   647  		g.Go(func() error {
   648  			// After this upload is done,
   649  			// signal that another transfer can be started.
   650  			defer transferSemaphore.Release(1)
   651  			uploadErr := f.patchFile(gCtx, path, cachedReader(chunkReader), chunkOffset, zeroTime)
   652  			if uploadErr == nil {
   653  				// Remember successfully written chunks.
   654  				okChunksMu.Lock()
   655  				okChunks = append(okChunks, ranges.Range{Pos: int64(chunkOffset), Size: int64(bytesRead)})
   656  				okChunksMu.Unlock()
   657  				fs.Debugf(f, "Done uploading chunk of size %v at offset %v.", bytesRead, chunkOffset)
   658  			} else {
   659  				fs.Infof(f, "Error while uploading chunk at offset %v. Error is %v.", chunkOffset, uploadErr)
   660  			}
   661  			return uploadErr
   662  		})
   663  	}
   664  
   665  	if readErr != nil {
   666  		// Log the error in case it is later ignored because of an upload-error.
   667  		fs.Infof(f, "Error while reading/preparing to upload a chunk. Error is %v.", readErr)
   668  	}
   669  
   670  	err = g.Wait()
   671  
   672  	// Compute the first continuous range of the file content,
   673  	// which does not contain any failed chunks.
   674  	// Do not forget to add the file content up to the starting offset,
   675  	// which is presumed to be already correct.
   676  	rs := ranges.Ranges{}
   677  	rs.Insert(ranges.Range{Pos: 0, Size: int64(offset)})
   678  	for _, chunkRange := range okChunks {
   679  		rs.Insert(chunkRange)
   680  	}
   681  	if len(rs) > 0 && rs[0].Pos == 0 {
   682  		okSize = uint64(rs[0].Size)
   683  	}
   684  
   685  	if err != nil {
   686  		return okSize, err
   687  	}
   688  	if readErr != nil {
   689  		return okSize, readErr
   690  	}
   691  
   692  	return okSize, nil
   693  }
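
        // Worked example (hypothetical sizes): with offset = 0, chunkSize = 100 and
        // 250 bytes of content, chunks are uploaded at offsets 0, 100 and 200 (the
        // last one is only 50 bytes long and stops further transfers). If only the
        // chunk at offset 100 fails, the successful ranges are [0,100) and [200,250),
        // so okSize is 100: only the contiguous prefix before the first failure counts.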
   694  
   695  // patchFile updates the content of the existing file at the given path
   696  // starting at the given offset.
   697  //
   698  // Replaces the file contents starting from the given byte offset
   699  // with the content of the io.ReadSeeker.
   700  // If the offset is beyond the file end, the file is extended up to the offset.
   701  // The maximum size of the update is limited by MaximumUploadBytes.
   702  // The io.ReadSeeker should be resettable by seeking to its start.
   703  // If modTime is not the zero time instant,
   704  // it will be set as the file's modification time after the operation.
   705  //
   706  // NOTE: By extending the file up to the offset this may create sparse files,
   707  // which allocate less space on the file system than their apparent size indicates,
   708  // since holes between data chunks are "real" holes
   709  // and not regions made up of consecutive 0-bytes.
   710  // Subsequent operations (such as copying data)
   711  // usually expand the holes into regions of 0-bytes.
   712  //
   713  // This returns fs.ErrorObjectNotFound if the object is not found.
   714  func (f *Fs) patchFile(ctx context.Context, path string, content io.ReadSeeker, offset uint64, modTime time.Time) error {
   715  	parameters := api.NewQueryParameters()
   716  	parameters.SetPath(path)
   717  	parameters.Set("offset", strconv.FormatUint(offset, 10))
   718  
   719  	if !modTime.IsZero() {
   720  		err := parameters.SetTime("mtime", modTime)
   721  		if err != nil {
   722  			return err
   723  		}
   724  	}
   725  
   726  	opts := rest.Opts{
   727  		Method:      "PATCH",
   728  		Path:        "/file",
   729  		Body:        content,
   730  		ContentType: "application/octet-stream",
   731  		Parameters:  parameters.Values,
   732  		NoResponse:  true,
   733  	}
   734  
   735  	var resp *http.Response
   736  	var err error
   737  	err = f.pacer.Call(func() (bool, error) {
   738  		// Reset the reading index (in case this is a retry).
   739  		_, err = content.Seek(0, io.SeekStart)
   740  		if err != nil {
   741  			return false, err
   742  		}
   743  		resp, err = f.srv.Call(ctx, &opts)
   744  		if isHTTPError(err, 423) {
   745  			return true, err
   746  		}
   747  		return f.shouldRetry(ctx, resp, err)
   748  	})
   749  
   750  	if isHTTPError(err, 404) {
   751  		return fs.ErrorObjectNotFound
   752  	}
   753  	return err
   754  }
   755  
   756  // resizeFile updates the existing file at the given path to be of the given size
   757  // and returns the resulting api-object if successful.
   758  //
   759  // If the given size is smaller than the current filesize,
   760  // the file is cut/truncated at that position.
   761  // If the given size is larger, the file is extended up to that position.
   762  // If modTime is not the zero time instant,
   763  // it will be set as the file's modification time after the operation.
   764  //
   765  // NOTE: By extending the file this may create sparse files,
   766  // which allocate less space on the file system than their apparent size indicates,
   767  // since holes between data chunks are "real" holes
   768  // and not regions made up of consecutive 0-bytes.
   769  // Subsequent operations (such as copying data)
   770  // usually expand the holes into regions of 0-bytes.
   771  //
   772  // This returns fs.ErrorObjectNotFound if the object is not found.
   773  func (f *Fs) resizeFile(ctx context.Context, path string, size uint64, modTime time.Time) (*api.HiDriveObject, error) {
   774  	parameters := api.NewQueryParameters()
   775  	parameters.SetPath(path)
   776  	parameters.Set("size", strconv.FormatUint(size, 10))
   777  
   778  	if !modTime.IsZero() {
   779  		err := parameters.SetTime("mtime", modTime)
   780  		if err != nil {
   781  			return nil, err
   782  		}
   783  	}
   784  
   785  	opts := rest.Opts{
   786  		Method:     "POST",
   787  		Path:       "/file/truncate",
   788  		Parameters: parameters.Values,
   789  	}
   790  
   791  	var result api.HiDriveObject
   792  	var resp *http.Response
   793  	var err error
   794  	err = f.pacer.Call(func() (bool, error) {
   795  		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
   796  		return f.shouldRetry(ctx, resp, err)
   797  	})
   798  
   799  	switch {
   800  	case err == nil:
   801  		return &result, nil
   802  	case isHTTPError(err, 404):
   803  		return nil, fs.ErrorObjectNotFound
   804  	}
   805  	return nil, err
   806  }
   807  
   808  // ------------------------------------------------------------
   809  
   810  // isHTTPError compares the numerical status code
   811  // of an api.Error to the given HTTP status.
   812  //
   813  // If the given error is not an api.Error or
   814  // a numerical status code could not be determined, this returns false.
   815  // Otherwise this returns whether the status code of the error is equal to the given status.
   816  func isHTTPError(err error, status int64) bool {
   817  	if apiErr, ok := err.(*api.Error); ok {
   818  		errStatus, decodeErr := apiErr.Code.Int64()
   819  		if decodeErr == nil && errStatus == status {
   820  			return true
   821  		}
   822  	}
   823  	return false
   824  }
   825  
   826  // createHiDriveScopes creates oauth-scopes
   827  // from the given user-role and access-permissions.
   828  //
   829  // If the arguments are empty, they will not be included in the result.
   830  func createHiDriveScopes(role string, access string) []string {
   831  	switch {
   832  	case role != "" && access != "":
   833  		return []string{access + "," + role}
   834  	case role != "":
   835  		return []string{role}
   836  	case access != "":
   837  		return []string{access}
   838  	}
   839  	return []string{}
   840  }
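
        // For example (the scope values here are illustrative):
        //
        //	createHiDriveScopes("user", "rw") // -> []string{"rw,user"}
        //	createHiDriveScopes("user", "")   // -> []string{"user"}
        //	createHiDriveScopes("", "")       // -> []string{}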
   841  
   842  // cachedReader returns a version of the reader that caches its contents and
   843  // can therefore be reset using Seek.
   844  func cachedReader(reader io.Reader) io.ReadSeeker {
   845  	bytesReader, ok := reader.(*bytes.Reader)
   846  	if ok {
   847  		return bytesReader
   848  	}
   849  
   850  	repeatableReader, ok := reader.(*readers.RepeatableReader)
   851  	if ok {
   852  		return repeatableReader
   853  	}
   854  
   855  	return readers.NewRepeatableReader(reader)
   856  }
   857  
   858  // readerForChunk reads a chunk of bytes from reader (after handling any accounting).
   859  // Returns a new io.Reader (chunkReader) for that chunk
   860  // and the number of bytes that have been read from reader.
   861  func readerForChunk(reader io.Reader, length int) (chunkReader io.Reader, bytesRead int, err error) {
   862  	// Unwrap any accounting from the input if present.
   863  	reader, wrap := accounting.UnWrap(reader)
   864  
   865  	// Read a chunk of data.
   866  	buffer := make([]byte, length)
   867  	bytesRead, err = io.ReadFull(reader, buffer)
   868  	if err == io.EOF || err == io.ErrUnexpectedEOF {
   869  		err = nil
   870  	}
   871  	if err != nil {
   872  		return nil, bytesRead, err
   873  	}
   874  	// Truncate unused capacity.
   875  	buffer = buffer[:bytesRead]
   876  
   877  	// Use wrap to put any accounting back for chunkReader.
   878  	return wrap(bytes.NewReader(buffer)), bytesRead, nil
   879  }
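
        // For illustration (hypothetical numbers): with length = 100 but only 30
        // bytes left in reader, io.ReadFull returns io.ErrUnexpectedEOF, which is
        // treated as success here; the caller gets bytesRead = 30 and a 30-byte
        // chunkReader, and updateFileChunked then stops starting new transfers
        // because bytesRead < chunkSize.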