github.com/artpar/rclone@v1.67.3/backend/pikpak/pikpak.go

     1  // Package pikpak provides an interface to the PikPak storage system.
     2  package pikpak
     3  
     4  // ------------------------------------------------------------
     5  // NOTE
     6  // ------------------------------------------------------------
     7  
     8  // md5sum is not always available, sometimes given empty.
     9  
    10  // The sha1sum used for upload differs from the one used by the official apps.
    11  
    12  // Trashed files are not restored to the original location when using `batchUntrash`
    13  
    14  // Can't stream without `--vfs-cache-mode=full`
    15  
    16  // ------------------------------------------------------------
    17  // TODO
    18  // ------------------------------------------------------------
    19  
    20  // * List() with options starred-only
    21  // * uploadByResumable() with configurable chunk-size
    22  // * user-configurable list chunk
    23  // * backend command: untrash, iscached
    24  // * api(event,task)
    25  
    26  import (
    27  	"bytes"
    28  	"context"
    29  	"encoding/json"
    30  	"errors"
    31  	"fmt"
    32  	"io"
    33  	"math"
    34  	"net/http"
    35  	"net/url"
    36  	"path"
    37  	"reflect"
    38  	"strconv"
    39  	"strings"
    40  	"sync"
    41  	"time"
    42  
    43  	"github.com/artpar/rclone/backend/pikpak/api"
    44  	"github.com/artpar/rclone/fs"
    45  	"github.com/artpar/rclone/fs/accounting"
    46  	"github.com/artpar/rclone/fs/config"
    47  	"github.com/artpar/rclone/fs/config/configmap"
    48  	"github.com/artpar/rclone/fs/config/configstruct"
    49  	"github.com/artpar/rclone/fs/config/obscure"
    50  	"github.com/artpar/rclone/fs/fserrors"
    51  	"github.com/artpar/rclone/fs/hash"
    52  	"github.com/artpar/rclone/lib/dircache"
    53  	"github.com/artpar/rclone/lib/encoder"
    54  	"github.com/artpar/rclone/lib/oauthutil"
    55  	"github.com/artpar/rclone/lib/pacer"
    56  	"github.com/artpar/rclone/lib/random"
    57  	"github.com/artpar/rclone/lib/rest"
    58  	"github.com/aws/aws-sdk-go/aws"
    59  	"github.com/aws/aws-sdk-go/aws/credentials"
    60  	"github.com/aws/aws-sdk-go/aws/session"
    61  	"github.com/aws/aws-sdk-go/service/s3/s3manager"
    62  	"golang.org/x/oauth2"
    63  )
    64  
    65  // Constants
    66  const (
    67  	rcloneClientID              = "YNxT9w7GMdWvEOKa"
    68  	rcloneEncryptedClientSecret = "aqrmB6M1YJ1DWCBxVxFSjFo7wzWEky494YMmkqgAl1do1WKOe2E"
    69  	minSleep                    = 10 * time.Millisecond
    70  	maxSleep                    = 2 * time.Second
    71  	decayConstant               = 2 // bigger for slower decay, exponential
    72  	rootURL                     = "https://api-drive.mypikpak.com"
    73  )
    74  
    75  // Globals
    76  var (
    77  	// Description of how to auth for this app
    78  	oauthConfig = &oauth2.Config{
    79  		Scopes: nil,
    80  		Endpoint: oauth2.Endpoint{
    81  			AuthURL:   "https://user.mypikpak.com/v1/auth/signin",
    82  			TokenURL:  "https://user.mypikpak.com/v1/auth/token",
    83  			AuthStyle: oauth2.AuthStyleInParams,
    84  		},
    85  		ClientID:     rcloneClientID,
    86  		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
    87  		RedirectURL:  oauthutil.RedirectURL,
    88  	}
    89  )
    90  
    91  // Returns OAuthOptions modified for pikpak
    92  func pikpakOAuthOptions() []fs.Option {
    93  	opts := []fs.Option{}
    94  	for _, opt := range oauthutil.SharedOptions {
    95  		if opt.Name == config.ConfigClientID {
    96  			opt.Advanced = true
    97  		} else if opt.Name == config.ConfigClientSecret {
    98  			opt.Advanced = true
    99  		}
   100  		opts = append(opts, opt)
   101  	}
   102  	return opts
   103  }
   104  
   105  // pikpakAuthorize retrieves an OAuth token using user/pass and saves it to rclone.conf
   106  func pikpakAuthorize(ctx context.Context, opt *Options, name string, m configmap.Mapper) error {
   107  	// override default client id/secret
   108  	if id, ok := m.Get("client_id"); ok && id != "" {
   109  		oauthConfig.ClientID = id
   110  	}
   111  	if secret, ok := m.Get("client_secret"); ok && secret != "" {
   112  		oauthConfig.ClientSecret = secret
   113  	}
   114  	pass, err := obscure.Reveal(opt.Password)
   115  	if err != nil {
   116  		return fmt.Errorf("failed to decode password - did you obscure it?: %w", err)
   117  	}
   118  	t, err := oauthConfig.PasswordCredentialsToken(ctx, opt.Username, pass)
   119  	if err != nil {
   120  		return fmt.Errorf("failed to retrieve token using username/password: %w", err)
   121  	}
   122  	return oauthutil.PutToken(name, m, t, false)
   123  }
   124  
   125  // Register with Fs
   126  func init() {
   127  	fs.Register(&fs.RegInfo{
   128  		Name:        "pikpak",
   129  		Description: "PikPak",
   130  		NewFs:       NewFs,
   131  		CommandHelp: commandHelp,
   132  		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
   133  			// Parse config into Options struct
   134  			opt := new(Options)
   135  			err := configstruct.Set(m, opt)
   136  			if err != nil {
   137  				return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
   138  			}
   139  
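        			// Config flows through three states: "" checks for an existing token and
        			// asks whether to re-authorize, "authorize_ok" acts on that answer, and
        			// "authorize" performs the username/password token exchange via pikpakAuthorize.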
   140  			switch config.State {
   141  			case "":
   142  				// Check token exists
   143  				if _, err := oauthutil.GetToken(name, m); err != nil {
   144  					return fs.ConfigGoto("authorize")
   145  				}
   146  				return fs.ConfigConfirm("authorize_ok", false, "consent_to_authorize", "Re-authorize for new token?")
   147  			case "authorize_ok":
   148  				if config.Result == "false" {
   149  					return nil, nil
   150  				}
   151  				return fs.ConfigGoto("authorize")
   152  			case "authorize":
   153  				if err := pikpakAuthorize(ctx, opt, name, m); err != nil {
   154  					return nil, err
   155  				}
   156  				return nil, nil
   157  			}
   158  			return nil, fmt.Errorf("unknown state %q", config.State)
   159  		},
   160  		Options: append(pikpakOAuthOptions(), []fs.Option{{
   161  			Name:      "user",
   162  			Help:      "PikPak username.",
   163  			Required:  true,
   164  			Sensitive: true,
   165  		}, {
   166  			Name:       "pass",
   167  			Help:       "PikPak password.",
   168  			Required:   true,
   169  			IsPassword: true,
   170  		}, {
   171  			Name: "root_folder_id",
   172  			Help: `ID of the root folder.
   173  Leave blank normally.
   174  
   175  Fill in for rclone to use a non root folder as its starting point.
   176  `,
   177  			Advanced:  true,
   178  			Sensitive: true,
   179  		}, {
   180  			Name:     "use_trash",
   181  			Default:  true,
   182  			Help:     "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--pikpak-use-trash=false` to delete files permanently instead.",
   183  			Advanced: true,
   184  		}, {
   185  			Name:     "trashed_only",
   186  			Default:  false,
   187  			Help:     "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
   188  			Advanced: true,
   189  		}, {
   190  			Name:     "hash_memory_limit",
   191  			Help:     "Files bigger than this will be cached on disk to calculate hash if required.",
   192  			Default:  fs.SizeSuffix(10 * 1024 * 1024),
   193  			Advanced: true,
   194  		}, {
   195  			Name:     config.ConfigEncoding,
   196  			Help:     config.ConfigEncodingHelp,
   197  			Advanced: true,
   198  			Default: (encoder.EncodeCtl |
   199  				encoder.EncodeDot |
   200  				encoder.EncodeBackSlash |
   201  				encoder.EncodeSlash |
   202  				encoder.EncodeDoubleQuote |
   203  				encoder.EncodeAsterisk |
   204  				encoder.EncodeColon |
   205  				encoder.EncodeLtGt |
   206  				encoder.EncodeQuestion |
   207  				encoder.EncodePipe |
   208  				encoder.EncodeLeftSpace |
   209  				encoder.EncodeRightSpace |
   210  				encoder.EncodeRightPeriod |
   211  				encoder.EncodeInvalidUtf8),
   212  		}}...),
   213  	})
   214  }
   215  
   216  // Options defines the configuration for this backend
   217  type Options struct {
   218  	Username            string               `config:"user"`
   219  	Password            string               `config:"pass"`
   220  	RootFolderID        string               `config:"root_folder_id"`
   221  	UseTrash            bool                 `config:"use_trash"`
   222  	TrashedOnly         bool                 `config:"trashed_only"`
   223  	HashMemoryThreshold fs.SizeSuffix        `config:"hash_memory_limit"`
   224  	Enc                 encoder.MultiEncoder `config:"encoding"`
   225  }
   226  
   227  // Fs represents a remote pikpak
   228  type Fs struct {
   229  	name         string             // name of this remote
   230  	root         string             // the path we are working on
   231  	opt          Options            // parsed options
   232  	features     *fs.Features       // optional features
   233  	rst          *rest.Client       // the connection to the server
   234  	dirCache     *dircache.DirCache // Map of directory path to directory id
   235  	pacer        *fs.Pacer          // pacer for API calls
   236  	rootFolderID string             // the id of the root folder
   237  	client       *http.Client       // authorized client
   238  	m            configmap.Mapper
   239  	tokenMu      *sync.Mutex // when renewing tokens
   240  }
   241  
   242  // Object describes a pikpak object
   243  type Object struct {
   244  	fs          *Fs       // what this object is part of
   245  	remote      string    // The remote path
   246  	hasMetaData bool      // whether info below has been set
   247  	id          string    // ID of the object
   248  	size        int64     // size of the object
   249  	modTime     time.Time // modification time of the object
   250  	mimeType    string    // The object MIME type
   251  	parent      string    // ID of the parent directories
   252  	md5sum      string    // md5sum of the object
   253  	link        *api.Link // link to download the object
   254  	linkMu      *sync.Mutex
   255  }
   256  
   257  // ------------------------------------------------------------
   258  
   259  // Name of the remote (as passed into NewFs)
   260  func (f *Fs) Name() string {
   261  	return f.name
   262  }
   263  
   264  // Root of the remote (as passed into NewFs)
   265  func (f *Fs) Root() string {
   266  	return f.root
   267  }
   268  
   269  // String converts this Fs to a string
   270  func (f *Fs) String() string {
   271  	return fmt.Sprintf("PikPak root '%s'", f.root)
   272  }
   273  
   274  // Features returns the optional features of this Fs
   275  func (f *Fs) Features() *fs.Features {
   276  	return f.features
   277  }
   278  
   279  // Precision returns the precision of this Fs
   280  func (f *Fs) Precision() time.Duration {
   281  	return fs.ModTimeNotSupported
   282  	// meaning that the modification times from the backend shouldn't be used for syncing
   283  	// as they can't be set.
   284  }
   285  
   286  // DirCacheFlush resets the directory cache - used in testing as an
   287  // optional interface
   288  func (f *Fs) DirCacheFlush() {
   289  	f.dirCache.ResetRoot()
   290  }
   291  
   292  // Hashes returns the supported hash sets.
   293  func (f *Fs) Hashes() hash.Set {
   294  	return hash.Set(hash.MD5)
   295  }
   296  
   297  // parsePath parses a remote path
   298  func parsePath(path string) (root string) {
   299  	root = strings.Trim(path, "/")
   300  	return
   301  }
   302  
   303  // parentIDForRequest returns the parent ID to use in API requests
   304  func parentIDForRequest(dirID string) string {
   305  	if dirID == "root" {
   306  		return ""
   307  	}
   308  	return dirID
   309  }
   310  
   311  // retryErrorCodes is a slice of error codes that we will retry
   312  var retryErrorCodes = []int{
   313  	429, // Too Many Requests.
   314  	500, // Internal Server Error
   315  	502, // Bad Gateway
   316  	503, // Service Unavailable
   317  	504, // Gateway Timeout
   318  	509, // Bandwidth Limit Exceeded
   319  }
   320  
   321  // reAuthorize re-authorizes the OAuth token during runtime
   322  func (f *Fs) reAuthorize(ctx context.Context) (err error) {
   323  	f.tokenMu.Lock()
   324  	defer f.tokenMu.Unlock()
   325  
   326  	if err := pikpakAuthorize(ctx, &f.opt, f.name, f.m); err != nil {
   327  		return err
   328  	}
   329  	return f.newClientWithPacer(ctx)
   330  }
   331  
   332  // shouldRetry returns a boolean as to whether this resp and err
   333  // deserve to be retried.  It returns the err as a convenience
   334  func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
   335  	if fserrors.ContextError(ctx, &err) {
   336  		return false, err
   337  	}
   338  	if err == nil {
   339  		return false, nil
   340  	}
   341  	if fserrors.ShouldRetry(err) {
   342  		return true, err
   343  	}
   344  	authRetry := false
   345  
   346  	// trace back to a possible api.Error wrapped in err, and re-authorize if necessary
   347  	// "unauthenticated" (16): when access_token is invalid, but should be handled by oauthutil
   348  	var terr *oauth2.RetrieveError
   349  	if errors.As(err, &terr) {
   350  		apiErr := new(api.Error)
   351  		if err := json.Unmarshal(terr.Body, apiErr); err == nil {
   352  			if apiErr.Reason == "invalid_grant" {
   353  				// "invalid_grant" (4126): The refresh token is incorrect or expired
   354  				// Invalid refresh token. It may have been refreshed by another process.
   355  				authRetry = true
   356  			}
   357  		}
   358  	}
   359  	// Once err was processed by maybeWrapOAuthError() in lib/oauthutil,
   360  	// the above code is no longer sufficient to handle the 'invalid_grant' error.
   361  	if strings.Contains(err.Error(), "invalid_grant") {
   362  		authRetry = true
   363  	}
   364  
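        	// re-authorize in place with username/password so the pending call can be retried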
   365  	if authRetry {
   366  		if authErr := f.reAuthorize(ctx); authErr != nil {
   367  			return false, fserrors.FatalError(authErr)
   368  		}
   369  	}
   370  
   371  	switch apiErr := err.(type) {
   372  	case *api.Error:
   373  		if apiErr.Reason == "file_rename_uncompleted" {
   374  			// "file_rename_uncompleted" (9): Renaming uncompleted file or folder is not supported
   375  			// This error occurs when you attempt to rename objects
   376  			// right after some server-side changes, e.g. DirMove, Move, Copy
   377  			return true, err
   378  		} else if apiErr.Reason == "file_duplicated_name" {
   379  			// "file_duplicated_name" (3): File name cannot be repeated
   380  			// This error may occur when attempting to rename temp object (newly uploaded)
   381  			// right after the old one is removed.
   382  			return true, err
   383  		} else if apiErr.Reason == "task_daily_create_limit_vip" {
   384  			// "task_daily_create_limit_vip" (11): Sorry, you have submitted too many tasks and have exceeded the current processing capacity, please try again tomorrow
   385  			return false, fserrors.FatalError(err)
   386  		} else if apiErr.Reason == "file_space_not_enough" {
   387  			// "file_space_not_enough" (8): Storage space is not enough
   388  			return false, fserrors.FatalError(err)
   389  		}
   390  	}
   391  
   392  	return authRetry || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
   393  }
   394  
   395  // errorHandler parses a non 2xx error response into an error
   396  func errorHandler(resp *http.Response) error {
   397  	// Decode error response
   398  	errResponse := new(api.Error)
   399  	err := rest.DecodeJSON(resp, &errResponse)
   400  	if err != nil {
   401  		fs.Debugf(nil, "Couldn't decode error response: %v", err)
   402  	}
   403  	if errResponse.Reason == "" {
   404  		errResponse.Reason = resp.Status
   405  	}
   406  	if errResponse.Code == 0 {
   407  		errResponse.Code = resp.StatusCode
   408  	}
   409  	return errResponse
   410  }
   411  
   412  // newClientWithPacer sets a new http/rest client with a pacer on the Fs
   413  func (f *Fs) newClientWithPacer(ctx context.Context) (err error) {
   414  	f.client, _, err = oauthutil.NewClient(ctx, f.name, f.m, oauthConfig)
   415  	if err != nil {
   416  		return fmt.Errorf("failed to create oauth client: %w", err)
   417  	}
   418  	f.rst = rest.NewClient(f.client).SetRoot(rootURL).SetErrorHandler(errorHandler)
   419  	f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
   420  	return nil
   421  }
   422  
   423  // newFs partially constructs Fs from the path
   424  //
   425  // It constructs a valid Fs but doesn't attempt to figure out whether
   426  // it is a file or a directory.
   427  func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, error) {
   428  	// Parse config into Options struct
   429  	opt := new(Options)
   430  	if err := configstruct.Set(m, opt); err != nil {
   431  		return nil, err
   432  	}
   433  
   434  	root := parsePath(path)
   435  
   436  	f := &Fs{
   437  		name:    name,
   438  		root:    root,
   439  		opt:     *opt,
   440  		m:       m,
   441  		tokenMu: new(sync.Mutex),
   442  	}
   443  	f.features = (&fs.Features{
   444  		ReadMimeType:            true, // can read the mime type of objects
   445  		CanHaveEmptyDirectories: true, // can have empty directories
   446  		NoMultiThreading:        true, // can't have multiple threads downloading
   447  	}).Fill(ctx, f)
   448  
   449  	if err := f.newClientWithPacer(ctx); err != nil {
   450  		return nil, err
   451  	}
   452  
   453  	return f, nil
   454  }
   455  
   456  // NewFs constructs an Fs from the path, container:path
   457  func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, error) {
   458  	f, err := newFs(ctx, name, path, m)
   459  	if err != nil {
   460  		return nil, err
   461  	}
   462  
   463  	// Set the root folder ID
   464  	if f.opt.RootFolderID != "" {
   465  		// use root_folder ID if set
   466  		f.rootFolderID = f.opt.RootFolderID
   467  	} else {
   468  		// pseudo-root
   469  		f.rootFolderID = "root"
   470  	}
   471  
   472  	f.dirCache = dircache.New(f.root, f.rootFolderID, f)
   473  
   474  	// Find the current root
   475  	err = f.dirCache.FindRoot(ctx, false)
   476  	if err != nil {
   477  		// Assume it is a file
   478  		newRoot, remote := dircache.SplitPath(f.root)
   479  		tempF := *f
   480  		tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF)
   481  		tempF.root = newRoot
   482  		// Make new Fs which is the parent
   483  		err = tempF.dirCache.FindRoot(ctx, false)
   484  		if err != nil {
   485  			// No root so return old f
   486  			return f, nil
   487  		}
   488  		_, err := tempF.NewObject(ctx, remote)
   489  		if err != nil {
   490  			if err == fs.ErrorObjectNotFound {
   491  				// File doesn't exist so return old f
   492  				return f, nil
   493  			}
   494  			return nil, err
   495  		}
   496  		f.features.Fill(ctx, &tempF)
   497  		// XXX: update the old f here instead of returning tempF, since
   498  		// `features` were already filled with functions having *f as a receiver.
   499  		// See https://github.com/artpar/rclone/issues/2182
   500  		f.dirCache = tempF.dirCache
   501  		f.root = tempF.root
   502  		// return an error with an fs which points to the parent
   503  		return f, fs.ErrorIsFile
   504  	}
   505  	return f, nil
   506  }
   507  
   508  // readMetaDataForPath reads the metadata from the path
   509  func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.File, err error) {
   510  	leaf, dirID, err := f.dirCache.FindPath(ctx, path, false)
   511  	if err != nil {
   512  		if err == fs.ErrorDirNotFound {
   513  			return nil, fs.ErrorObjectNotFound
   514  		}
   515  		return nil, err
   516  	}
   517  
   518  	// check whether a file object named leaf exists in dirID
   519  	trashed := "false"
   520  	if f.opt.TrashedOnly {
   521  		trashed = "true"
   522  	}
   523  	found, err := f.listAll(ctx, dirID, api.KindOfFile, trashed, func(item *api.File) bool {
   524  		if item.Name == leaf {
   525  			info = item
   526  			return true
   527  		}
   528  		return false
   529  	})
   530  	if err != nil {
   531  		return nil, err
   532  	}
   533  	if !found {
   534  		return nil, fs.ErrorObjectNotFound
   535  	}
   536  	return info, nil
   537  }
   538  
   539  // Return an Object from a path
   540  //
   541  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   542  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) {
   543  	o := &Object{
   544  		fs:     f,
   545  		remote: remote,
   546  		linkMu: new(sync.Mutex),
   547  	}
   548  	var err error
   549  	if info != nil {
   550  		err = o.setMetaData(info)
   551  	} else {
   552  		err = o.readMetaData(ctx) // reads info and meta, returning an error
   553  	}
   554  	if err != nil {
   555  		return nil, err
   556  	}
   557  	return o, nil
   558  }
   559  
   560  // NewObject finds the Object at remote.  If it can't be found
   561  // it returns the error fs.ErrorObjectNotFound.
   562  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   563  	return f.newObjectWithInfo(ctx, remote, nil)
   564  }
   565  
   566  // FindLeaf finds a directory of name leaf in the folder with ID pathID
   567  func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
   568  	// Find the leaf in pathID
   569  	trashed := "false"
   570  	if f.opt.TrashedOnly {
   571  		// still need to list folders
   572  		trashed = ""
   573  	}
   574  	found, err = f.listAll(ctx, pathID, api.KindOfFolder, trashed, func(item *api.File) bool {
   575  		if item.Name == leaf {
   576  			pathIDOut = item.ID
   577  			return true
   578  		}
   579  		return false
   580  	})
   581  	return pathIDOut, found, err
   582  }
   583  
   584  // list the objects into the function supplied
   585  //
   586  // If kind is set it only sends items of that kind
   587  // listAllFn is the user function to process a File item from listAll
   588  //
   589  // Should return true to finish processing
   590  type listAllFn func(*api.File) bool
   591  
   592  // Lists the directory required, calling the user function on each item found
   593  //
   594  // If the user fn ever returns true then it early exits with found = true
   595  func (f *Fs) listAll(ctx context.Context, dirID, kind, trashed string, fn listAllFn) (found bool, err error) {
   596  	// URL parameters
   597  	params := url.Values{}
   598  	params.Set("thumbnail_size", api.ThumbnailSizeM)
   599  	params.Set("limit", strconv.Itoa(api.ListLimit))
   600  	params.Set("with_audit", strconv.FormatBool(true))
   601  	if parentID := parentIDForRequest(dirID); parentID != "" {
   602  		params.Set("parent_id", parentID)
   603  	}
   604  
   605  	// Construct filter string
   606  	filters := &api.Filters{}
   607  	filters.Set("Phase", "eq", api.PhaseTypeComplete)
   608  	filters.Set("Trashed", "eq", trashed)
   609  	filters.Set("Kind", "eq", kind)
   610  	if filterStr, err := json.Marshal(filters); err == nil {
   611  		params.Set("filters", string(filterStr))
   612  	}
   613  	// fs.Debugf(f, "list params: %v", params)
   614  
   615  	opts := rest.Opts{
   616  		Method:     "GET",
   617  		Path:       "/drive/v1/files",
   618  		Parameters: params,
   619  	}
   620  
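        	// page through the listing; the server returns a next_page_token until the
        	// listing is exhausted, and the loop also stops early as soon as fn returns true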
   621  	pageToken := ""
   622  OUTER:
   623  	for {
   624  		opts.Parameters.Set("page_token", pageToken)
   625  
   626  		var info api.FileList
   627  		var resp *http.Response
   628  		err = f.pacer.Call(func() (bool, error) {
   629  			resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
   630  			return f.shouldRetry(ctx, resp, err)
   631  		})
   632  		if err != nil {
   633  			return found, fmt.Errorf("couldn't list files: %w", err)
   634  		}
   635  		if len(info.Files) == 0 {
   636  			break
   637  		}
   638  		for _, item := range info.Files {
   639  			item.Name = f.opt.Enc.ToStandardName(item.Name)
   640  			if fn(item) {
   641  				found = true
   642  				break OUTER
   643  			}
   644  		}
   645  		if info.NextPageToken == "" {
   646  			break
   647  		}
   648  		pageToken = info.NextPageToken
   649  	}
   650  	return
   651  }
   652  
   653  // itemToDirEntry converts an api.File to an fs.DirEntry.
   654  // When the api.File cannot be represented as an fs.DirEntry
   655  // (nil, nil) is returned.
   656  func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *api.File) (entry fs.DirEntry, err error) {
   657  	switch {
   658  	case item.Kind == api.KindOfFolder:
   659  		// cache the directory ID for later lookups
   660  		f.dirCache.Put(remote, item.ID)
   661  		d := fs.NewDir(remote, time.Time(item.ModifiedTime)).SetID(item.ID)
   662  		if item.ParentID == "" {
   663  			d.SetParentID("root")
   664  		} else {
   665  			d.SetParentID(item.ParentID)
   666  		}
   667  		return d, nil
   668  	case f.opt.TrashedOnly && !item.Trashed:
   669  		// ignore object
   670  	default:
   671  		entry, err = f.newObjectWithInfo(ctx, remote, item)
   672  		if err == fs.ErrorObjectNotFound {
   673  			return nil, nil
   674  		}
   675  		return entry, err
   676  	}
   677  	return nil, nil
   678  }
   679  
   680  // List the objects and directories in dir into entries. The
   681  // entries can be returned in any order but should be for a
   682  // complete directory.
   683  //
   684  // dir should be "" to list the root, and should not have
   685  // trailing slashes.
   686  //
   687  // This should return ErrDirNotFound if the directory isn't
   688  // found.
   689  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
   690  	// fs.Debugf(f, "List(%q)\n", dir)
   691  	dirID, err := f.dirCache.FindDir(ctx, dir, false)
   692  	if err != nil {
   693  		return nil, err
   694  	}
   695  	var iErr error
   696  	trashed := "false"
   697  	if f.opt.TrashedOnly {
   698  		// still need to list folders
   699  		trashed = ""
   700  	}
   701  	_, err = f.listAll(ctx, dirID, "", trashed, func(item *api.File) bool {
   702  		entry, err := f.itemToDirEntry(ctx, path.Join(dir, item.Name), item)
   703  		if err != nil {
   704  			iErr = err
   705  			return true
   706  		}
   707  		if entry != nil {
   708  			entries = append(entries, entry)
   709  		}
   710  		return false
   711  	})
   712  	if err != nil {
   713  		return nil, err
   714  	}
   715  	if iErr != nil {
   716  		return nil, iErr
   717  	}
   718  	return entries, nil
   719  }
   720  
   721  // CreateDir makes a directory with pathID as parent and name leaf
   722  func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
   723  	// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
   724  	req := api.RequestNewFile{
   725  		Name:     f.opt.Enc.FromStandardName(leaf),
   726  		Kind:     api.KindOfFolder,
   727  		ParentID: parentIDForRequest(pathID),
   728  	}
   729  	info, err := f.requestNewFile(ctx, &req)
   730  	if err != nil {
   731  		return "", err
   732  	}
   733  	return info.File.ID, nil
   734  }
   735  
   736  // Mkdir creates the container if it doesn't exist
   737  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   738  	_, err := f.dirCache.FindDir(ctx, dir, true)
   739  	return err
   740  }
   741  
   742  // About gets quota information
   743  func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
   744  	info, err := f.getAbout(ctx)
   745  	if err != nil {
   746  		return nil, fmt.Errorf("failed to get drive quota: %w", err)
   747  	}
   748  	q := info.Quota
   749  	usage = &fs.Usage{
   750  		Used: fs.NewUsageValue(q.Usage), // bytes in use
   751  		// Trashed: fs.NewUsageValue(q.UsageInTrash), // bytes in trash but this seems not working
   752  	}
   753  	if q.Limit > 0 {
   754  		usage.Total = fs.NewUsageValue(q.Limit)          // quota of bytes that can be used
   755  		usage.Free = fs.NewUsageValue(q.Limit - q.Usage) // bytes which can be uploaded before reaching the quota
   756  	}
   757  	return usage, nil
   758  }
   759  
   760  // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
   761  func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
   762  	id, err := f.dirCache.FindDir(ctx, remote, false)
   763  	if err == nil {
   764  		fs.Debugf(f, "attempting to share directory '%s'", remote)
   765  	} else {
   766  		fs.Debugf(f, "attempting to share single file '%s'", remote)
   767  		o, err := f.NewObject(ctx, remote)
   768  		if err != nil {
   769  			return "", err
   770  		}
   771  		id = o.(*Object).id
   772  	}
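        	// convert the requested expiry into whole days for the API;
        	// -1 is sent when no expiry was requested (fs.DurationOff)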
   773  	expiry := -1
   774  	if expire < fs.DurationOff {
   775  		expiry = int(math.Ceil(time.Duration(expire).Hours() / 24))
   776  	}
   777  	req := api.RequestShare{
   778  		FileIDs:        []string{id},
   779  		ShareTo:        "publiclink",
   780  		ExpirationDays: expiry,
   781  		PassCodeOption: "NOT_REQUIRED",
   782  	}
   783  	info, err := f.requestShare(ctx, &req)
   784  	if err != nil {
   785  		return "", err
   786  	}
   787  	return info.ShareURL, err
   788  }
   789  
   790  // deleteObjects deletes or trashes files/directories by ID, depending on useTrash
   791  func (f *Fs) deleteObjects(ctx context.Context, IDs []string, useTrash bool) (err error) {
   792  	if len(IDs) == 0 {
   793  		return nil
   794  	}
   795  	action := "batchDelete"
   796  	if useTrash {
   797  		action = "batchTrash"
   798  	}
   799  	req := api.RequestBatch{
   800  		IDs: IDs,
   801  	}
   802  	if err := f.requestBatchAction(ctx, action, &req); err != nil {
   803  		return fmt.Errorf("delete object failed: %w", err)
   804  	}
   805  	return nil
   806  }
   807  
   808  // purgeCheck removes the root directory, if check is set then it
   809  // refuses to do so if it has anything in it
   810  func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
   811  	root := path.Join(f.root, dir)
   812  	if root == "" {
   813  		return errors.New("can't purge root directory")
   814  	}
   815  	rootID, err := f.dirCache.FindDir(ctx, dir, false)
   816  	if err != nil {
   817  		return err
   818  	}
   819  
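        	// when check is set, scan the directory first: any live entry makes it non-empty,
        	// while trashed entries only force the directory to be trashed rather than deleted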
   820  	trashedFiles := false
   821  	if check {
   822  		found, err := f.listAll(ctx, rootID, "", "", func(item *api.File) bool {
   823  			if !item.Trashed {
   824  				fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
   825  				return true
   826  			}
   827  			fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name)
   828  			trashedFiles = true
   829  			return false
   830  		})
   831  		if err != nil {
   832  			return err
   833  		}
   834  		if found {
   835  			return fs.ErrorDirectoryNotEmpty
   836  		}
   837  	}
   838  	if root != "" {
   839  		// trash the directory if it had trashed files
   840  		// in or the user wants to trash, otherwise
   841  		// delete it.
   842  		err = f.deleteObjects(ctx, []string{rootID}, trashedFiles || f.opt.UseTrash)
   843  		if err != nil {
   844  			return err
   845  		}
   846  	} else if check {
   847  		return errors.New("can't purge root directory")
   848  	}
   849  	f.dirCache.FlushDir(dir)
   850  	if err != nil {
   851  		return err
   852  	}
   853  	return nil
   854  }
   855  
   856  // Rmdir deletes the root folder
   857  //
   858  // Returns an error if it isn't empty
   859  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
   860  	return f.purgeCheck(ctx, dir, true)
   861  }
   862  
   863  // Purge deletes all the files and the container
   864  //
   865  // Optional interface: Only implement this if you have a way of
   866  // deleting all the files quicker than just running Remove() on the
   867  // result of List()
   868  func (f *Fs) Purge(ctx context.Context, dir string) error {
   869  	return f.purgeCheck(ctx, dir, false)
   870  }
   871  
   872  // CleanUp empties the trash
   873  func (f *Fs) CleanUp(ctx context.Context) (err error) {
   874  	opts := rest.Opts{
   875  		Method:     "PATCH",
   876  		Path:       "/drive/v1/files/trash:empty",
   877  		NoResponse: true, // Only returns {"task_id":""}
   878  	}
   879  	var resp *http.Response
   880  	err = f.pacer.Call(func() (bool, error) {
   881  		resp, err = f.rst.Call(ctx, &opts)
   882  		return f.shouldRetry(ctx, resp, err)
   883  	})
   884  	if err != nil {
   885  		return fmt.Errorf("couldn't empty trash: %w", err)
   886  	}
   887  	return nil
   888  }
   889  
   890  // moveObjects moves the objects with the given IDs to the directory dirID
   891  func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err error) {
   892  	if len(IDs) == 0 {
   893  		return nil
   894  	}
   895  	req := api.RequestBatch{
   896  		IDs: IDs,
   897  		To:  map[string]string{"parent_id": parentIDForRequest(dirID)},
   898  	}
   899  	if err := f.requestBatchAction(ctx, "batchMove", &req); err != nil {
   900  		return fmt.Errorf("move object failed: %w", err)
   901  	}
   902  	return nil
   903  }
   904  
   905  // renames the object
   906  func (f *Fs) renameObject(ctx context.Context, ID, newName string) (info *api.File, err error) {
   907  	req := api.File{
   908  		Name: f.opt.Enc.FromStandardName(newName),
   909  	}
   910  	info, err = f.patchFile(ctx, ID, &req)
   911  	if err != nil {
   912  		return nil, fmt.Errorf("rename object failed: %w", err)
   913  	}
   914  	return
   915  }
   916  
   917  // DirMove moves src, srcRemote to this remote at dstRemote
   918  // using server-side move operations.
   919  //
   920  // Will only be called if src.Fs().Name() == f.Name()
   921  //
   922  // If it isn't possible then return fs.ErrorCantDirMove
   923  //
   924  // If destination exists then return fs.ErrorDirExists
   925  func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
   926  	srcFs, ok := src.(*Fs)
   927  	if !ok {
   928  		fs.Debugf(srcFs, "Can't move directory - not same remote type")
   929  		return fs.ErrorCantDirMove
   930  	}
   931  
   932  	srcID, srcParentID, srcLeaf, dstParentID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
   933  	if err != nil {
   934  		return err
   935  	}
   936  
   937  	if srcParentID != dstParentID {
   938  		// Do the move
   939  		err = f.moveObjects(ctx, []string{srcID}, dstParentID)
   940  		if err != nil {
   941  			return fmt.Errorf("couldn't dir move: %w", err)
   942  		}
   943  	}
   944  
   945  	// Can't move and change name in one step so we have to check if we have
   946  	// the correct name after the move
   947  	if srcLeaf != dstLeaf {
   948  		_, err = f.renameObject(ctx, srcID, dstLeaf)
   949  		if err != nil {
   950  			return fmt.Errorf("dirmove: couldn't rename moved dir: %w", err)
   951  		}
   952  	}
   953  	srcFs.dirCache.FlushDir(srcRemote)
   954  	return nil
   955  }
   956  
   957  // Creates from the parameters passed in a half finished Object which
   958  // must have setMetaData called on it
   959  //
   960  // Returns the object, leaf, dirID and error.
   961  //
   962  // Used to create new objects
   963  func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, dirID string, err error) {
   964  	// Create the directory for the object if it doesn't exist
   965  	leaf, dirID, err = f.dirCache.FindPath(ctx, remote, true)
   966  	if err != nil {
   967  		return
   968  	}
   969  	// Temporary Object under construction
   970  	o = &Object{
   971  		fs:      f,
   972  		remote:  remote,
   973  		size:    size,
   974  		modTime: modTime,
   975  		linkMu:  new(sync.Mutex),
   976  	}
   977  	return o, leaf, dirID, nil
   978  }
   979  
   980  // Move src to this remote using server-side move operations.
   981  //
   982  // This is stored with the remote path given.
   983  //
   984  // It returns the destination Object and a possible error.
   985  //
   986  // Will only be called if src.Fs().Name() == f.Name()
   987  //
   988  // If it isn't possible then return fs.ErrorCantMove
   989  func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
   990  	srcObj, ok := src.(*Object)
   991  	if !ok {
   992  		fs.Debugf(src, "Can't move - not same remote type")
   993  		return nil, fs.ErrorCantMove
   994  	}
   995  	err := srcObj.readMetaData(ctx)
   996  	if err != nil {
   997  		return nil, err
   998  	}
   999  
  1000  	srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
  1001  	if err != nil {
  1002  		return nil, err
  1003  	}
  1004  
  1005  	// Create temporary object
  1006  	dstObj, dstLeaf, dstParentID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
  1007  	if err != nil {
  1008  		return nil, err
  1009  	}
  1010  
  1011  	if srcParentID != dstParentID {
  1012  		// Do the move
  1013  		if err = f.moveObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
  1014  			return nil, err
  1015  		}
  1016  	}
  1017  	dstObj.id = srcObj.id
  1018  
  1019  	var info *api.File
  1020  	if srcLeaf != dstLeaf {
  1021  		// Rename
  1022  		info, err = f.renameObject(ctx, srcObj.id, dstLeaf)
  1023  		if err != nil {
  1024  			return nil, fmt.Errorf("move: couldn't rename moved file: %w", err)
  1025  		}
  1026  	} else {
  1027  		// Update info
  1028  		info, err = f.getFile(ctx, dstObj.id)
  1029  		if err != nil {
  1030  			return nil, fmt.Errorf("move: couldn't update moved file: %w", err)
  1031  		}
  1032  	}
  1033  	return dstObj, dstObj.setMetaData(info)
  1034  }
  1035  
  1036  // copy objects
  1037  func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err error) {
  1038  	if len(IDs) == 0 {
  1039  		return nil
  1040  	}
  1041  	req := api.RequestBatch{
  1042  		IDs: IDs,
  1043  		To:  map[string]string{"parent_id": parentIDForRequest(dirID)},
  1044  	}
  1045  	if err := f.requestBatchAction(ctx, "batchCopy", &req); err != nil {
  1046  		return fmt.Errorf("copy object failed: %w", err)
  1047  	}
  1048  	return nil
  1049  }
  1050  
  1051  // Copy src to this remote using server-side copy operations.
  1052  //
  1053  // This is stored with the remote path given.
  1054  //
  1055  // It returns the destination Object and a possible error.
  1056  //
  1057  // Will only be called if src.Fs().Name() == f.Name()
  1058  //
  1059  // If it isn't possible then return fs.ErrorCantCopy
  1060  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1061  	srcObj, ok := src.(*Object)
  1062  	if !ok {
  1063  		fs.Debugf(src, "Can't copy - not same remote type")
  1064  		return nil, fs.ErrorCantCopy
  1065  	}
  1066  	err := srcObj.readMetaData(ctx)
  1067  	if err != nil {
  1068  		return nil, err
  1069  	}
  1070  
  1071  	// Create temporary object
  1072  	dstObj, dstLeaf, dstParentID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
  1073  	if err != nil {
  1074  		return nil, err
  1075  	}
  1076  	if srcObj.parent == dstParentID {
  1077  		// api restriction
  1078  		fs.Debugf(src, "Can't copy - same parent")
  1079  		return nil, fs.ErrorCantCopy
  1080  	}
  1081  	// Copy the object
  1082  	if err := f.copyObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
  1083  		return nil, fmt.Errorf("couldn't copy file: %w", err)
  1084  	}
  1085  
  1086  	// Can't copy and change name in one step so we have to check if we have
  1087  	// the correct name after copy
  1088  	srcLeaf, _, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
  1089  	if err != nil {
  1090  		return nil, err
  1091  	}
  1092  
  1093  	if srcLeaf != dstLeaf {
  1094  		// Rename
  1095  		info, err := f.renameObject(ctx, dstObj.id, dstLeaf)
  1096  		if err != nil {
  1097  			return nil, fmt.Errorf("copy: couldn't rename copied file: %w", err)
  1098  		}
  1099  		err = dstObj.setMetaData(info)
  1100  		if err != nil {
  1101  			return nil, err
  1102  		}
  1103  	} else {
  1104  		// Update info
  1105  		err = dstObj.readMetaData(ctx)
  1106  		if err != nil {
  1107  			return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err)
  1108  		}
  1109  	}
  1110  	return dstObj, nil
  1111  }
  1112  
  1113  func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size int64, form *api.Form, options ...fs.OpenOption) (err error) {
  1114  	// struct to map: transfer values from form.MultiParts to URL parameters
  1115  	params := url.Values{}
  1116  	iVal := reflect.ValueOf(&form.MultiParts).Elem()
  1117  	iTyp := iVal.Type()
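        	// each field of form.MultiParts is copied into the form data under its json struct tag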
  1118  	for i := 0; i < iVal.NumField(); i++ {
  1119  		params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String())
  1120  	}
  1121  	formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name)
  1122  	if err != nil {
  1123  		return fmt.Errorf("failed to make multipart upload: %w", err)
  1124  	}
  1125  
  1126  	contentLength := overhead + size
  1127  	opts := rest.Opts{
  1128  		Method:           form.Method,
  1129  		RootURL:          form.URL,
  1130  		Body:             formReader,
  1131  		ContentType:      contentType,
  1132  		ContentLength:    &contentLength,
  1133  		Options:          options,
  1134  		TransferEncoding: []string{"identity"},
  1135  		NoResponse:       true,
  1136  	}
  1137  
  1138  	var resp *http.Response
  1139  	err = f.pacer.CallNoRetry(func() (bool, error) {
  1140  		resp, err = f.rst.Call(ctx, &opts)
  1141  		return f.shouldRetry(ctx, resp, err)
  1142  	})
  1143  	return
  1144  }
  1145  
  1146  func (f *Fs) uploadByResumable(ctx context.Context, in io.Reader, resumable *api.Resumable, options ...fs.OpenOption) (err error) {
  1147  	p := resumable.Params
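        	// strip the first label from the reported endpoint (leaving e.g. "mypikpak.com")
        	// before handing it to the S3 client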
  1148  	endpoint := strings.Join(strings.Split(p.Endpoint, ".")[1:], ".") // "mypikpak.com"
  1149  
  1150  	cfg := &aws.Config{
  1151  		Credentials: credentials.NewStaticCredentials(p.AccessKeyID, p.AccessKeySecret, p.SecurityToken),
  1152  		Region:      aws.String("pikpak"),
  1153  		Endpoint:    &endpoint,
  1154  	}
  1155  	sess, err := session.NewSession(cfg)
  1156  	if err != nil {
  1157  		return
  1158  	}
  1159  
  1160  	uploader := s3manager.NewUploader(sess)
  1161  	// Upload input parameters
  1162  	uParams := &s3manager.UploadInput{
  1163  		Bucket: &p.Bucket,
  1164  		Key:    &p.Key,
  1165  		Body:   in,
  1166  	}
  1167  	// Perform upload with options different from those in the Uploader.
  1168  	_, err = uploader.UploadWithContext(ctx, uParams, func(u *s3manager.Uploader) {
  1169  		// TODO can be user-configurable
  1170  		u.PartSize = 10 * 1024 * 1024 // 10MB part size
  1171  	})
  1172  	return
  1173  }
  1174  
  1175  func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, sha1Str string, size int64, options ...fs.OpenOption) (*api.File, error) {
  1176  	// determine upload type
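        	// objects below 5 MiB are sent as a single multipart form POST;
        	// larger (or unknown-size) objects go through the resumable S3-compatible path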
  1177  	uploadType := api.UploadTypeResumable
  1178  	if size >= 0 && size < int64(5*fs.Mebi) {
  1179  		uploadType = api.UploadTypeForm
  1180  	}
  1181  
  1182  	// request upload ticket to API
  1183  	req := api.RequestNewFile{
  1184  		Kind:       api.KindOfFile,
  1185  		Name:       f.opt.Enc.FromStandardName(leaf),
  1186  		ParentID:   parentIDForRequest(dirID),
  1187  		FolderType: "NORMAL",
  1188  		Size:       size,
  1189  		Hash:       strings.ToUpper(sha1Str),
  1190  		UploadType: uploadType,
  1191  	}
  1192  	if uploadType == api.UploadTypeResumable {
  1193  		req.Resumable = map[string]string{"provider": "PROVIDER_ALIYUN"}
  1194  	}
  1195  	newfile, err := f.requestNewFile(ctx, &req)
  1196  	if err != nil {
  1197  		return nil, fmt.Errorf("failed to create a new file: %w", err)
  1198  	}
  1199  	if newfile.File == nil {
  1200  		return nil, fmt.Errorf("invalid response: %+v", newfile)
  1201  	} else if newfile.File.Phase == api.PhaseTypeComplete {
  1202  		// early return; in case of zero-byte objects
  1203  		return newfile.File, nil
  1204  	}
  1205  
  1206  	if uploadType == api.UploadTypeForm && newfile.Form != nil {
  1207  		err = f.uploadByForm(ctx, in, req.Name, size, newfile.Form, options...)
  1208  	} else if uploadType == api.UploadTypeResumable && newfile.Resumable != nil {
  1209  		err = f.uploadByResumable(ctx, in, newfile.Resumable, options...)
  1210  	} else {
  1211  		return nil, fmt.Errorf("unable to proceed upload: %+v", newfile)
  1212  	}
  1213  
  1214  	if err != nil {
  1215  		return nil, fmt.Errorf("failed to upload: %w", err)
  1216  	}
  1217  	// refresh uploaded file info
  1218  	// Compared to `newfile.File` this updates several fields...
  1219  	// audit, links, modified_time, phase, revision, and web_content_link
  1220  	return f.getFile(ctx, newfile.File.ID)
  1221  }
  1222  
  1223  // Put the object
  1224  //
  1225  // Copy the reader in to the new object which is returned.
  1226  //
  1227  // The new object may have been created if an error is returned
  1228  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1229  	existingObj, err := f.NewObject(ctx, src.Remote())
  1230  	switch err {
  1231  	case nil:
  1232  		return existingObj, existingObj.Update(ctx, in, src, options...)
  1233  	case fs.ErrorObjectNotFound:
  1234  		// Not found so create it
  1235  		newObj := &Object{
  1236  			fs:     f,
  1237  			remote: src.Remote(),
  1238  			linkMu: new(sync.Mutex),
  1239  		}
  1240  		return newObj, newObj.upload(ctx, in, src, false, options...)
  1241  	default:
  1242  		return nil, err
  1243  	}
  1244  }
  1245  
  1246  // UserInfo fetches info about the current user
  1247  func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err error) {
  1248  	user, err := f.getUserInfo(ctx)
  1249  	if err != nil {
  1250  		return nil, err
  1251  	}
  1252  	userInfo = map[string]string{
  1253  		"Id":                user.Sub,
  1254  		"Username":          user.Name,
  1255  		"Email":             user.Email,
  1256  		"PhoneNumber":       user.PhoneNumber,
  1257  		"Password":          user.Password,
  1258  		"Status":            user.Status,
  1259  		"CreatedAt":         time.Time(user.CreatedAt).String(),
  1260  		"PasswordUpdatedAt": time.Time(user.PasswordUpdatedAt).String(),
  1261  	}
  1262  	if vip, err := f.getVIPInfo(ctx); err == nil && vip.Result == "ACCEPTED" {
  1263  		userInfo["VIPExpiresAt"] = time.Time(vip.Data.Expire).String()
  1264  		userInfo["VIPStatus"] = vip.Data.Status
  1265  		userInfo["VIPType"] = vip.Data.Type
  1266  	}
  1267  	return userInfo, nil
  1268  }
  1269  
  1270  // ------------------------------------------------------------
  1271  
  1272  // add offline download task for url
  1273  func (f *Fs) addURL(ctx context.Context, url, path string) (*api.Task, error) {
  1274  	req := api.RequestNewTask{
  1275  		Kind:       api.KindOfFile,
  1276  		UploadType: "UPLOAD_TYPE_URL",
  1277  		URL: &api.URL{
  1278  			URL: url,
  1279  		},
  1280  		FolderType: "DOWNLOAD",
  1281  	}
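        	// if the destination directory can be resolved, target it directly; otherwise
        	// keep FolderType "DOWNLOAD" so the task goes to the default download folder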
  1282  	if parentID, err := f.dirCache.FindDir(ctx, path, false); err == nil {
  1283  		req.ParentID = parentIDForRequest(parentID)
  1284  		req.FolderType = ""
  1285  	}
  1286  	return f.requestNewTask(ctx, &req)
  1287  }
  1288  
  1289  type decompressDirResult struct {
  1290  	Decompressed  int
  1291  	SourceDeleted int
  1292  	Errors        int
  1293  }
  1294  
  1295  func (r decompressDirResult) Error() string {
  1296  	return fmt.Sprintf("%d error(s) while decompressing - see log", r.Errors)
  1297  }
  1298  
  1299  // decompressDir decompresses a file or files in the directory with the given ID
  1300  func (f *Fs) decompressDir(ctx context.Context, filename, id, password string, srcDelete bool) (r decompressDirResult, err error) {
  1301  	_, err = f.listAll(ctx, id, api.KindOfFile, "false", func(item *api.File) bool {
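        		// only zip, 7z and rar archives are candidates for server-side decompression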
  1302  		if item.MimeType == "application/zip" || item.MimeType == "application/x-7z-compressed" || item.MimeType == "application/x-rar-compressed" {
  1303  			if filename == "" || filename == item.Name {
  1304  				res, err := f.requestDecompress(ctx, item, password)
  1305  				if err != nil {
  1306  					err = fmt.Errorf("unexpected error while requesting decompress of %q: %w", item.Name, err)
  1307  					r.Errors++
  1308  					fs.Errorf(f, "%v", err)
  1309  				} else if res.Status != "OK" {
  1310  					r.Errors++
  1311  					fs.Errorf(f, "%q: %d files: %s", item.Name, res.FilesNum, res.Status)
  1312  				} else {
  1313  					r.Decompressed++
  1314  					fs.Infof(f, "%q: %d files: %s", item.Name, res.FilesNum, res.Status)
  1315  					if srcDelete {
  1316  						derr := f.deleteObjects(ctx, []string{item.ID}, f.opt.UseTrash)
  1317  						if derr != nil {
  1318  							derr = fmt.Errorf("failed to delete %q: %w", item.Name, derr)
  1319  							r.Errors++
  1320  							fs.Errorf(f, "%v", derr)
  1321  						} else {
  1322  							r.SourceDeleted++
  1323  						}
  1324  					}
  1325  				}
  1326  			}
  1327  		}
  1328  		return false
  1329  	})
  1330  	if err != nil {
  1331  		err = fmt.Errorf("couldn't list files to decompress: %w", err)
  1332  		r.Errors++
  1333  		fs.Errorf(f, "%v", err)
  1334  	}
  1335  	if r.Errors != 0 {
  1336  		return r, r
  1337  	}
  1338  	return r, nil
  1339  }
  1340  
  1341  var commandHelp = []fs.CommandHelp{{
  1342  	Name:  "addurl",
  1343  	Short: "Add offline download task for url",
  1344  	Long: `This command adds an offline download task for a URL.
  1345  
  1346  Usage:
  1347  
  1348      rclone backend addurl pikpak:dirpath url
  1349  
  1350  Downloads will be stored in 'dirpath'. If 'dirpath' is invalid,
  1351  the download will fall back to the default 'My Pack' folder.
  1352  `,
  1353  }, {
  1354  	Name:  "decompress",
  1355  	Short: "Request decompress of a file/files in a folder",
  1356  	Long: `This command requests decompression of a file or files in a folder.
  1357  
  1358  Usage:
  1359  
  1360      rclone backend decompress pikpak:dirpath {filename} -o password=password
  1361      rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
  1362  
  1363  An optional argument 'filename' can be specified for a file located in
  1364  'pikpak:dirpath'. You may want to pass '-o password=password' for
  1365  password-protected files. Also, pass '-o delete-src-file' to delete
  1366  source files after decompression finishes.
  1367  
  1368  Result:
  1369  
  1370      {
  1371          "Decompressed": 17,
  1372          "SourceDeleted": 0,
  1373          "Errors": 0
  1374      }
  1375  `,
  1376  }}
  1377  
  1378  // Command the backend to run a named command
  1379  //
  1380  // The command run is name
  1381  // args may be used to read arguments from
  1382  // opts may be used to read optional arguments from
  1383  //
  1384  // The result should be capable of being JSON encoded
  1385  // If it is a string or a []string it will be shown to the user
  1386  // otherwise it will be JSON encoded and shown to the user like that
  1387  func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
  1388  	switch name {
  1389  	case "addurl":
  1390  		if len(arg) != 1 {
  1391  			return nil, errors.New("need exactly 1 argument")
  1392  		}
  1393  		return f.addURL(ctx, arg[0], "")
  1394  	case "decompress":
  1395  		filename := ""
  1396  		if len(arg) > 0 {
  1397  			filename = arg[0]
  1398  		}
  1399  		id, err := f.dirCache.FindDir(ctx, "", false)
  1400  		if err != nil {
  1401  			return nil, fmt.Errorf("failed to get an ID of dirpath: %w", err)
  1402  		}
  1403  		password := ""
  1404  		if pass, ok := opt["password"]; ok {
  1405  			password = pass
  1406  		}
  1407  		_, srcDelete := opt["delete-src-file"]
  1408  		return f.decompressDir(ctx, filename, id, password, srcDelete)
  1409  	default:
  1410  		return nil, fs.ErrorCommandNotFound
  1411  	}
  1412  }
  1413  
  1414  // ------------------------------------------------------------
  1415  
  1416  // parseFileID gets the fid parameter from the URL query
  1417  func parseFileID(s string) string {
  1418  	if u, err := url.Parse(s); err == nil {
  1419  		if q, err := url.ParseQuery(u.RawQuery); err == nil {
  1420  			return q.Get("fid")
  1421  		}
  1422  	}
  1423  	return ""
  1424  }
  1425  
  1426  // setMetaData sets the metadata from info
  1427  func (o *Object) setMetaData(info *api.File) (err error) {
  1428  	if info.Kind == api.KindOfFolder {
  1429  		return fs.ErrorIsDir
  1430  	}
  1431  	if info.Kind != api.KindOfFile {
  1432  		return fmt.Errorf("%q is %q: %w", o.remote, info.Kind, fs.ErrorNotAFile)
  1433  	}
  1434  	o.hasMetaData = true
  1435  	o.id = info.ID
  1436  	o.size = info.Size
  1437  	o.modTime = time.Time(info.ModifiedTime)
  1438  	o.mimeType = info.MimeType
  1439  	if info.ParentID == "" {
  1440  		o.parent = "root"
  1441  	} else {
  1442  		o.parent = info.ParentID
  1443  	}
  1444  	o.md5sum = info.Md5Checksum
  1445  	if info.Links.ApplicationOctetStream != nil {
  1446  		o.link = info.Links.ApplicationOctetStream
  1447  		if fid := parseFileID(o.link.URL); fid != "" {
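        			// prefer a media link that refers to the same file ID over the generic octet-stream link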
  1448  			for mid, media := range info.Medias {
  1449  				if media.Link == nil {
  1450  					continue
  1451  				}
  1452  				if mfid := parseFileID(media.Link.URL); fid == mfid {
  1453  					fs.Debugf(o, "Using a media link from Medias[%d]", mid)
  1454  					o.link = media.Link
  1455  					break
  1456  				}
  1457  			}
  1458  		}
  1459  	}
  1460  	return nil
  1461  }
  1462  
  1463  // setMetaDataWithLink ensures there is a valid link for opening the object
  1464  func (o *Object) setMetaDataWithLink(ctx context.Context) error {
  1465  	o.linkMu.Lock()
  1466  	defer o.linkMu.Unlock()
  1467  
  1468  	// check if the current link is valid
  1469  	if o.link.Valid() {
  1470  		return nil
  1471  	}
  1472  
  1473  	// fetch download link with retry scheme
  1474  	// 1 initial attempt and 2 retries are reasonable based on empirical analysis
  1475  	retries := 2
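        	// re-fetch the file info until a valid link appears, backing off 200ms then 400ms between attempts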
  1476  	for i := 1; i <= retries+1; i++ {
  1477  		info, err := o.fs.getFile(ctx, o.id)
  1478  		if err != nil {
  1479  			return fmt.Errorf("can't fetch download link: %w", err)
  1480  		}
  1481  		if err = o.setMetaData(info); err == nil && o.link.Valid() {
  1482  			return nil
  1483  		}
  1484  		if i <= retries {
  1485  			time.Sleep(time.Duration(200*i) * time.Millisecond)
  1486  		}
  1487  	}
  1488  	return errors.New("can't download - no link to download")
  1489  }
  1490  
  1491  // readMetaData gets the metadata if it hasn't already been fetched
  1492  //
  1493  // it also sets the info
  1494  func (o *Object) readMetaData(ctx context.Context) (err error) {
  1495  	if o.hasMetaData {
  1496  		return nil
  1497  	}
  1498  	info, err := o.fs.readMetaDataForPath(ctx, o.remote)
  1499  	if err != nil {
  1500  		return err
  1501  	}
  1502  	return o.setMetaData(info)
  1503  }
  1504  
  1505  // Fs returns the parent Fs
  1506  func (o *Object) Fs() fs.Info {
  1507  	return o.fs
  1508  }
  1509  
  1510  // Return a string version
  1511  func (o *Object) String() string {
  1512  	if o == nil {
  1513  		return "<nil>"
  1514  	}
  1515  	return o.remote
  1516  }
  1517  
  1518  // Remote returns the remote path
  1519  func (o *Object) Remote() string {
  1520  	return o.remote
  1521  }
  1522  
  1523  // Hash returns the Md5sum of an object returning a lowercase hex string
  1524  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  1525  	if t != hash.MD5 {
  1526  		return "", hash.ErrUnsupported
  1527  	}
  1528  	if o.md5sum == "" {
  1529  		return "", nil
  1530  	}
  1531  	return strings.ToLower(o.md5sum), nil
  1532  }
  1533  
  1534  // Size returns the size of an object in bytes
  1535  func (o *Object) Size() int64 {
  1536  	err := o.readMetaData(context.TODO())
  1537  	if err != nil {
  1538  		fs.Logf(o, "Failed to read metadata: %v", err)
  1539  		return 0
  1540  	}
  1541  	return o.size
  1542  }
  1543  
  1544  // MimeType of an Object if known, "" otherwise
  1545  func (o *Object) MimeType(ctx context.Context) string {
  1546  	return o.mimeType
  1547  }
  1548  
  1549  // ID returns the ID of the Object if known, or "" if not
  1550  func (o *Object) ID() string {
  1551  	return o.id
  1552  }
  1553  
  1554  // ParentID returns the ID of the Object parent if known, or "" if not
  1555  func (o *Object) ParentID() string {
  1556  	return o.parent
  1557  }
  1558  
  1559  // ModTime returns the modification time of the object
  1560  func (o *Object) ModTime(ctx context.Context) time.Time {
  1561  	err := o.readMetaData(ctx)
  1562  	if err != nil {
  1563  		fs.Logf(o, "Failed to read metadata: %v", err)
  1564  		return time.Now()
  1565  	}
  1566  	return o.modTime
  1567  }
  1568  
  1569  // SetModTime sets the modification time of the local fs object
  1570  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
  1571  	return fs.ErrorCantSetModTime
  1572  }
  1573  
  1574  // Storable returns a boolean showing whether this object is storable
  1575  func (o *Object) Storable() bool {
  1576  	return true
  1577  }
  1578  
  1579  // Remove an object
  1580  func (o *Object) Remove(ctx context.Context) error {
  1581  	return o.fs.deleteObjects(ctx, []string{o.id}, o.fs.opt.UseTrash)
  1582  }
  1583  
  1584  // httpResponse gets an http.Response object for the object
  1585  // using the url and method passed in
  1586  func (o *Object) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (res *http.Response, err error) {
  1587  	if url == "" {
  1588  		return nil, errors.New("forbidden to download - check sharing permission")
  1589  	}
  1590  	req, err := http.NewRequestWithContext(ctx, method, url, nil)
  1591  	if err != nil {
  1592  		return nil, err
  1593  	}
  1594  	fs.FixRangeOption(options, o.size)
  1595  	fs.OpenOptionAddHTTPHeaders(req.Header, options)
  1596  	if o.size == 0 {
  1597  		// Don't supply range requests for 0 length objects as they always fail
  1598  		delete(req.Header, "Range")
  1599  	}
  1600  	err = o.fs.pacer.Call(func() (bool, error) {
  1601  		res, err = o.fs.client.Do(req)
  1602  		return o.fs.shouldRetry(ctx, res, err)
  1603  	})
  1604  	if err != nil {
  1605  		return nil, err
  1606  	}
  1607  	return res, nil
  1608  }
  1609  
  1610  // open a url for reading
  1611  func (o *Object) open(ctx context.Context, url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  1612  	res, err := o.httpResponse(ctx, url, "GET", options)
  1613  	if err != nil {
  1614  		return nil, fmt.Errorf("open file failed: %w", err)
  1615  	}
  1616  	return res.Body, nil
  1617  }
  1618  
  1619  // Open an object for read
  1620  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  1621  	if o.id == "" {
  1622  		return nil, errors.New("can't download - no id")
  1623  	}
  1624  	if o.size == 0 {
  1625  		// zero-byte objects may have no download link
  1626  		return io.NopCloser(bytes.NewBuffer([]byte(nil))), nil
  1627  	}
  1628  	if err = o.setMetaDataWithLink(ctx); err != nil {
  1629  		return nil, err
  1630  	}
  1631  	return o.open(ctx, o.link.URL, options...)
  1632  }
  1633  
  1634  // Update the object with the contents of the io.Reader, modTime and size
  1635  //
  1636  // If existing is set then it updates the object rather than creating a new one.
  1637  //
  1638  // The new object may have been created if an error is returned
  1639  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
  1640  	return o.upload(ctx, in, src, true, options...)
  1641  }
  1642  
  1643  // upload uploads the object with or without using a temporary file name
  1644  func (o *Object) upload(ctx context.Context, in io.Reader, src fs.ObjectInfo, withTemp bool, options ...fs.OpenOption) (err error) {
  1645  	size := src.Size()
  1646  	remote := o.Remote()
  1647  
  1648  	// Create the directory for the object if it doesn't exist
  1649  	leaf, dirID, err := o.fs.dirCache.FindPath(ctx, remote, true)
  1650  	if err != nil {
  1651  		return err
  1652  	}
  1653  
  1654  	// Calculate sha1sum; grabbed from package jottacloud
  1655  	hashStr, err := src.Hash(ctx, hash.SHA1)
  1656  	if err != nil || hashStr == "" {
  1657  		// unwrap the accounting from the input, we use wrap to put it
  1658  		// back on after the buffering
  1659  		var wrap accounting.WrapFn
  1660  		in, wrap = accounting.UnWrap(in)
  1661  		var cleanup func()
  1662  		hashStr, in, cleanup, err = readSHA1(in, size, int64(o.fs.opt.HashMemoryThreshold))
  1663  		defer cleanup()
  1664  		if err != nil {
  1665  			return fmt.Errorf("failed to calculate SHA1: %w", err)
  1666  		}
  1667  		// Wrap the accounting back onto the stream
  1668  		in = wrap(in)
  1669  	}
  1670  
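        	// with withTemp set, upload under a temporary name first and only then remove the
        	// old object and rename, so a failed upload never clobbers the existing file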
  1671  	if !withTemp {
  1672  		info, err := o.fs.upload(ctx, in, leaf, dirID, hashStr, size, options...)
  1673  		if err != nil {
  1674  			return err
  1675  		}
  1676  		return o.setMetaData(info)
  1677  	}
  1678  
  1679  	// We have to fall back to upload + rename
  1680  	tempName := "rcloneTemp" + random.String(8)
  1681  	info, err := o.fs.upload(ctx, in, tempName, dirID, hashStr, size, options...)
  1682  	if err != nil {
  1683  		return err
  1684  	}
  1685  
  1686  	// upload was successful, need to delete old object before rename
  1687  	if err = o.Remove(ctx); err != nil {
  1688  		return fmt.Errorf("failed to remove old object: %w", err)
  1689  	}
  1690  
  1691  	// rename also updates metadata
  1692  	if info, err = o.fs.renameObject(ctx, info.ID, leaf); err != nil {
  1693  		return fmt.Errorf("failed to rename temp object: %w", err)
  1694  	}
  1695  	return o.setMetaData(info)
  1696  }
  1697  
  1698  // Check the interfaces are satisfied
  1699  var (
  1700  	// _ fs.ListRer         = (*Fs)(nil)
  1701  	// _ fs.ChangeNotifier  = (*Fs)(nil)
  1702  	// _ fs.PutStreamer     = (*Fs)(nil)
  1703  	_ fs.Fs              = (*Fs)(nil)
  1704  	_ fs.Purger          = (*Fs)(nil)
  1705  	_ fs.CleanUpper      = (*Fs)(nil)
  1706  	_ fs.Copier          = (*Fs)(nil)
  1707  	_ fs.Mover           = (*Fs)(nil)
  1708  	_ fs.DirMover        = (*Fs)(nil)
  1709  	_ fs.Commander       = (*Fs)(nil)
  1710  	_ fs.DirCacheFlusher = (*Fs)(nil)
  1711  	_ fs.PublicLinker    = (*Fs)(nil)
  1712  	_ fs.Abouter         = (*Fs)(nil)
  1713  	_ fs.UserInfoer      = (*Fs)(nil)
  1714  	_ fs.Object          = (*Object)(nil)
  1715  	_ fs.MimeTyper       = (*Object)(nil)
  1716  	_ fs.IDer            = (*Object)(nil)
  1717  	_ fs.ParentIDer      = (*Object)(nil)
  1718  )